From af994308bfc98de43d584f79cd1d05df8f631b42 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Wed, 19 Nov 2025 18:38:50 +0800 Subject: [PATCH 01/36] fix op dev node eip1559 error --- Cargo.lock | 1 + crates/engine/local/Cargo.toml | 2 ++ crates/engine/local/src/payload.rs | 28 ++++++++++++++++++++++++---- 3 files changed, 27 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f10b9dc6d2..7ed4309e6c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8352,6 +8352,7 @@ dependencies = [ "reth-engine-primitives", "reth-ethereum-engine-primitives", "reth-optimism-chainspec", + "reth-optimism-forks", "reth-payload-builder", "reth-payload-primitives", "reth-storage-api", diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index dd708dee905..64839ec4363 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -34,6 +34,7 @@ tracing.workspace = true op-alloy-rpc-types-engine = { workspace = true, optional = true } reth-optimism-chainspec = { workspace = true, optional = true } +reth-optimism-forks = { workspace = true, optional = true } [lints] workspace = true @@ -42,5 +43,6 @@ workspace = true op = [ "dep:op-alloy-rpc-types-engine", "dep:reth-optimism-chainspec", + "dep:reth-optimism-forks", "reth-payload-primitives/op", ] diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index 34deaf3e10c..b2dd3563210 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -7,6 +7,9 @@ use reth_ethereum_engine_primitives::EthPayloadAttributes; use reth_payload_primitives::PayloadAttributesBuilder; use std::sync::Arc; +#[cfg(feature = "op")] +use reth_optimism_forks::OpHardforks; + /// The attributes builder for local Ethereum payload. 
#[derive(Debug)] #[non_exhaustive] @@ -48,9 +51,26 @@ where impl PayloadAttributesBuilder for LocalPayloadAttributesBuilder where - ChainSpec: Send + Sync + EthereumHardforks + 'static, + ChainSpec: Send + Sync + EthereumHardforks + OpHardforks + 'static, { fn build(&self, timestamp: u64) -> op_alloy_rpc_types_engine::OpPayloadAttributes { + use alloy_primitives::B64; + + let eip_1559_params = if self.chain_spec.is_holocene_active_at_timestamp(timestamp) || + self.chain_spec.is_jovian_active_at_timestamp(timestamp) + { + Some(B64::ZERO) + } else { + None + }; + + let min_base_fee = if self.chain_spec.is_jovian_active_at_timestamp(timestamp) { + + Some(1_000_000_000u64) + } else { + None + }; + op_alloy_rpc_types_engine::OpPayloadAttributes { payload_attributes: self.build(timestamp), // Add dummy system transaction @@ -59,9 +79,9 @@ where .into(), ]), no_tx_pool: None, - gas_limit: None, - eip_1559_params: None, - min_base_fee: None, + gas_limit: Some(30_000_000), + eip_1559_params, + min_base_fee, } } } From d03b2ee3a424f9937d5505c4bb5de68c04f4fb35 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Thu, 27 Nov 2025 15:25:19 +0800 Subject: [PATCH 02/36] add test_state_root_calculation_with_real_provider --- Cargo.lock | 5 + Cargo.toml | 2 +- crates/engine/local/src/miner.rs | 2 +- crates/engine/tree/Cargo.toml | 6 +- crates/engine/tree/src/tree/tests.rs | 261 ++++++++++++++++++++++++++- crates/evm/evm/src/execute.rs | 30 ++- 6 files changed, 297 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ed4309e6c3..a1a18830652 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8425,6 +8425,8 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-rpc-types-engine", + "alloy-signer", + "alloy-signer-local", "assert_matches", "codspeed-criterion-compat", "crossbeam-channel", @@ -8457,6 +8459,7 @@ dependencies = [ "reth-exex-types", "reth-metrics", "reth-network-p2p", + "reth-node-core", "reth-node-ethereum", "reth-node-metrics", "reth-payload-builder", @@ 
-8469,10 +8472,12 @@ dependencies = [ "reth-stages", "reth-stages-api", "reth-static-file", + "reth-storage-api", "reth-tasks", "reth-testing-utils", "reth-tracing", "reth-trie", + "reth-trie-common", "reth-trie-parallel", "reth-trie-sparse", "reth-trie-sparse-parallel", diff --git a/Cargo.toml b/Cargo.toml index b45136f7615..71dcaf8b4ed 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -292,7 +292,7 @@ too_long_first_doc_paragraph = "allow" # Uncomment this section if you're using a debugger. [profile.dev] # https://davidlattimore.github.io/posts/2024/02/04/speeding-up-the-rust-edit-build-run-cycle.html -debug = "line-tables-only" +debug = "full" split-debuginfo = "unpacked" # Speed up tests. diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index d6298502fb5..aab19e04965 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -141,7 +141,7 @@ where /// Runs the [`LocalMiner`] in a loop, polling the miner and building payloads. pub async fn run(mut self) { - let mut fcu_interval = tokio::time::interval(Duration::from_secs(1)); + let mut fcu_interval = tokio::time::interval(Duration::from_secs(10000)); loop { tokio::select! 
{ // Wait for the interval or the pool to receive a transaction diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index e7a3f4675d3..351e112857a 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -16,6 +16,7 @@ reth-chain-state.workspace = true reth-chainspec = { workspace = true, optional = true } reth-consensus.workspace = true reth-db.workspace = true +reth-node-core.workspace = true reth-engine-primitives.workspace = true reth-errors.workspace = true reth-execution-types.workspace = true @@ -92,9 +93,12 @@ reth-testing-utils.workspace = true reth-tracing.workspace = true reth-node-ethereum.workspace = true reth-e2e-test-utils.workspace = true - +alloy-signer-local.workspace = true +reth-storage-api.workspace = true +reth-trie-common.workspace = true # alloy revm-state.workspace = true +alloy-signer.workspace = true assert_matches.workspace = true criterion.workspace = true diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index 7fbae4cac5c..550ba88f517 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -19,14 +19,14 @@ use alloy_rpc_types_engine::{ }; use assert_matches::assert_matches; use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; -use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; +use reth_chainspec::{ChainSpec, DEV, HOLESKY, MAINNET}; use reth_engine_primitives::{EngineApiValidator, ForkchoiceStatus, NoopInvalidBlockHook}; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_ethereum_primitives::{Block, EthPrimitives}; +use reth_ethereum_primitives::{Block, BlockBody, EthPrimitives}; use reth_evm_ethereum::MockEvmConfig; use reth_primitives_traits::Block as _; -use reth_provider::{test_utils::MockEthProvider, ExecutionOutcome}; +use reth_provider::{test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, 
ProviderFactory, providers::BlockchainProvider, LatestStateProviderRef}; use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::{ collections::BTreeMap, @@ -37,6 +37,16 @@ use std::{ }, }; use tokio::sync::oneshot; +use reth_chain_state::ExecutedBlock; +use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; +use revm::database::BundleState; +use alloy_eips::eip7685::Requests; +use alloy_eips::eip1559::INITIAL_BASE_FEE; // Import from alloy_eips instead +use alloy_signer::SignerSync; +use reth_execution_types::ExecutionOutcome; +// Add this import +use reth_db_common::init::init_genesis; +use reth_provider::test_utils::MockEthProvider; /// Mock engine validator for tests #[derive(Debug, Clone)] @@ -1991,3 +2001,248 @@ mod forkchoice_updated_tests { assert!(result.is_some(), "OpStack should handle canonical head"); } } + +#[test] +fn test_state_root_calculation_with_real_provider() { + reth_tracing::init_test_tracing(); + use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork}; + use reth_chainspec::EthChainSpec; + let mut chain_spec = Arc::try_unwrap(DEV.clone()) + .unwrap_or_else(|arc| (*arc).clone()); + chain_spec.hardforks.remove(&EthereumHardfork::Cancun); + chain_spec.hardforks.remove(&EthereumHardfork::Shanghai); + chain_spec.hardforks.remove(&EthereumHardfork::Prague); + + let chain_spec = Arc::new(chain_spec); + + // let chain_spec = Arc::new(chain_spec); + let genesis_hash = chain_spec.genesis_hash(); + + // Create a real provider factory with database + let provider_factory = create_test_provider_factory_with_chain_spec(Arc::clone(&chain_spec)); + + // Initialize genesis in the database + init_genesis(&provider_factory).expect("Failed to initialize genesis"); + + // Create BlockchainProvider from the factory + let provider = BlockchainProvider::new(provider_factory.clone()) + .expect("Failed to create BlockchainProvider"); + let genesis_block = provider.block(alloy_eips::HashOrNumber::Number(0)) + .expect("Failed to 
query genesis block"); + + assert!(genesis_block.is_some(), "Genesis block should exist"); + let genesis = genesis_block.unwrap(); + + // Seal the block to get its hash + let sealed_genesis = genesis.seal_slow(); + let block_hash = sealed_genesis.hash(); + + // Assert that the genesis block hash matches the expected genesis hash + assert_eq!( + block_hash, + genesis_hash, + "Genesis block hash should match chain spec genesis hash" + ); + + let consensus = Arc::new(EthBeaconConsensus::new(Arc::clone(&chain_spec))); + + let payload_validator = MockEngineValidator; + + let (from_tree_tx, from_tree_rx) = unbounded_channel(); + + let genesis_header = chain_spec.genesis_header().clone(); + let sealed_genesis_header = SealedHeader::seal_slow(genesis_header); + let engine_api_tree_state = + EngineApiTreeState::new(10, 10, sealed_genesis_header.num_hash(), EngineApiKind::Ethereum); + let canonical_in_memory_state = CanonicalInMemoryState::with_head( + sealed_genesis_header.clone(), + None, + None, + ); + + // Set up persistence + let (action_tx, _action_rx) = channel(); + let persistence_handle = PersistenceHandle::new(action_tx); + + // Set up payload builder + let (to_payload_service, _payload_command_rx) = unbounded_channel(); + let payload_builder = PayloadBuilderHandle::new(to_payload_service); + + // Use real EVM config (not mock) for actual execution + use reth_node_ethereum::EthEvmConfig; + let evm_config = EthEvmConfig::new(chain_spec.clone()); + + // Create engine validator + let engine_validator = BasicEngineValidator::new( + provider.clone(), + consensus.clone(), + evm_config.clone(), + payload_validator, + TreeConfig::default(), + Box::new(NoopInvalidBlockHook::default()), + ); + + // Create tree handler + let mut tree = EngineApiTreeHandler::new( + provider.clone(), + consensus, + engine_validator, + from_tree_tx, + engine_api_tree_state, + canonical_in_memory_state, + persistence_handle, + PersistenceState::default(), + payload_builder, + 
TreeConfig::default() + .with_legacy_state_root(false) + .with_has_enough_parallelism(true), + EngineApiKind::Ethereum, + evm_config, + ); + + use reth_node_core::args::DevArgs; + use alloy_signer_local::{coins_bip39::English, MnemonicBuilder, PrivateKeySigner}; + use reth_ethereum_primitives::{Transaction, TransactionSigned, Block, BlockBody}; + use alloy_consensus::{SignableTransaction, TxEip1559}; + use reth_chainspec::MIN_TRANSACTION_GAS; + use alloy_primitives::{Address, U256}; + use alloy_consensus::proofs::calculate_transaction_root; + + let dev_mnemonic = DevArgs::default().dev_mnemonic; + let sender_pk: PrivateKeySigner = MnemonicBuilder::::default() + .phrase(dev_mnemonic) + .index(0) + .expect("invalid derivation path") + .build() + .expect("failed to build signer from mnemonic"); + let sender_address = sender_pk.address(); + + let tx = Transaction::Eip1559(TxEip1559 { + chain_id: chain_spec.chain.id(), + nonce: 0, + gas_limit: MIN_TRANSACTION_GAS, + to: Address::random().into(), + max_fee_per_gas: INITIAL_BASE_FEE as u128, + max_priority_fee_per_gas: 1, + value: U256::from(10), + input: Default::default(), + access_list: Default::default(), + }); + + // Sign the transaction + let signature_hash = tx.signature_hash(); + let signature = sender_pk.sign_hash_sync(&signature_hash).unwrap(); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); + + // Create block 1 header + let mut block1_header = chain_spec.genesis_header().clone(); + block1_header.number = 1; + block1_header.parent_hash = genesis_hash; + block1_header.timestamp = block1_header.timestamp + 12; // 12 seconds later + block1_header.gas_limit = chain_spec.genesis_header().gas_limit; + + let genesis_header = chain_spec.genesis_header(); + let base_fee = chain_spec + .next_block_base_fee(genesis_header, block1_header.timestamp) + .expect("Failed to calculate base fee"); + block1_header.base_fee_per_gas = Some(base_fee); + + // Calculate transactions root + let transactions = 
vec![signed_tx]; + block1_header.transactions_root = calculate_transaction_root(&transactions); + + use reth_primitives_traits::proofs::calculate_receipt_root; + + // Create a temporary block to execute and get receipts + let temp_block = SealedBlock::::from_sealed_parts( + SealedHeader::seal_slow(block1_header.clone()), + BlockBody { + transactions: transactions.clone(), + ommers: Vec::new(), + withdrawals: None, + }, + ); + + // Recover senders for the block + let recovered_temp_block = temp_block.try_recover() + .expect("Failed to recover block"); + + + let db_provider = provider_factory.provider() + .expect("Failed to get database provider"); + let state_db = StateProviderDatabase::new(LatestStateProviderRef::new(&db_provider)); + let evm_config = EthEvmConfig::ethereum(chain_spec.clone()); + + use reth_evm::execute::Executor; + let execution_output = evm_config + .batch_executor(state_db) + .execute(&recovered_temp_block) + .expect("Failed to execute block"); + + // Calculate receipts root + use alloy_consensus::TxReceipt; + let receipts_with_bloom: Vec<_> = execution_output.result.receipts + .iter() + .map(|r| r.with_bloom_ref()) + .collect(); + let receipts_root = calculate_receipt_root(&receipts_with_bloom); + + block1_header.receipts_root =receipts_root; + block1_header.gas_used = execution_output.gas_used; + + + use reth_storage_api::StateRootProvider; + // Get hashed post state from bundle state + let hashed_state = HashedPostState::from_bundle_state::( + execution_output.state.state() + ); + + // Calculate state root using the same state provider + let state_provider_for_root = LatestStateProviderRef::new(&db_provider); + let (state_root, _trie_updates) = state_provider_for_root + .state_root_with_updates(hashed_state) + .expect("Failed to calculate state root"); + block1_header.state_root = state_root; + + // Seal the header + let sealed_block1_header = SealedHeader::seal_slow(block1_header); + let block1_hash = sealed_block1_header.hash(); + + // 
Create block 1 as a SealedBlock + use reth_primitives_traits::SealedBlock; + let sealed_block1 = SealedBlock::::from_sealed_parts( + sealed_block1_header, + BlockBody { + transactions, + ommers: Vec::new(), + withdrawals: None, + }, + ); + + // Create execution payload + let block1 = sealed_block1.into_block(); + let payload1 = ExecutionPayloadV1::from_block_unchecked(block1_hash, &block1); + + // Send newPayload for block 1 + let outcome = tree + .on_new_payload(ExecutionData { + payload: payload1.into(), + sidecar: ExecutionPayloadSidecar::none(), + }) + .expect("Failed to process new payload"); + + // Verify the outcome + assert!( + outcome.outcome.is_valid() || outcome.outcome.is_syncing(), + "Block 1 should be valid or syncing, got: {:?}", + outcome.outcome.status + ); + // + // // Verify state root was calculated by checking if the block was inserted + // // The state root calculation happens during block execution/validation + // // If the block is valid, it means state root was calculated correctly + // if outcome.outcome.is_valid() { + // // Block was successfully validated, which means state root calculation succeeded + // println!("Block 1 validated successfully - state root calculation completed"); + // } +} diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index fca8f6241d5..f2dec426f2e 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -515,9 +515,33 @@ where // calculate the state root let hashed_state = state.hashed_post_state(&db.bundle_state); - let (state_root, trie_updates) = state - .state_root_with_updates(hashed_state.clone()) - .map_err(BlockExecutionError::other)?; + use std::any::type_name_of_val; + use std::any::{Any, TypeId}; +// use reth_provider::{ +// HistoricalStateProviderRef, +// LatestStateProviderRef, +// providers::state::overlay::OverlayStateProvider, +// }; + + let type_name = type_name_of_val(&state); + println!("State type: {}", type_name); + // let type_id = 
state.type_id(); + // if TypeId::of::>() == type_id { + // println!("It's LatestStateProviderRef"); + // } else if TypeId::of::>() == type_id { + // println!("It's HistoricalStateProviderRef"); + // } else if TypeId::of::>() == type_id { + // println!("It's OverlayStateProvider"); + // } else if TypeId::of::>() == type_id { + // println!("It's MemoryOverlayStateProvider"); + // }else if TypeId::of::>() == type_id { + // println!("It's CachedStateProvider"); + // } else { + // println!("Unknown type: {:?}", type_id); + // } + let pr = state.state_root_with_updates(hashed_state.clone()); + let (state_root, trie_updates) = + pr.map_err(BlockExecutionError::other)?; let (transactions, senders) = self.transactions.into_iter().map(|tx| tx.into_parts()).unzip(); From 3b65710e037ce41d85aa29fa265e4f1c4e171117 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 1 Dec 2025 13:58:14 +0800 Subject: [PATCH 03/36] fix trie db test --- Cargo.lock | 133 ++++++++++-- Cargo.toml | 2 + crates/storage/db-common/Cargo.toml | 4 + crates/storage/db-common/src/init.rs | 31 ++- crates/storage/db-common/src/init_triedb.rs | 49 +++++ crates/storage/db-common/src/lib.rs | 3 + crates/trie/db/Cargo.toml | 6 +- crates/trie/db/src/lib.rs | 3 + crates/trie/db/src/tests.rs | 129 +++++++++++ crates/trie/trie/Cargo.toml | 6 + crates/trie/trie/src/lib.rs | 2 + crates/trie/trie/src/trie_ext.rs | 227 ++++++++++++++++++++ 12 files changed, 578 insertions(+), 17 deletions(-) create mode 100644 crates/storage/db-common/src/init_triedb.rs create mode 100644 crates/trie/db/src/tests.rs create mode 100644 crates/trie/trie/src/trie_ext.rs diff --git a/Cargo.lock b/Cargo.lock index a1a18830652..63d4e855097 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -119,7 +119,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-serde", - "alloy-trie", + "alloy-trie 0.9.1", "alloy-tx-macros", "arbitrary", "auto_impl", @@ -291,7 +291,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-serde", - 
"alloy-trie", + "alloy-trie 0.9.1", "borsh", "serde", "serde_with", @@ -885,6 +885,26 @@ dependencies = [ "ws_stream_wasm", ] +[[package]] +name = "alloy-trie" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "983d99aa81f586cef9dae38443245e585840fcf0fc58b09aee0b1f27aed1d500" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "arbitrary", + "arrayvec", + "derive_arbitrary", + "derive_more", + "nybbles 0.3.4", + "proptest", + "proptest-derive 0.5.1", + "serde", + "smallvec", + "tracing", +] + [[package]] name = "alloy-trie" version = "0.9.1" @@ -897,7 +917,7 @@ dependencies = [ "arrayvec", "derive_arbitrary", "derive_more", - "nybbles", + "nybbles 0.4.6", "proptest", "proptest-derive 0.5.1", "serde", @@ -4232,6 +4252,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "gcc" version = "0.3.55" @@ -6237,6 +6266,20 @@ dependencies = [ "libc", ] +[[package]] +name = "nybbles" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +dependencies = [ + "alloy-rlp", + "arbitrary", + "const-hex", + "proptest", + "serde", + "smallvec", +] + [[package]] name = "nybbles" version = "0.4.6" @@ -7502,6 +7545,15 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + [[package]] name = "reqwest" version = "0.11.27" @@ -7788,7 +7840,7 @@ dependencies = [ "alloy-genesis", "alloy-primitives", "alloy-rlp", - "alloy-trie", + "alloy-trie 0.9.1", "auto_impl", "derive_more", "reth-ethereum-forks", @@ -7930,7 +7982,7 @@ dependencies = [ "alloy-eips", "alloy-genesis", "alloy-primitives", - "alloy-trie", + "alloy-trie 0.9.1", "arbitrary", "bytes", "modular-bitfield", @@ -8890,7 +8942,7 @@ dependencies = [ "alloy-evm", "alloy-primitives", "alloy-rlp", - "nybbles", + "nybbles 0.4.6", "reth-storage-errors", "thiserror 2.0.17", ] @@ -9695,7 +9747,7 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", - "alloy-trie", + "alloy-trie 0.9.1", "op-alloy-consensus", "reth-chainspec", "reth-consensus", @@ -10141,7 +10193,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-rpc-types-eth", - "alloy-trie", + "alloy-trie 0.9.1", "arbitrary", "auto_impl", "bincode 1.3.3", @@ -10844,7 +10896,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", - "alloy-trie", + "alloy-trie 0.9.1", "itertools 0.14.0", "k256", "reth-chainspec", @@ -11094,28 +11146,34 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", - "alloy-trie", + "alloy-trie 0.9.1", "assert_matches", "auto_impl", "codspeed-criterion-compat", "itertools 0.14.0", "metrics", + "nybbles 0.3.4", + "nybbles 0.4.6", "parking_lot", "pretty_assertions", "proptest", "proptest-arbitrary-interop", + "reth-chainspec", "reth-ethereum-primitives", "reth-execution-errors", "reth-metrics", "reth-primitives-traits", + "reth-provider", "reth-stages-types", "reth-storage-errors", "reth-tracing", "reth-trie-common", + "reth-trie-db", "reth-trie-sparse", "revm-database", "revm-state", "tracing", + "triedb", "triehash", ] @@ -11129,7 +11187,7 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types-eth", "alloy-serde", - "alloy-trie", + "alloy-trie 0.9.1", 
"arbitrary", "arrayvec", "bincode 1.3.3", @@ -11138,7 +11196,7 @@ dependencies = [ "derive_more", "hash-db", "itertools 0.14.0", - "nybbles", + "nybbles 0.4.6", "plain_hasher", "proptest", "proptest-arbitrary-interop", @@ -11167,13 +11225,17 @@ dependencies = [ "reth-execution-errors", "reth-primitives-traits", "reth-provider", + "reth-storage-api", "reth-trie", "reth-trie-common", + "reth-trie-db", "revm", "revm-database", "serde_json", "similar-asserts", + "tempdir", "tracing", + "triedb", "triehash", ] @@ -11213,7 +11275,7 @@ version = "1.9.2" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-trie", + "alloy-trie 0.9.1", "arbitrary", "assert_matches", "auto_impl", @@ -11246,7 +11308,7 @@ version = "1.9.2" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-trie", + "alloy-trie 0.9.1", "arbitrary", "assert_matches", "itertools 0.14.0", @@ -11940,6 +12002,17 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sealed" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22f968c5ea23d555e670b449c1c5e7b2fc399fdaec1d304a17cd48e288abc107" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + [[package]] name = "sec1" version = "0.7.3" @@ -12701,6 +12774,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "tempdir" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" +dependencies = [ + "rand 0.4.6", + "remove_dir_all", +] + [[package]] name = "tempfile" version = "3.23.0" @@ -13442,6 +13525,28 @@ dependencies = [ "syn 2.0.110", ] +[[package]] +name = "triedb" +version = "0.1.0" +source = "git+https://github.com/base/triedb.git#ee4e382f1a1aa0c773d5707156d85e170bfab488" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-trie 0.8.1", + 
"arrayvec", + "fxhash", + "memmap2", + "metrics", + "metrics-derive", + "parking_lot", + "proptest", + "proptest-derive 0.6.0", + "rayon", + "sealed", + "static_assertions", + "zerocopy", +] + [[package]] name = "triehash" version = "0.8.4" diff --git a/Cargo.toml b/Cargo.toml index 71dcaf8b4ed..4979c8a005e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -740,6 +740,8 @@ xlayer-db = { path = "crates/xlayer/db" } xlayer-rpc = { path = "crates/xlayer/rpc" } xlayer-exex = { path = "crates/xlayer/exex" } +triedb ={git="https://github.com/base/triedb.git"} + # [patch.crates-io] # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } # alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index a4122ebf5c0..4aebb2fd724 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -41,6 +41,10 @@ serde_json.workspace = true # tracing tracing.workspace = true +[features] +default = [] +trie-db-ext = [] + [dev-dependencies] reth-db = { workspace = true, features = ["mdbx"] } reth-provider = { workspace = true, features = ["test-utils"] } diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 810acf4d5d4..64377748c99 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -30,6 +30,9 @@ use serde::{Deserialize, Serialize}; use std::io::BufRead; use tracing::{debug, error, info, trace}; +#[cfg(feature = "trie-db-ext")] +use crate::init_triedb::calculate_state_root_with_triedb; + /// Default soft limit for number of bytes to read from state dump file, before inserting into /// database. 
/// @@ -153,7 +156,24 @@ where insert_genesis_state(&provider_rw, alloc.iter())?; // compute state root to populate trie tables - compute_state_root(&provider_rw, None)?; + #[cfg(feature = "trie-db-ext")] + { + let trie_db_path = std::env::var("RETH_TRIEDB_PATH") + .map(PathBuf::from) + .unwrap_or_else(|_| { + // Default: assume we're in the database directory context + // This creates triedb as a sibling to the main db directory + // Adjust this based on your actual directory structure + PathBuf::from("../triedb") + }); + let trie_ext_db = TrieExtDatabase::new(file_path); + let trie_db_path = std::env::temp_dir().join("reth_triedb_init"); + calculate_state_root_with_triedb(&provider_rw, trie_db_path, None)?; + } + #[cfg(not(feature = "trie-db-ext"))] + { + compute_state_root(&provider_rw, None)?; + } // set stage checkpoint to genesis block number for all stages let checkpoint = StageCheckpoint { block_number: genesis_block_number, ..Default::default() }; @@ -835,4 +855,13 @@ mod tests { )], ); } + + #[test] + fn init_genesis_with_triedb() { + let genesis_hash = + init_genesis(&create_test_provider_factory_with_chain_spec(MAINNET.clone())).unwrap(); + + // actual, expected + assert_eq!(genesis_hash, MAINNET_GENESIS_HASH); + } } diff --git a/crates/storage/db-common/src/init_triedb.rs b/crates/storage/db-common/src/init_triedb.rs new file mode 100644 index 00000000000..944288fd6a3 --- /dev/null +++ b/crates/storage/db-common/src/init_triedb.rs @@ -0,0 +1,49 @@ +use reth_provider::{ + DBProvider, ProviderError, TrieWriter, +}; +use reth_trie::{ + prefix_set::TriePrefixSets, + IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress, +}; +use reth_trie_db::DatabaseHashedCursorFactory; +use reth_trie::{StateRootTrieDb, TrieExtDatabase}; +use alloy_primitives::B256; +use tracing::{info, trace}; +use std::path::Path; + +/// Calculate state root using TrieDB and commit trie updates. +/// +/// This function: +/// 1. 
Uses `StateRootTrieDb` with `DatabaseHashedCursorFactory` to read from the database +/// 2. Calculates state root using TrieDB +/// 3. Returns the computed state root +/// +/// # Arguments +/// +/// * `provider` - Database provider that implements `DBProvider` and `TrieWriter` +/// * `trie_db_path` - Path where the TrieDB database should be created +/// * `prefix_sets` - Optional prefix sets for incremental state root calculation (currently unused) +/// +/// # Returns +/// +/// * `Ok(B256)` - The computed state root hash +/// * `Err(ProviderError)` - If state root calculation fails +pub fn calculate_state_root_with_triedb( + provider: &Provider, + trie_db_path: impl AsRef, + _prefix_sets: Option, +) -> Result +where + Provider: DBProvider + TrieWriter, +{ + trace!(target: "reth::state_root", "Calculating state root using TrieDB"); + let tx = provider.tx_ref(); + let hashed_cursor_factory = DatabaseHashedCursorFactory::new(tx); + let trie_ext_db = TrieExtDatabase::new(trie_db_path); + let state_root_ext = StateRootTrieDb::new(hashed_cursor_factory, trie_ext_db); + let ret = state_root_ext.calculate_commit(); + match ret { + Ok(root) => Ok(root), + Err(error) => Err(ProviderError::TrieWitnessError("".to_string())), + } +} diff --git a/crates/storage/db-common/src/lib.rs b/crates/storage/db-common/src/lib.rs index 22e49abfb05..0a190896607 100644 --- a/crates/storage/db-common/src/lib.rs +++ b/crates/storage/db-common/src/lib.rs @@ -11,4 +11,7 @@ pub mod init; mod db_tool; +#[cfg(feature = "trie-db-ext")] +mod init_triedb; + pub use db_tool::*; diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml index 09ccd301192..946c7de9b62 100644 --- a/crates/trie/db/Cargo.toml +++ b/crates/trie/db/Cargo.toml @@ -32,7 +32,8 @@ reth-db = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } reth-trie = { workspace = true, features = 
#[cfg(test)]
mod tests {
    use crate::DatabaseHashedCursorFactory;
    use alloy_primitives::{keccak256, Address, B256, U256};
    use reth_chainspec::MAINNET;
    use reth_primitives_traits::{Account, StorageEntry};
    use reth_provider::{
        test_utils::create_test_provider_factory_with_chain_spec, DBProvider,
        DatabaseProviderFactory, HashingWriter,
    };
    use reth_storage_api::TrieWriter;
    use reth_trie::{
        hashed_cursor::HashedCursorFactory, StateRoot as StateRootComputer, StateRootProgress,
        StateRootTrieDb, TrieExtDatabase,
    };
    use tempdir::TempDir;

    /// Seeds three accounts with two deterministic storage slots each, then
    /// asserts that the TrieDB-based state root matches the root produced by
    /// the classic incremental `StateRoot` computer over the same data.
    #[test]
    fn test_hashed_cursor_iteration() {
        let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone());
        let provider_rw = provider_factory.database_provider_rw().unwrap();

        // Three accounts with predictable nonce/balance so the fixture is
        // reproducible across runs.
        let dummy_accounts: Vec<(Address, Account)> = (1u8..=3)
            .map(|i| {
                (
                    Address::with_last_byte(i),
                    Account {
                        nonce: u64::from(i) * 10,
                        balance: U256::from(u64::from(i) * 1000),
                        bytecode_hash: None,
                    },
                )
            })
            .collect();

        provider_rw
            .insert_account_for_hashing(
                dummy_accounts.iter().map(|(address, account)| (*address, Some(*account))),
            )
            .unwrap();

        // Two deterministic storage slots per account: the key embeds the
        // address and the slot index, the value is a keccak-derived
        // pseudo-random number.
        let storage_entries: Vec<(Address, Vec<StorageEntry>)> = dummy_accounts
            .iter()
            .map(|(address, _)| {
                let slots = (0u8..2)
                    .map(|i| {
                        let mut key_bytes = [0u8; 32];
                        key_bytes[0..20].copy_from_slice(address.as_slice());
                        key_bytes[20] = i;
                        key_bytes[21] = 0xFF;
                        let hash = keccak256([address.as_slice(), &[i]].concat());
                        StorageEntry {
                            key: B256::from(key_bytes),
                            value: U256::from_be_slice(hash.as_slice()),
                        }
                    })
                    .collect();
                (*address, slots)
            })
            .collect();

        provider_rw.insert_storage_for_hashing(storage_entries).unwrap();
        provider_rw.commit().unwrap();

        // Root computed via the TrieDB-backed implementation.
        let trie_db_ext_root = {
            let provider_ro = provider_factory.database_provider_ro().unwrap();
            let hashed_cursor_factory = DatabaseHashedCursorFactory::new(provider_ro.tx_ref());
            let tmp_dir = TempDir::new("test_triedb").unwrap();
            let trie_ext_db = TrieExtDatabase::new(tmp_dir.path().join("test.db"));
            StateRootTrieDb::new(hashed_cursor_factory, trie_ext_db).calculate_commit().unwrap()
        };

        // Root computed via the classic incremental state root computer.
        let root = {
            let provider_rw = provider_factory.database_provider_rw().unwrap();
            let state_root = StateRootComputer::from_tx(provider_rw.tx_ref());
            match state_root.root_with_progress().unwrap() {
                // The tiny fixture must complete in a single pass; suspending
                // with intermediate progress would indicate a bug in the test
                // setup, so fail loudly instead of silently writing updates.
                StateRootProgress::Progress(..) => {
                    unreachable!("state root computation should complete in one pass")
                }
                StateRootProgress::Complete(root, _, updates) => {
                    provider_rw.write_trie_updates(updates).unwrap();
                    root
                }
            }
        };

        assert_eq!(trie_db_ext_root, root);
    }
}
pub mod verify; +mod trie_ext; +pub use trie_ext::{StateRootTrieDb, TrieExtDatabase}; \ No newline at end of file diff --git a/crates/trie/trie/src/trie_ext.rs b/crates/trie/trie/src/trie_ext.rs new file mode 100644 index 00000000000..cfbeb646907 --- /dev/null +++ b/crates/trie/trie/src/trie_ext.rs @@ -0,0 +1,227 @@ +use std::path::Path; +use alloy_primitives::B256; +use alloy_trie::{HashBuilder, EMPTY_ROOT_HASH}; +use tracing::{debug, trace}; +use reth_execution_errors::StateRootError; +use reth_trie_common::{prefix_set::TriePrefixSets}; +use crate::{IntermediateStateRootState, StateRoot, StateRootProgress, StorageRoot}; +use crate::hashed_cursor::{HashedCursor, HashedCursorFactory}; +use crate::node_iter::{TrieElement, TrieNodeIter}; +use crate::stats::TrieTracker; +use crate::trie::StateRootContext; +use crate::trie_cursor::TrieCursorFactory; +use crate::walker::TrieWalker; +use triedb::{Database as TrieDbDatabase, path::{AddressPath, StoragePath}, }; +use nybbles::Nibbles; +use triedb::account::Account as TrieDbAccount; +use alloy_consensus::constants::KECCAK_EMPTY; +#[derive(Debug)] +pub struct TrieExtDatabase { + pub inner: TrieDbDatabase, +} + +impl TrieExtDatabase { + pub fn new(db_path: impl AsRef) -> Self { + let db = TrieDbDatabase::create_new(db_path).unwrap(); + Self { + inner: db, + } + } +} + +/// `StateRoot` is used to compute the root node of a state trie. +#[derive(Debug)] +pub struct StateRootTrieDb { + /// The factory for hashed cursors. 
+ pub hashed_cursor_factory: H, + pub db: TrieExtDatabase +} + +impl StateRootTrieDb { + /// Creates [`StateRootTrieDb`] with + pub fn new(hashed_cursor_factory: H, db: TrieExtDatabase) -> Self { + Self { + hashed_cursor_factory, + db + } + } +} +impl StateRootTrieDb +where + H: HashedCursorFactory + Clone, +{ + pub fn calculate_commit(self) -> Result { + trace!(target: "trie::state_root", "calculating state root"); + + let mut acct_cursor = self.hashed_cursor_factory.hashed_account_cursor()?; + + let mut tx = self.db.inner.begin_rw().unwrap(); + + // Start from the beginning by seeking to the first account (B256::ZERO) + let mut account_entry = acct_cursor.next().unwrap(); + while let Some((hashed_address, account)) = account_entry { + + let nibbles = Nibbles::unpack(hashed_address); + let address_path = AddressPath::new(nibbles); + + // Get storage cursor for this account first + let mut storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor(hashed_address)?; + + // Iterate through all storage entries for this account to compute storage root + // For now, we'll use EMPTY_ROOT_HASH if no storage entries exist + // TODO: Compute actual storage root from storage entries + let mut storage_entry = storage_cursor.seek(B256::ZERO)?; + let storage_root = if storage_entry.is_some() { + // If there are storage entries, we need to compute the storage root + // For now, use EMPTY_ROOT_HASH as placeholder + // In a full implementation, you'd build the storage trie and get its root + EMPTY_ROOT_HASH + } else { + EMPTY_ROOT_HASH + }; + + // Convert reth_primitives_traits::Account to triedb::account::Account + let triedb_account = TrieDbAccount { + nonce: account.nonce, + balance: account.balance, + code_hash: account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + storage_root, + }; + + tx.set_account(address_path.clone(), Some(triedb_account)).unwrap(); + + // Now set storage slots in TrieDB + while let Some((hashed_storage_key, storage_value)) = storage_entry { + let 
storage_path = StoragePath::for_address_path_and_slot_hash(address_path.clone(), Nibbles::unpack(hashed_storage_key)); + tx.set_storage_slot(storage_path, Some(storage_value)).unwrap(); + + storage_entry = storage_cursor.next()?; + } + + account_entry = acct_cursor.next()?; + } + + tx.commit().unwrap(); + Ok(self.db.inner.state_root()) + // Ok(EMPTY_ROOT_HASH) + } +} + +#[cfg(test)] +mod tests { + use tempdir::TempDir; + use super::{TrieExtDatabase}; + use crate::hashed_cursor::{HashedCursor, HashedCursorFactory}; + use reth_provider::{ + test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, + ProviderFactory, HashingWriter, DBProvider + }; + use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA}; + use reth_provider::DatabaseProviderFactory; + use reth_trie_db::DatabaseHashedCursorFactory; + use alloy_primitives::{Address, U256, keccak256, B256}; + use reth_primitives_traits::Account; + + #[test] + pub fn test_triedb() { + let tmp_dir = TempDir::new("test_triedb").unwrap(); + let file_path = tmp_dir.path().join("test.db"); + let trie_db = TrieExtDatabase::new(file_path); + + let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); + + let mut provider_rw = provider_factory.database_provider_rw().unwrap(); + + // Generate dummy accounts + let dummy_accounts: Vec<(Address, Account)> = vec![ + ( + Address::with_last_byte(1), + Account { + nonce: 10, + balance: U256::from(1000), + bytecode_hash: None, + }, + ), + ( + Address::with_last_byte(2), + Account { + nonce: 20, + balance: U256::from(2000), + bytecode_hash: None, + }, + ), + ( + Address::with_last_byte(3), + Account { + nonce: 30, + balance: U256::from(3000), + bytecode_hash: None, + }, + ), + ]; + + // Insert accounts into the database + let accounts_for_hashing = dummy_accounts + .iter() + .map(|(address, account)| (*address, Some(*account))); + + provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); + + // Commit the 
transaction (this consumes provider_rw) + provider_rw.commit().unwrap(); + + // Get a new provider to read the committed data + let provider_rw = provider_factory.database_provider_rw().unwrap(); + let tx = provider_rw.tx_ref(); + let hashed_cursor_factory = DatabaseHashedCursorFactory::new(tx); + println!("hashed cursor factory: {:?}", hashed_cursor_factory); + let mut account_cursor = hashed_cursor_factory.hashed_account_cursor().unwrap(); + // + // // Start from the beginning (seek to B256::ZERO to get the first account) + // let mut account_entry = account_cursor.seek(B256::ZERO).unwrap(); + // + // let mut iterated_accounts = Vec::new(); + // + // // Iterate through all accounts + // while let Some((hashed_address, account)) = account_entry { + // iterated_accounts.push((hashed_address, account)); + // + // // Move to next account + // account_entry = account_cursor.next().unwrap(); + // } + // + // // Verify we got all the accounts we inserted + // assert_eq!(iterated_accounts.len(), dummy_accounts.len()); + // + // // Verify the accounts match (by checking hashed addresses) + // let inserted_hashed_addresses: Vec = dummy_accounts + // .iter() + // .map(|(address, _)| keccak256(address)) + // .collect(); + // + // let iterated_hashed_addresses: Vec = iterated_accounts + // .iter() + // .map(|(hashed_address, _)| *hashed_address) + // .collect(); + // + // // Sort both for comparison + // let mut inserted_sorted = inserted_hashed_addresses.clone(); + // inserted_sorted.sort(); + // let mut iterated_sorted = iterated_hashed_addresses.clone(); + // iterated_sorted.sort(); + // + // assert_eq!(inserted_sorted, iterated_sorted); + // + // // Verify account data matches + // for (hashed_address, account) in &iterated_accounts { + // let original_account = dummy_accounts + // .iter() + // .find(|(addr, _)| keccak256(addr) == *hashed_address) + // .unwrap(); + // + // assert_eq!(account.nonce, original_account.1.nonce); + // assert_eq!(account.balance, 
original_account.1.balance); + // assert_eq!(account.bytecode_hash, original_account.1.bytecode_hash); + // } + } +} \ No newline at end of file From cf20a56bae56236ce1eba96de83ef5c04610a3e6 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 1 Dec 2025 14:20:03 +0800 Subject: [PATCH 04/36] complete init db test --- Cargo.lock | 2 + crates/storage/db-common/Cargo.toml | 2 + crates/storage/db-common/src/init.rs | 8 +- crates/storage/db-common/src/init_triedb.rs | 116 ++++++++++++++++++ crates/trie/db/src/lib.rs | 3 - crates/trie/db/src/tests.rs | 129 -------------------- 6 files changed, 124 insertions(+), 136 deletions(-) delete mode 100644 crates/trie/db/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 63d4e855097..b25e9f433d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8162,10 +8162,12 @@ dependencies = [ "reth-provider", "reth-stages-types", "reth-static-file-types", + "reth-storage-api", "reth-trie", "reth-trie-db", "serde", "serde_json", + "tempdir", "thiserror 2.0.17", "tracing", ] diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 4aebb2fd724..100c8e33949 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -48,6 +48,8 @@ trie-db-ext = [] [dev-dependencies] reth-db = { workspace = true, features = ["mdbx"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-storage-api.workspace = true +tempdir = "0.3.7" [lints] workspace = true diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 64377748c99..80586bed05d 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -158,14 +158,14 @@ where // compute state root to populate trie tables #[cfg(feature = "trie-db-ext")] { + use std::path::PathBuf; + use reth_trie::{hashed_cursor::{HashedCursorFactory, HashedCursor}, StateRootTrieDb, TrieExtDatabase}; let trie_db_path = std::env::var("RETH_TRIEDB_PATH") .map(PathBuf::from) 
#[cfg(test)]
mod tests {
    use super::*;
    use crate::init::compute_state_root;
    use alloy_primitives::{keccak256, Address, B256, U256};
    use reth_chainspec::MAINNET;
    use reth_primitives_traits::{Account, StorageEntry};
    use reth_provider::{
        test_utils::create_test_provider_factory_with_chain_spec, DBProvider,
        DatabaseProviderFactory, HashingWriter,
    };
    use reth_trie::{StateRootTrieDb, TrieExtDatabase};
    use reth_trie_db::DatabaseHashedCursorFactory;
    use tempdir::TempDir;

    /// Seeds the database with three accounts (two deterministic storage
    /// slots each), then checks that the TrieDB-based state root matches the
    /// root produced by the classic `compute_state_root` implementation.
    #[test]
    fn test_triedb_state_root() {
        let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone());
        let provider_rw = provider_factory.database_provider_rw().unwrap();

        // Three accounts with predictable contents so the fixture is
        // reproducible across runs.
        let dummy_accounts: Vec<(Address, Account)> = (1u8..=3)
            .map(|i| {
                (
                    Address::with_last_byte(i),
                    Account {
                        nonce: u64::from(i) * 10,
                        balance: U256::from(u64::from(i) * 1000),
                        bytecode_hash: None,
                    },
                )
            })
            .collect();

        provider_rw
            .insert_account_for_hashing(
                dummy_accounts.iter().map(|(address, account)| (*address, Some(*account))),
            )
            .unwrap();

        // Two deterministic storage slots per account: the key embeds the
        // address and the slot index; the value is a keccak-derived
        // pseudo-random number.
        let storage_entries: Vec<(Address, Vec<StorageEntry>)> = dummy_accounts
            .iter()
            .map(|(address, _)| {
                let slots = (0u8..2)
                    .map(|i| {
                        let mut key_bytes = [0u8; 32];
                        key_bytes[0..20].copy_from_slice(address.as_slice());
                        key_bytes[20] = i;
                        key_bytes[21] = 0xFF;
                        let hash = keccak256([address.as_slice(), &[i]].concat());
                        StorageEntry {
                            key: B256::from(key_bytes),
                            value: U256::from_be_slice(hash.as_slice()),
                        }
                    })
                    .collect();
                (*address, slots)
            })
            .collect();

        provider_rw.insert_storage_for_hashing(storage_entries).unwrap();
        provider_rw.commit().unwrap();

        // Reference root from the classic implementation.
        // (Renamed from the original's `origin_rot` typo.)
        let origin_root = {
            let provider_rw = provider_factory.database_provider_rw().unwrap();
            compute_state_root(&provider_rw, None).unwrap()
        };

        // Root from the TrieDB-backed implementation over the same data.
        let trie_db_ext_root = {
            let provider_ro = provider_factory.database_provider_ro().unwrap();
            let hashed_cursor_factory = DatabaseHashedCursorFactory::new(provider_ro.tx_ref());
            let tmp_dir = TempDir::new("test_triedb").unwrap();
            let trie_ext_db = TrieExtDatabase::new(tmp_dir.path().join("test.db"));
            StateRootTrieDb::new(hashed_cursor_factory, trie_ext_db).calculate_commit().unwrap()
        };

        assert_eq!(trie_db_ext_root, origin_root);
    }
}
test_hashed_cursor_iteration() { - let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); - - let mut provider_rw = provider_factory.database_provider_rw().unwrap(); - - let dummy_accounts: Vec<(Address, Account)> = vec![ - ( - Address::with_last_byte(1), - Account { - nonce: 10, - balance: U256::from(1000), - bytecode_hash: None, - }, - ), - ( - Address::with_last_byte(2), - Account { - nonce: 20, - balance: U256::from(2000), - bytecode_hash: None, - }, - ), - ( - Address::with_last_byte(3), - Account { - nonce: 30, - balance: U256::from(3000), - bytecode_hash: None, - }, - ), - ]; - - let accounts_for_hashing = dummy_accounts - .iter() - .map(|(address, account)| (*address, Some(*account))); - - provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); - - // Generate two random storage entries for each account - let storage_entries: Vec<(Address, Vec)> = dummy_accounts - .iter() - .map(|(address, _)| { - // Generate two random storage entries per account - // Using deterministic but varied keys based on address and index - let mut storage_vec = Vec::new(); - for i in 0..2 { - // Create a deterministic but unique storage key for each account and slot - let mut key_bytes = [0u8; 32]; - key_bytes[0..20].copy_from_slice(address.as_slice()); - key_bytes[20] = i as u8; - key_bytes[21] = 0xFF; - let storage_key = B256::from(key_bytes); - - // Generate a random value (using address and index for determinism) - let hash = keccak256([address.as_slice(), &[i as u8]].concat()); - let storage_value = U256::from_be_slice(hash.as_slice()); - - storage_vec.push(StorageEntry { - key: storage_key, - value: storage_value, - }); - } - (*address, storage_vec) - }) - .collect(); - - // Insert storage entries for hashing - provider_rw.insert_storage_for_hashing(storage_entries).unwrap(); - - provider_rw.commit().unwrap(); - - - let trie_db_ext_root = { - let provider_ro = provider_factory.database_provider_ro().unwrap(); - let tx = 
provider_ro.tx_ref(); - let hashed_cursor_factory = DatabaseHashedCursorFactory::new(tx); - let tmp_dir = TempDir::new("test_triedb").unwrap(); - let file_path = tmp_dir.path().join("test.db"); - let trie_ext_db = TrieExtDatabase::new(file_path); - let state_root_ext = StateRootTrieDb::new(hashed_cursor_factory, trie_ext_db); - let root = state_root_ext.calculate_commit().unwrap(); - root - }; - - let root = { - - let provider_rw = provider_factory.database_provider_rw().unwrap(); - let tx = provider_rw.tx_ref(); - let state_root = StateRootComputer::from_tx(tx); - let ret = state_root.root_with_progress().unwrap(); - match ret{ - StateRootProgress::Progress(state, _, updates) => { - let updated_len = provider_rw.write_trie_updates(updates).unwrap(); - unreachable!(); - } - StateRootProgress::Complete(root, _, updates) => { - let updated_len = provider_rw.write_trie_updates(updates).unwrap(); - root - } - } - }; - assert_eq!(trie_db_ext_root, root); - } -} \ No newline at end of file From 7ad04ac7d5872d4f156d553ae0ee13b6542b6d77 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 1 Dec 2025 15:19:48 +0800 Subject: [PATCH 05/36] complete benchmark --- Cargo.lock | 2 + crates/storage/db-common/Cargo.toml | 7 + crates/storage/db-common/README.md | 11 ++ .../benches/state_root_comparison.rs | 127 ++++++++++++++++++ crates/storage/db-common/src/init.rs | 2 +- crates/storage/db-common/src/init_triedb.rs | 127 ++++++++---------- crates/storage/db-common/src/lib.rs | 2 +- crates/trie/trie/src/trie_ext.rs | 1 + 8 files changed, 209 insertions(+), 70 deletions(-) create mode 100644 crates/storage/db-common/README.md create mode 100644 crates/storage/db-common/benches/state_root_comparison.rs diff --git a/Cargo.lock b/Cargo.lock index b25e9f433d7..984bd9c0cea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8148,7 +8148,9 @@ dependencies = [ "alloy-genesis", "alloy-primitives", "boyer-moore-magiclen", + "codspeed-criterion-compat", "eyre", + "rand 0.8.5", "reth-chainspec", 
"reth-codecs", "reth-config", diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 100c8e33949..b8ce0ad0840 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -50,6 +50,13 @@ reth-db = { workspace = true, features = ["mdbx"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-storage-api.workspace = true tempdir = "0.3.7" +rand = "0.8" +criterion = { workspace = true } + +[[bench]] +name = "state_root_comparison" +harness = false +required-features = ["trie-db-ext"] [lints] workspace = true diff --git a/crates/storage/db-common/README.md b/crates/storage/db-common/README.md new file mode 100644 index 00000000000..de58ef685f3 --- /dev/null +++ b/crates/storage/db-common/README.md @@ -0,0 +1,11 @@ + +## test + +```aiignore +cargo test -p reth-db-common --features trie-db-ext test_triedb_state_root -- --nocapture +``` + +## bench +```aiignore +cargo bench -p reth-db-common --features trie-db-ext +``` \ No newline at end of file diff --git a/crates/storage/db-common/benches/state_root_comparison.rs b/crates/storage/db-common/benches/state_root_comparison.rs new file mode 100644 index 00000000000..e854ed4afe0 --- /dev/null +++ b/crates/storage/db-common/benches/state_root_comparison.rs @@ -0,0 +1,127 @@ +#![allow(missing_docs, unreachable_pub)] +use alloy_primitives::{Address, B256, U256}; +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use rand::Rng; +use reth_chainspec::MAINNET; +use reth_primitives_traits::{Account, StorageEntry}; +use reth_provider::{ + test_utils::create_test_provider_factory_with_chain_spec, + DatabaseProviderFactory, DBProvider, HashingWriter, ProviderFactory, TrieWriter, +}; +use reth_storage_api::TrieWriter as _; +use reth_trie::StateRoot as StateRootComputer; +use reth_trie_db::DatabaseHashedCursorFactory; +use reth_trie::{StateRootTrieDb, TrieExtDatabase}; +use std::path::PathBuf; +use tempdir::TempDir; +use 
reth_db_common::init::compute_state_root; +use reth_db_common::init_triedb::calculate_state_root_with_triedb; + +fn generate_random_accounts_and_storage( + num_accounts: usize, + storage_per_account: usize, + rng: &mut impl Rng, +) -> (Vec<(Address, Account)>, Vec<(Address, Vec)>) { + let mut accounts = Vec::new(); + let mut storage_entries = Vec::new(); + + for _ in 0..num_accounts { + let mut address_bytes = [0u8; 20]; + rng.fill(&mut address_bytes); + let address = Address::from_slice(&address_bytes); + + let account = Account { + nonce: rng.gen_range(0..=u64::MAX), + balance: U256::from(rng.gen_range(0u128..=u128::MAX)), + bytecode_hash: { + let mut hash_bytes = [0u8; 32]; + rng.fill(&mut hash_bytes); + Some(B256::from(hash_bytes)) + }, + }; + accounts.push((address, account)); + + let mut storage_vec = Vec::new(); + for _ in 0..storage_per_account { + let mut storage_key_bytes = [0u8; 32]; + rng.fill(&mut storage_key_bytes); + let storage_key = B256::from(storage_key_bytes); + + let mut storage_value_bytes = [0u8; 32]; + rng.fill(&mut storage_value_bytes); + let storage_value = U256::from_be_slice(&storage_value_bytes); + + storage_vec.push(StorageEntry { + key: storage_key, + value: storage_value, + }); + } + storage_entries.push((address, storage_vec)); + } + + (accounts, storage_entries) +} + +fn setup_test_data( + num_accounts: usize, + storage_per_account: usize, +) -> reth_provider::providers::ProviderFactory { + let mut rng = rand::thread_rng(); + let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); + + let (accounts, storage_entries) = + generate_random_accounts_and_storage(num_accounts, storage_per_account, &mut rng); + + let mut provider_rw = provider_factory.provider_rw().unwrap(); + + let accounts_for_hashing = accounts + .iter() + .map(|(address, account)| (*address, Some(*account))); + + provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); + 
provider_rw.insert_storage_for_hashing(storage_entries).unwrap(); + provider_rw.commit().unwrap(); + + provider_factory +} + +pub fn bench_state_root_comparison(c: &mut Criterion) { + let mut group = c.benchmark_group("State Root Calculation"); + group.sample_size(10); + + for size in [100000] { + let provider_factory = setup_test_data(size, 5); + + // Benchmark traditional method + group.bench_function(BenchmarkId::new("traditional", size), |b| { + b.iter(|| { + let provider_rw = provider_factory.provider_rw().unwrap(); + compute_state_root(&*provider_rw, None).unwrap() + }) + }); + + // Benchmark TrieDB method + group.bench_function(BenchmarkId::new("triedb", size), |b| { + b.iter_with_setup( + || { + let tmp_dir = TempDir::new("bench_triedb").unwrap(); + let db_path = tmp_dir.path().join(format!("test_{}.db", size)); + (tmp_dir, db_path) + }, + |(tmp_dir, trie_db_path)| { + let provider_rw = provider_factory.provider_rw().unwrap(); + calculate_state_root_with_triedb(&*provider_rw, trie_db_path, None).unwrap() + }, + ) + }); + } + + group.finish(); +} + +criterion_group! { + name = benches; + config = Criterion::default(); + targets = bench_state_root_comparison +} +criterion_main!(benches); diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 80586bed05d..4b57f37911a 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -644,7 +644,7 @@ where /// Computes the state root (from scratch) based on the accounts and storages present in the /// database. 
-pub(crate) fn compute_state_root( +pub fn compute_state_root( provider: &Provider, prefix_sets: Option, ) -> Result diff --git a/crates/storage/db-common/src/init_triedb.rs b/crates/storage/db-common/src/init_triedb.rs index d60afc4af02..4d8c4ce1341 100644 --- a/crates/storage/db-common/src/init_triedb.rs +++ b/crates/storage/db-common/src/init_triedb.rs @@ -59,96 +59,88 @@ mod tests { use reth_chainspec::MAINNET; use reth_provider::DatabaseProviderFactory; use reth_trie_db::DatabaseHashedCursorFactory; - use reth_trie::{hashed_cursor::{HashedCursorFactory, HashedCursor}, StateRootTrieDb, TrieExtDatabase}; + use reth_trie::{StateRootTrieDb, TrieExtDatabase}; use alloy_primitives::{Address, U256, keccak256, B256}; use reth_primitives_traits::{Account, StorageEntry}; use reth_trie::{ - prefix_set::{TriePrefixSets, TriePrefixSetsMut}, - IntermediateStateRootState, Nibbles, StateRoot as StateRootComputer, StateRootProgress, + StateRoot as StateRootComputer, StateRootProgress, }; use reth_storage_api::TrieWriter; - use reth_trie_db::{DatabaseStateRoot}; use crate::init::compute_state_root; + use rand::Rng; + + fn generate_random_accounts_and_storage( + num_accounts: usize, + storage_per_account: usize, + rng: &mut impl Rng, + ) -> (Vec<(Address, Account)>, Vec<(Address, Vec)>) { + let mut accounts = Vec::new(); + let mut storage_entries = Vec::new(); + + for _ in 0..num_accounts { + let mut address_bytes = [0u8; 20]; + rng.fill(&mut address_bytes); + let address = Address::from_slice(&address_bytes); + + let account = Account { + nonce: rng.gen_range(0..=u64::MAX), + balance: U256::from(rng.gen_range(0u128..=u128::MAX)), + bytecode_hash: { + let mut hash_bytes = [0u8; 32]; + rng.fill(&mut hash_bytes); + Some(B256::from(hash_bytes)) + } + }; + accounts.push((address, account)); + + let mut storage_vec = Vec::new(); + for _ in 0..storage_per_account { + let mut storage_key_bytes = [0u8; 32]; + rng.fill(&mut storage_key_bytes); + let storage_key = 
B256::from(storage_key_bytes); + + let mut storage_value_bytes = [0u8; 32]; + rng.fill(&mut storage_value_bytes); + let storage_value = U256::from_be_slice(&storage_value_bytes); + + storage_vec.push(StorageEntry { + key: storage_key, + value: storage_value, + }); + } + storage_entries.push((address, storage_vec)); + } + + (accounts, storage_entries) + } #[test] pub fn test_triedb_state_root() { + let mut rng = rand::thread_rng(); let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); let mut provider_rw = provider_factory.database_provider_rw().unwrap(); - let dummy_accounts: Vec<(Address, Account)> = vec![ - ( - Address::with_last_byte(1), - Account { - nonce: 10, - balance: U256::from(1000), - bytecode_hash: None, - }, - ), - ( - Address::with_last_byte(2), - Account { - nonce: 20, - balance: U256::from(2000), - bytecode_hash: None, - }, - ), - ( - Address::with_last_byte(3), - Account { - nonce: 30, - balance: U256::from(3000), - bytecode_hash: None, - }, - ), - ]; + let (dummy_accounts, storage_entries) = generate_random_accounts_and_storage( + 100, // num_accounts + 5, // storage_per_account + &mut rng, + ); let accounts_for_hashing = dummy_accounts .iter() .map(|(address, account)| (*address, Some(*account))); provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); - - // Generate two random storage entries for each account - let storage_entries: Vec<(Address, Vec)> = dummy_accounts - .iter() - .map(|(address, _)| { - // Generate two random storage entries per account - // Using deterministic but varied keys based on address and index - let mut storage_vec = Vec::new(); - for i in 0..2 { - // Create a deterministic but unique storage key for each account and slot - let mut key_bytes = [0u8; 32]; - key_bytes[0..20].copy_from_slice(address.as_slice()); - key_bytes[20] = i as u8; - key_bytes[21] = 0xFF; - let storage_key = B256::from(key_bytes); - - // Generate a random value (using address and index for 
determinism) - let hash = keccak256([address.as_slice(), &[i as u8]].concat()); - let storage_value = U256::from_be_slice(hash.as_slice()); - - storage_vec.push(StorageEntry { - key: storage_key, - value: storage_value, - }); - } - (*address, storage_vec) - }) - .collect(); - - // Insert storage entries for hashing provider_rw.insert_storage_for_hashing(storage_entries).unwrap(); - provider_rw.commit().unwrap(); - let origin_rot = { + let traditional_root = { let provider_rw = provider_factory.database_provider_rw().unwrap(); - let root = compute_state_root(&provider_rw, None).unwrap(); - root + compute_state_root(&provider_rw, None).unwrap() }; - let trie_db_ext_root = { + let triedb_root = { let provider_ro = provider_factory.database_provider_ro().unwrap(); let tx = provider_ro.tx_ref(); let hashed_cursor_factory = DatabaseHashedCursorFactory::new(tx); @@ -156,10 +148,9 @@ mod tests { let file_path = tmp_dir.path().join("test.db"); let trie_ext_db = TrieExtDatabase::new(file_path); let state_root_ext = StateRootTrieDb::new(hashed_cursor_factory, trie_ext_db); - let root = state_root_ext.calculate_commit().unwrap(); - root + state_root_ext.calculate_commit().unwrap() }; - assert_eq!(trie_db_ext_root, origin_rot); + assert_eq!(triedb_root, traditional_root, "State roots should match"); } } diff --git a/crates/storage/db-common/src/lib.rs b/crates/storage/db-common/src/lib.rs index 0a190896607..492c9a496f4 100644 --- a/crates/storage/db-common/src/lib.rs +++ b/crates/storage/db-common/src/lib.rs @@ -12,6 +12,6 @@ pub mod init; mod db_tool; #[cfg(feature = "trie-db-ext")] -mod init_triedb; +pub mod init_triedb; pub use db_tool::*; diff --git a/crates/trie/trie/src/trie_ext.rs b/crates/trie/trie/src/trie_ext.rs index cfbeb646907..b0d4facdaa5 100644 --- a/crates/trie/trie/src/trie_ext.rs +++ b/crates/trie/trie/src/trie_ext.rs @@ -22,6 +22,7 @@ pub struct TrieExtDatabase { impl TrieExtDatabase { pub fn new(db_path: impl AsRef) -> Self { + let db_path = 
db_path.as_ref(); let db = TrieDbDatabase::create_new(db_path).unwrap(); Self { inner: db, From ba642db56b34d2019b1600285e8fe16f6d5dc3ee Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 1 Dec 2025 15:21:38 +0800 Subject: [PATCH 06/36] update test --- crates/storage/db-common/src/init_triedb.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/storage/db-common/src/init_triedb.rs b/crates/storage/db-common/src/init_triedb.rs index 4d8c4ce1341..b3453e91260 100644 --- a/crates/storage/db-common/src/init_triedb.rs +++ b/crates/storage/db-common/src/init_triedb.rs @@ -122,8 +122,8 @@ mod tests { let mut provider_rw = provider_factory.database_provider_rw().unwrap(); let (dummy_accounts, storage_entries) = generate_random_accounts_and_storage( - 100, // num_accounts - 5, // storage_per_account + 10000, // num_accounts + 10, // storage_per_account &mut rng, ); From 162366f5313f556afcbf323e04281396d0f813a0 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 1 Dec 2025 17:16:29 +0800 Subject: [PATCH 07/36] add perf --- Cargo.toml | 4 +- crates/storage/db-common/Cargo.toml | 13 +- crates/storage/db-common/README.md | 4 +- .../benches/state_root_comparison.rs | 9 +- .../db-common/src/bin/state_root_runner.rs | 126 ++++++++++++++++++ crates/trie/trie/src/trie_ext.rs | 5 +- 6 files changed, 151 insertions(+), 10 deletions(-) create mode 100644 crates/storage/db-common/src/bin/state_root_runner.rs diff --git a/Cargo.toml b/Cargo.toml index 4979c8a005e..46d8ddb5140 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -311,8 +311,8 @@ lto = "thin" [profile.release] opt-level = 3 lto = "thin" -debug = "none" -strip = "symbols" +debug = 1 +strip = "none" panic = "unwind" codegen-units = 16 diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index b8ce0ad0840..774e36cd2fa 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true # reth 
reth-chainspec.workspace = true reth-db-api.workspace = true -reth-provider.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } reth-primitives-traits.workspace = true reth-config.workspace = true reth-trie.workspace = true @@ -19,6 +19,7 @@ reth-trie-db.workspace = true reth-etl.workspace = true reth-codecs.workspace = true reth-stages-types.workspace = true +reth-storage-api.workspace = true reth-fs-util.workspace = true reth-node-types.workspace = true reth-static-file-types.workspace = true @@ -33,6 +34,7 @@ alloy-primitives.workspace = true eyre.workspace = true thiserror.workspace = true boyer-moore-magiclen.workspace = true +rand = "0.8" # io serde.workspace = true @@ -41,9 +43,13 @@ serde_json.workspace = true # tracing tracing.workspace = true +# Add tempdir for the binary +tempdir = "0.3.7" + [features] default = [] trie-db-ext = [] +bin-utils = ["reth-provider/test-utils"] [dev-dependencies] reth-db = { workspace = true, features = ["mdbx"] } @@ -58,5 +64,10 @@ name = "state_root_comparison" harness = false required-features = ["trie-db-ext"] +[[bin]] +name = "state_root_runner" +path = "src/bin/state_root_runner.rs" +required-features = ["trie-db-ext"] # so you can also run the TrieDB method + [lints] workspace = true diff --git a/crates/storage/db-common/README.md b/crates/storage/db-common/README.md index de58ef685f3..b02f44f5c6a 100644 --- a/crates/storage/db-common/README.md +++ b/crates/storage/db-common/README.md @@ -8,4 +8,6 @@ cargo test -p reth-db-common --features trie-db-ext test_triedb_state_root -- -- ## bench ```aiignore cargo bench -p reth-db-common --features trie-db-ext -``` \ No newline at end of file +``` + +cargo run --release -p reth-db-common --features trie-db-ext --bin state_root_runner -- traditional 100000 5 \ No newline at end of file diff --git a/crates/storage/db-common/benches/state_root_comparison.rs b/crates/storage/db-common/benches/state_root_comparison.rs index e854ed4afe0..022f44fabf7 
100644 --- a/crates/storage/db-common/benches/state_root_comparison.rs +++ b/crates/storage/db-common/benches/state_root_comparison.rs @@ -91,12 +91,13 @@ pub fn bench_state_root_comparison(c: &mut Criterion) { for size in [100000] { let provider_factory = setup_test_data(size, 5); - + // Benchmark traditional method group.bench_function(BenchmarkId::new("traditional", size), |b| { b.iter(|| { let provider_rw = provider_factory.provider_rw().unwrap(); - compute_state_root(&*provider_rw, None).unwrap() + compute_state_root(&*provider_rw, None).unwrap(); + provider_rw.commit().unwrap(); }) }); @@ -109,8 +110,8 @@ pub fn bench_state_root_comparison(c: &mut Criterion) { (tmp_dir, db_path) }, |(tmp_dir, trie_db_path)| { - let provider_rw = provider_factory.provider_rw().unwrap(); - calculate_state_root_with_triedb(&*provider_rw, trie_db_path, None).unwrap() + let provider = provider_factory.provider_rw().unwrap(); + calculate_state_root_with_triedb(&*provider, trie_db_path, None).unwrap() }, ) }); diff --git a/crates/storage/db-common/src/bin/state_root_runner.rs b/crates/storage/db-common/src/bin/state_root_runner.rs new file mode 100644 index 00000000000..04cd5fc4a4a --- /dev/null +++ b/crates/storage/db-common/src/bin/state_root_runner.rs @@ -0,0 +1,126 @@ +use std::time::Instant; + +use alloy_primitives::{Address, B256, U256}; +use rand::Rng; +use reth_chainspec::MAINNET; +use reth_primitives_traits::{Account, StorageEntry}; +use reth_provider::{ + test_utils::create_test_provider_factory_with_chain_spec, + DatabaseProviderFactory, HashingWriter, ProviderFactory, TrieWriter, +}; +use reth_storage_api::TrieWriter as _; +use reth_db_common::init::compute_state_root; +use reth_db_common::init_triedb::calculate_state_root_with_triedb; + +fn generate_random_accounts_and_storage( + num_accounts: usize, + storage_per_account: usize, + rng: &mut impl Rng, +) -> (Vec<(Address, Account)>, Vec<(Address, Vec)>) { + let mut accounts = Vec::new(); + let mut storage_entries = 
Vec::new(); + + for _ in 0..num_accounts { + let mut address_bytes = [0u8; 20]; + rng.fill(&mut address_bytes); + let address = Address::from_slice(&address_bytes); + + let account = Account { + nonce: rng.gen_range(0..=u64::MAX), + balance: U256::from(rng.gen_range(0u128..=u128::MAX)), + bytecode_hash: { + let mut hash_bytes = [0u8; 32]; + rng.fill(&mut hash_bytes); + Some(B256::from(hash_bytes)) + }, + }; + accounts.push((address, account)); + + let mut storage_vec = Vec::new(); + for _ in 0..storage_per_account { + let mut key_bytes = [0u8; 32]; + rng.fill(&mut key_bytes); + let key = B256::from(key_bytes); + + let mut value_bytes = [0u8; 32]; + rng.fill(&mut value_bytes); + let value = U256::from_be_slice(&value_bytes); + + storage_vec.push(StorageEntry { key, value }); + } + storage_entries.push((address, storage_vec)); + } + + (accounts, storage_entries) +} + +fn setup_test_data( + num_accounts: usize, + storage_per_account: usize, +) -> ProviderFactory { + let mut rng = rand::thread_rng(); + let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); + + let (accounts, storage_entries) = + generate_random_accounts_and_storage(num_accounts, storage_per_account, &mut rng); + + // single RW tx to populate DB, then commit + let mut provider_rw = provider_factory.provider_rw().unwrap(); + + let accounts_for_hashing = accounts + .iter() + .map(|(address, account)| (*address, Some(*account))); + + provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); + provider_rw.insert_storage_for_hashing(storage_entries).unwrap(); + provider_rw.commit().unwrap(); + + provider_factory +} + +fn main() { + // args: traditional | triedb [num_accounts] [storage_per_account] + let mut args = std::env::args().skip(1); + let mode = args.next().unwrap_or_else(|| "traditional".to_string()); + let num_accounts: usize = args.next().unwrap_or_else(|| "100000".to_string()).parse().unwrap(); + let storage_per_account: usize = + 
args.next().unwrap_or_else(|| "5".to_string()).parse().unwrap(); + + println!( + "Running state root with mode={mode}, num_accounts={num_accounts}, storage_per_account={storage_per_account}" + ); + + let provider_factory = setup_test_data(num_accounts, storage_per_account); + + match mode.as_str() { + "traditional" => { + let provider_rw = provider_factory.provider_rw().unwrap(); + let start = Instant::now(); + let root = compute_state_root(&*provider_rw, None).unwrap(); + // If you want to persist trie tables, commit here: + provider_rw.commit().unwrap(); + let elapsed = start.elapsed(); + println!("traditional: root={root:?}, elapsed={:?}", elapsed); + } + "triedb" => { + use tempdir::TempDir; + + let provider_rw = provider_factory.provider_rw().unwrap(); + let tmp_dir = TempDir::new("state_root_triedb").unwrap(); + let trie_db_path = tmp_dir.path().join("triedb.db"); + + let start = Instant::now(); + let root = + calculate_state_root_with_triedb(&*provider_rw, trie_db_path.clone(), None).unwrap(); + let elapsed = start.elapsed(); + println!( + "triedb: root={root:?}, elapsed={:?}", + elapsed + ); + } + other => { + eprintln!("Unknown mode: {other}. 
Use 'traditional' or 'triedb'."); + std::process::exit(1); + } + } +} diff --git a/crates/trie/trie/src/trie_ext.rs b/crates/trie/trie/src/trie_ext.rs index b0d4facdaa5..67ca71b057a 100644 --- a/crates/trie/trie/src/trie_ext.rs +++ b/crates/trie/trie/src/trie_ext.rs @@ -1,4 +1,5 @@ use std::path::Path; +use std::time::Instant; use alloy_primitives::B256; use alloy_trie::{HashBuilder, EMPTY_ROOT_HASH}; use tracing::{debug, trace}; @@ -101,10 +102,10 @@ where account_entry = acct_cursor.next()?; } - +let start_commit = Instant::now(); tx.commit().unwrap(); + println!("commit elapsed: {:?}", start_commit.elapsed()); Ok(self.db.inner.state_root()) - // Ok(EMPTY_ROOT_HASH) } } From cda987b42dda22707ac3ee8369b5fc973b07cebb Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Tue, 2 Dec 2025 17:06:20 +0800 Subject: [PATCH 08/36] refactor --- crates/storage/db-common/src/init.rs | 4 +- crates/storage/db-common/src/init_triedb.rs | 89 ++++++++++++++++++++- crates/storage/db-common/src/lib.rs | 2 +- 3 files changed, 90 insertions(+), 5 deletions(-) diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 4b57f37911a..a543579c1e2 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -25,11 +25,11 @@ use reth_trie::{ prefix_set::{TriePrefixSets, TriePrefixSetsMut}, IntermediateStateRootState, Nibbles, StateRoot as StateRootComputer, StateRootProgress, }; -use reth_trie_db::DatabaseStateRoot; +use reth_trie_db::{DatabaseStateRoot, DatabaseTrieCursorFactory}; use serde::{Deserialize, Serialize}; use std::io::BufRead; use tracing::{debug, error, info, trace}; - +use reth_trie::{trie_cursor::{TrieCursor, TrieCursorFactory}}; #[cfg(feature = "trie-db-ext")] use crate::init_triedb::calculate_state_root_with_triedb; diff --git a/crates/storage/db-common/src/init_triedb.rs b/crates/storage/db-common/src/init_triedb.rs index b3453e91260..ce2f21bb84d 100644 --- a/crates/storage/db-common/src/init_triedb.rs +++ 
b/crates/storage/db-common/src/init_triedb.rs @@ -58,7 +58,7 @@ mod tests { }; use reth_chainspec::MAINNET; use reth_provider::DatabaseProviderFactory; - use reth_trie_db::DatabaseHashedCursorFactory; + use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use reth_trie::{StateRootTrieDb, TrieExtDatabase}; use alloy_primitives::{Address, U256, keccak256, B256}; use reth_primitives_traits::{Account, StorageEntry}; @@ -68,6 +68,7 @@ mod tests { use reth_storage_api::TrieWriter; use crate::init::compute_state_root; use rand::Rng; + use reth_trie::trie_cursor::{TrieCursor, TrieCursorFactory}; fn generate_random_accounts_and_storage( num_accounts: usize, @@ -115,7 +116,7 @@ mod tests { } #[test] - pub fn test_triedb_state_root() { + pub fn test_triedb_state_root_with_random_accts() { let mut rng = rand::thread_rng(); let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); @@ -153,4 +154,88 @@ mod tests { assert_eq!(triedb_root, traditional_root, "State roots should match"); } + + #[test] + pub fn test_triedb_state_root_with_determistic_accts() { + let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); + let mut provider_rw = provider_factory.database_provider_rw().unwrap(); + + let accounts: Vec<(Address, Account)> = vec![ + ( + Address::from_slice(&[ + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, + ]), // keccak256(address) = [20, 104, 40, 128, 86, 49, 12, 130, 170, 76, 1, 167, 225, 42, 16, 248, 17, 26, 5, 96, 231, 43, 112, 5, 85, 71, 144, 49, 184, 108, 53, 125] + Account { nonce: 1, balance: U256::from(100u64), bytecode_hash: None }, + ), + ( + Address::from_slice(&[ + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, + ]), // keccak256(address) = [213, 38, 136, 168, 249, 38, 200, 22, 202, 30, 7, 144, 103, 202, 186, 148, 79, 21, 
142, 118, 72, 23, 184, 63, 196, 53, 148, 55, 12, 169, 207, 98] + Account { nonce: 2, balance: U256::from(200u64), bytecode_hash: None }, + ), + ( + Address::from_slice(&[ + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x10, + ]), // keccak256(address) = [144,176, 210, 137, 234, 33, 29, 202, 142, 2, 12, 156, 200, 197, 214, 186, 47, 65, 111, 225, 95, 166, 146, 180, 113, 132, 164, 185, 70, 178, 33, 77] + Account { nonce: 3, balance: U256::from(300u64), bytecode_hash: None }, + ), + ]; + + // let storage_entries: Vec<(Address, Vec)> = accounts + // .iter() + // .map(|(address, _)| { + // let addr_bytes = address.as_slice(); + // + // let key1 = B256::from_slice(&keccak256([addr_bytes, &[0x01]].concat()).as_slice()); + // let key2 = B256::from_slice(&keccak256([addr_bytes, &[0x02]].concat()).as_slice()); + // + // let value1 = U256::from(keccak256([addr_bytes, &[0xA1]].concat()).as_slice()); + // let value2 = U256::from(keccak256([addr_bytes, &[0xA2]].concat()).as_slice()); + // + // let slots = vec![ + // StorageEntry { key: key1, value: value1 }, + // StorageEntry { key: key2, value: value2 }, + // ]; + // + // (*address, slots) + // }) + // .collect(); + + let accounts_for_hashing = accounts + .iter() + .map(|(address, account)| (*address, Some(*account))); + + provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); + // provider_rw.insert_storage_for_hashing(storage_entries).unwrap(); + provider_rw.commit().unwrap(); + + // Traditional root + let traditional_root = { + let provider_rw = provider_factory.database_provider_rw().unwrap(); + compute_state_root(&provider_rw, None).unwrap() + }; + + // TrieDB root + let triedb_root = { + let provider_ro = provider_factory.database_provider_ro().unwrap(); + let tx = provider_ro.tx_ref(); + let hashed_cursor_factory = DatabaseHashedCursorFactory::new(tx); + let tmp_dir = TempDir::new("test_triedb_deterministic").unwrap(); + let 
file_path = tmp_dir.path().join("test.db"); + let trie_ext_db = TrieExtDatabase::new(file_path); + let state_root_ext = StateRootTrieDb::new(hashed_cursor_factory, trie_ext_db); + state_root_ext.calculate_commit().unwrap() + }; + + assert_eq!(triedb_root, traditional_root, "Deterministic state roots should match"); + } } diff --git a/crates/storage/db-common/src/lib.rs b/crates/storage/db-common/src/lib.rs index 492c9a496f4..307476a518f 100644 --- a/crates/storage/db-common/src/lib.rs +++ b/crates/storage/db-common/src/lib.rs @@ -11,7 +11,7 @@ pub mod init; mod db_tool; -#[cfg(feature = "trie-db-ext")] +// #[cfg(feature = "trie-db-ext")] pub mod init_triedb; pub use db_tool::*; From 1980db4f6edf8c2f3600b8837887ef9f228f2f4b Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 8 Dec 2025 11:10:57 +0800 Subject: [PATCH 09/36] add op engine tests --- Cargo.lock | 2 + crates/e2e-test-utils/src/lib.rs | 5 +- crates/engine/tree/src/tree/tests.rs | 112 +++++++++++ crates/ethereum/node/Cargo.toml | 1 + crates/ethereum/node/tests/e2e/engine.rs | 191 +++++++++++++++++++ crates/ethereum/node/tests/e2e/main.rs | 1 + crates/node/core/src/args/payload_builder.rs | 2 +- crates/optimism/node/Cargo.toml | 2 + crates/optimism/node/tests/it/engine.rs | 151 +++++++++++++++ crates/optimism/node/tests/it/main.rs | 2 + crates/payload/basic/src/lib.rs | 2 +- crates/payload/builder/src/service.rs | 8 +- 12 files changed, 475 insertions(+), 4 deletions(-) create mode 100644 crates/ethereum/node/tests/e2e/engine.rs create mode 100644 crates/optimism/node/tests/it/engine.rs diff --git a/Cargo.lock b/Cargo.lock index 984bd9c0cea..9cb54b54cef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9857,6 +9857,7 @@ name = "reth-optimism-node" version = "1.9.2" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-genesis", "alloy-network", "alloy-primitives", @@ -9891,6 +9892,7 @@ dependencies = [ "reth-optimism-storage", "reth-optimism-txpool", "reth-payload-builder", + "reth-payload-primitives", 
"reth-payload-util", "reth-primitives-traits", "reth-provider", diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 57d03f70fa5..ae45a962951 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -58,7 +58,10 @@ where PayloadAttributesBuilder<<::Payload as PayloadTypes>::PayloadAttributes>, { E2ETestSetupBuilder::new(num_nodes, chain_spec, attributes_generator) - .with_node_config_modifier(move |config| config.set_dev(is_dev)) + .with_node_config_modifier(move |mut config| { + config.set_dev(is_dev) + + }) .build() .await } diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index 550ba88f517..b853e26adc1 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -2002,6 +2002,118 @@ mod forkchoice_updated_tests { } } +#[test] +fn test_fcu_with_real_provider() { + use reth_chainspec::{EthereumHardfork}; + use reth_chainspec::EthChainSpec; + use reth_payload_primitives::EngineApiMessageVersion; + use reth_node_ethereum::EthEvmConfig; + use alloy_rpc_types_engine::PayloadAttributes; + use alloy_primitives::Address; + + reth_tracing::init_test_tracing(); + + let mut chain_spec = Arc::try_unwrap(DEV.clone()) + .unwrap_or_else(|arc| (*arc).clone()); + chain_spec.hardforks.remove(&EthereumHardfork::Cancun); + chain_spec.hardforks.remove(&EthereumHardfork::Shanghai); + chain_spec.hardforks.remove(&EthereumHardfork::Prague); + + let chain_spec = Arc::new(chain_spec); + let genesis_hash = chain_spec.genesis_hash(); + + let provider_factory = create_test_provider_factory_with_chain_spec(Arc::clone(&chain_spec)); + + init_genesis(&provider_factory).expect("Failed to initialize genesis"); + + let provider = BlockchainProvider::new(provider_factory.clone()) + .expect("Failed to create BlockchainProvider"); + + let consensus = Arc::new(EthBeaconConsensus::new(Arc::clone(&chain_spec))); + let payload_validator = MockEngineValidator; + + let 
(from_tree_tx, _from_tree_rx) = unbounded_channel(); + + let genesis_header = chain_spec.genesis_header().clone(); + let sealed_genesis_header = SealedHeader::seal_slow(genesis_header); + let engine_api_tree_state = + EngineApiTreeState::new(10, 10, sealed_genesis_header.num_hash(), EngineApiKind::Ethereum); + let canonical_in_memory_state = CanonicalInMemoryState::with_head( + sealed_genesis_header.clone(), + None, + None, + ); + + let (action_tx, _action_rx) = channel(); + let persistence_handle = PersistenceHandle::new(action_tx); + + let (to_payload_service, _payload_command_rx) = unbounded_channel(); + let payload_builder = PayloadBuilderHandle::new(to_payload_service); + payload_builder.spawn_payload_builder_service(); + let evm_config = EthEvmConfig::new(chain_spec.clone()); + + let engine_validator = BasicEngineValidator::new( + provider.clone(), + consensus.clone(), + evm_config.clone(), + payload_validator, + TreeConfig::default(), + Box::new(NoopInvalidBlockHook::default()), + ); + + let mut tree = EngineApiTreeHandler::new( + provider.clone(), + consensus, + engine_validator, + from_tree_tx, + engine_api_tree_state, + canonical_in_memory_state, + persistence_handle, + PersistenceState::default(), + payload_builder, + TreeConfig::default() + .with_legacy_state_root(false) + .with_has_enough_parallelism(true), + EngineApiKind::Ethereum, + evm_config, + ); + + let fcu_state = ForkchoiceState { + head_block_hash: genesis_hash, + safe_block_hash: genesis_hash, + finalized_block_hash: genesis_hash, + }; + + let genesis_timestamp = chain_spec.genesis_header().timestamp; + let payload_attrs = Some(PayloadAttributes { + timestamp: genesis_timestamp + 12, // 12 seconds after genesis + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: None, + parent_beacon_block_root: None, + }); + + let mut outcome = tree + .on_forkchoice_updated(fcu_state, payload_attrs, EngineApiMessageVersion::default()) + .expect("Failed to process 
forkchoice update"); + + println!("outcome: {outcome:?}"); + + // let rt = tokio::runtime::Builder::new_current_thread() + // .enable_all() + // .build() + // .unwrap(); + // rt.block_on(tokio::time::sleep(tokio::time::Duration::from_secs(86400))); + std::thread::sleep(std::time::Duration::from_secs(86400)); + + // let fcu_result = outcome.outcome.await.expect("Failed to await forkchoice result"); + // assert!( + // fcu_result.payload_status.is_valid() || fcu_result.payload_status.is_syncing(), + // "Forkchoice update should be valid or syncing, got: {:?}", + // fcu_result.payload_status + // ); +} + #[test] fn test_state_root_calculation_with_real_provider() { reth_tracing::init_test_tracing(); diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 575934007f9..f3b20bc13ae 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -69,6 +69,7 @@ alloy-sol-types.workspace = true alloy-contract.workspace = true alloy-rpc-types-beacon = { workspace = true, features = ["ssz"] } alloy-consensus.workspace = true +alloy-eips.workspace = true futures.workspace = true tokio.workspace = true diff --git a/crates/ethereum/node/tests/e2e/engine.rs b/crates/ethereum/node/tests/e2e/engine.rs new file mode 100644 index 00000000000..5182e52e14e --- /dev/null +++ b/crates/ethereum/node/tests/e2e/engine.rs @@ -0,0 +1,191 @@ +use crate::utils::eth_payload_attributes; +use alloy_genesis::Genesis; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_e2e_test_utils::{ + node::NodeTestContext, setup, transaction::TransactionTestContext, wallet::Wallet, +}; +use reth_node_builder::{NodeBuilder, NodeHandle}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; +use reth_node_ethereum::EthereumNode; +use reth_tasks::TaskManager; +use std::sync::Arc; +use reth_provider::BlockReaderIdExt; +use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; + +#[tokio::test] +async fn 
can_call_fcu_with_attributes_to_execute_next_block() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_sepc = ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(); + let (mut nodes, _tasks, _wallet) = setup::( + 1, + Arc::new(chain_sepc.clone()), + false, + eth_payload_attributes, + ) + .await?; + + let mut node = nodes.pop().unwrap(); + + let genesis_hash = node.block_hash(0); + + let provider = &node.inner.provider; + let current_head = provider + .sealed_header_by_number_or_tag(BlockNumberOrTag::Latest) + .unwrap() + .unwrap(); + let current_head_hash = current_head.hash(); + assert_eq!(current_head_hash,chain_sepc.genesis_hash()); + + // let current_head_number = current_head.number(); + let current_timestamp = current_head.timestamp; + // + // Create payload attributes for the next block + use alloy_rpc_types_engine::PayloadAttributes; + use alloy_primitives::{Address, B256}; + use reth_payload_primitives::EngineApiMessageVersion; + use reth_ethereum_engine_primitives::EthPayloadBuilderAttributes; + + let wallet = Wallet::default(); + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallet.inner).await; + let _tx_hash = node.rpc.inject_tx(raw_tx).await?; + + let payload_attrs = PayloadAttributes { + timestamp: current_timestamp + 12, // 12 seconds after current block + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + // + // Call FCU with payload attributes + use alloy_rpc_types_engine::ForkchoiceState; + let fcu_state = ForkchoiceState { + head_block_hash: current_head_hash, + safe_block_hash: current_head_hash, + finalized_block_hash: current_head_hash, + }; + + let fcu_result = node + .inner + .add_ons_handle + .beacon_engine_handle + .fork_choice_updated( + fcu_state, + Some(payload_attrs.into()), + 
EngineApiMessageVersion::default(), + ) + .await?; + println!("fcu_result: {fcu_result:?}"); + + let payload_id = fcu_result + .payload_id + .expect("FCU with attributes should return a payload ID"); + + // Wait a bit for payload to be built + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + // Get the built payload + use reth_rpc_api::clients::EngineApiClient; + // use reth_ethereum_engine_primitives::EthEngineTypes; + + let engine_client = node.inner.add_ons_handle.beacon_engine_handle.clone(); + // engine_client. + // engine_client.new_payload().await; + let payload_builder_handle = node.inner.payload_builder_handle.clone(); + + // Wait a bit for payload to be built + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + // Get the best payload by payload_id + let built_payload = payload_builder_handle + .best_payload(payload_id) + .await + .transpose() + .ok() + .flatten() + .expect("Payload should be built"); + + // Convert the built payload to ExecutionData using the helper method + use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_node_api::PayloadTypes; + + let execution_data = EthEngineTypes::::block_to_payload( + built_payload.block().clone() + ); + + let new_payload_result = engine_client.new_payload(execution_data).await?; + println!("new_payload_result: {new_payload_result:?}"); + + + // // Verify FCU was successful and got a payload ID + // assert!( + // fcu_result.payload_status.is_valid(), + // "FCU should return valid status, got: {:?}", + // fcu_result.payload_status.status + // ); + // + // let payload_id = fcu_result + // .payload_id + // .expect("FCU with attributes should return a payload ID"); + // + // // Wait a bit for payload to be built + // tokio::time::sleep(std::time::Duration::from_millis(500)).await; + // + // // Get the built payload + // use reth_rpc_api::clients::EngineApiClient; + // use reth_ethereum_engine_primitives::EthEngineTypes; + // + // let engine_client = 
node.inner.add_ons_handle.beacon_engine_handle.clone(); + // let payload_envelope = EngineApiClient::::get_payload_v3( + // &engine_client, + // payload_id, + // ) + // .await?; + // + // // Verify the payload + // let built_block = payload_envelope.block(); + // assert_eq!( + // built_block.header.parent_hash, + // current_head_hash, + // "Built block should have correct parent hash" + // ); + // assert_eq!( + // built_block.header.number, + // current_head_number + 1, + // "Built block should be next block number" + // ); + // assert_eq!( + // built_block.header.timestamp, + // payload_attrs.timestamp, + // "Built block should have correct timestamp" + // ); + // + // // Submit the payload + // let new_block_hash = node.submit_payload(payload_envelope.payload().clone()).await?; + // + // // Update forkchoice to make the new block canonical + // node.update_forkchoice(current_head_hash, new_block_hash).await?; + // + // // Verify the new block is now the head + // let new_head = provider + // .sealed_header_by_number_or_tag(alloy_eips::eip2718::BlockNumberOrTag::Latest) + // .unwrap() + // .unwrap(); + // assert_eq!( + // new_head.hash(), + // new_block_hash, + // "New block should be the canonical head" + // ); + // assert_eq!( + // new_head.number(), + // current_head_number + 1, + // "New head should be next block number" + // ); + + Ok(()) +} \ No newline at end of file diff --git a/crates/ethereum/node/tests/e2e/main.rs b/crates/ethereum/node/tests/e2e/main.rs index 0ebee83cd55..238addd3bd5 100644 --- a/crates/ethereum/node/tests/e2e/main.rs +++ b/crates/ethereum/node/tests/e2e/main.rs @@ -7,5 +7,6 @@ mod p2p; mod pool; mod rpc; mod utils; +mod engine; const fn main() {} diff --git a/crates/node/core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs index ca7befc0f08..1ef07735a18 100644 --- a/crates/node/core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -41,7 +41,7 @@ impl Default for 
PayloadBuilderArgs { fn default() -> Self { Self { extra_data: default_extra_data(), - interval: Duration::from_secs(1), + interval: Duration::from_secs(1000), gas_limit: None, deadline: SLOT_DURATION, max_payload_tasks: 3, diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 085362059f2..47d95da46ec 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -81,8 +81,10 @@ reth-payload-util.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-rpc.workspace = true reth-rpc-eth-types.workspace = true +reth-payload-primitives.workspace = true alloy-network.workspace = true +alloy-eips.workspace = true futures.workspace = true op-alloy-network.workspace = true diff --git a/crates/optimism/node/tests/it/engine.rs b/crates/optimism/node/tests/it/engine.rs new file mode 100644 index 00000000000..70c34039ad5 --- /dev/null +++ b/crates/optimism/node/tests/it/engine.rs @@ -0,0 +1,151 @@ +use alloy_genesis::Genesis; +use alloy_primitives::{Address, B256}; +use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes, PayloadStatusEnum}; +use op_alloy_rpc_types_engine::OpPayloadAttributes; +use reth_e2e_test_utils::{ + setup, transaction::TransactionTestContext, wallet::Wallet, +}; +// use reth_ethereum_engine_primitives::EthPayloadBuilderAttributes; +use reth_node_api::PayloadTypes; +use reth_optimism_chainspec::{OpChainSpecBuilder, OP_MAINNET, OP_SEPOLIA}; +use reth_optimism_node::{OpEngineTypes, OpNode}; +use reth_optimism_payload_builder::{OpPayloadTypes,OpPayloadBuilderAttributes}; +use reth_optimism_primitives::OpTransactionSigned; +use reth_provider::BlockReaderIdExt; +use std::sync::Arc; +use reth_payload_builder::EthPayloadBuilderAttributes; +use reth_payload_primitives::EngineApiMessageVersion; + +#[tokio::test] +async fn can_call_fcu_with_attributes_to_execute_next_block() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = OpChainSpecBuilder::default() + 
.chain(OP_SEPOLIA.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .regolith_activated() + .canyon_activated() + .ecotone_activated() + .build(); + + let (mut nodes, _tasks, _wallet) = setup::( + 1, + Arc::new(chain_spec.clone()), + false, + |timestamp| { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + // Construct Optimism-specific payload attributes + OpPayloadBuilderAttributes:: { + payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes), + transactions: vec![], // Empty vector of transactions for the builder + no_tx_pool: false, + gas_limit: Some(30_000_000), + eip_1559_params: None, + min_base_fee: None, + } + }, + ) + .await?; + + let mut node = nodes.pop().unwrap(); + let provider = node.inner.provider.clone(); + + let genesis_hash = node.block_hash(0); + + // Create a wallet from genesis account and add a transaction to the txpool + let wallet = Wallet::default(); + let raw_tx = TransactionTestContext::transfer_tx_bytes(OP_SEPOLIA.chain.id(), wallet.inner).await; + let _tx_hash = node.rpc.inject_tx(raw_tx).await?; + + // Create payload attributes for the next block + let current_head = provider.sealed_header_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest)?.unwrap(); + let current_timestamp = current_head.timestamp; + + let payload_attrs = PayloadAttributes { + timestamp: current_timestamp + 2, // 2 seconds after current block (OP block time) + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + // Call FCU with payload attributes + let fcu_state = ForkchoiceState { + head_block_hash: genesis_hash, + safe_block_hash: genesis_hash, + finalized_block_hash: genesis_hash, + }; + + // Wrap in OpPayloadAttributes + let op_attrs = 
OpPayloadAttributes { + payload_attributes: payload_attrs.clone(), + transactions: None, + no_tx_pool: None, + gas_limit: Some(30_000_000), + eip_1559_params: None, + min_base_fee: None, + }; + + let engine_api = node.inner.add_ons_handle.beacon_engine_handle.clone(); + + // Use V3 because we included parent_beacon_block_root (Ecotone) + let fcu_result = engine_api + .fork_choice_updated( + fcu_state, + Some(op_attrs), + EngineApiMessageVersion::V3, + ) + .await?; + + assert_eq!(fcu_result.payload_status.status, PayloadStatusEnum::Valid); + let payload_id = fcu_result + .payload_id + .expect("FCU with attributes should return a payload ID"); + + // Wait a bit for payload to be built + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + // Get the built payload from the builder service + let payload_builder_handle = node.inner.payload_builder_handle.clone(); + let built_payload = payload_builder_handle + .best_payload(payload_id) + .await + .transpose() + .ok() + .flatten() + .expect("Payload should be built"); + + // Convert to ExecutionData for NewPayload + let execution_data = OpEngineTypes::::block_to_payload(built_payload.block().clone()); + + // Submit the payload via Engine API + let new_payload_result = engine_api.new_payload(execution_data).await?; + assert_eq!(new_payload_result.status, PayloadStatusEnum::Valid); + + let new_block_hash = built_payload.block().hash(); + + // Update forkchoice to make the new block canonical + engine_api.fork_choice_updated( + ForkchoiceState { + head_block_hash: new_block_hash, + safe_block_hash: new_block_hash, + finalized_block_hash: genesis_hash, + }, + None, + EngineApiMessageVersion::V3, + ).await?; + + // Verify the new block is now the head + let new_head = provider.sealed_header_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest)?.unwrap(); + assert_eq!(new_head.number, 1); + assert_eq!(new_head.hash(), new_block_hash); + + Ok(()) +} diff --git a/crates/optimism/node/tests/it/main.rs 
b/crates/optimism/node/tests/it/main.rs index fbd49d4c1cf..ef6533c707e 100644 --- a/crates/optimism/node/tests/it/main.rs +++ b/crates/optimism/node/tests/it/main.rs @@ -4,4 +4,6 @@ mod builder; mod priority; +mod engine; + const fn main() {} diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index aa2b1f66802..44c5f0f1a13 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -520,7 +520,7 @@ where empty_payload: empty_payload.filter(|_| kind != PayloadKind::WaitForPending), }; - (fut, KeepPayloadJobAlive::No) + (fut, KeepPayloadJobAlive::Yes) } } diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index f3f1b03ab2e..2b77f73b8bd 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -131,7 +131,13 @@ impl PayloadBuilderHandle { attr: T::PayloadBuilderAttributes, ) -> Receiver> { let (tx, rx) = oneshot::channel(); - let _ = self.to_service.send(PayloadServiceCommand::BuildNewPayload(attr, tx)); + let ret = self.to_service.send(PayloadServiceCommand::BuildNewPayload(attr, tx)); + match ret { + Ok(_) => {}, + Err(payload_err) => { + eprintln!("payload error: {payload_err:?}"); + } + } rx } From 6aadd2c7872996f583001feddf14bbfc0183e1b8 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 8 Dec 2025 17:27:34 +0800 Subject: [PATCH 10/36] add op engine api e2e tests --- crates/optimism/node/tests/it/engine.rs | 74 +++++++++---------------- 1 file changed, 27 insertions(+), 47 deletions(-) diff --git a/crates/optimism/node/tests/it/engine.rs b/crates/optimism/node/tests/it/engine.rs index 70c34039ad5..150b6b7ef5d 100644 --- a/crates/optimism/node/tests/it/engine.rs +++ b/crates/optimism/node/tests/it/engine.rs @@ -1,20 +1,21 @@ -use alloy_genesis::Genesis; +use std::hash::Hash; use alloy_primitives::{Address, B256}; use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes, PayloadStatusEnum}; use 
op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_e2e_test_utils::{ setup, transaction::TransactionTestContext, wallet::Wallet, }; -// use reth_ethereum_engine_primitives::EthPayloadBuilderAttributes; use reth_node_api::PayloadTypes; -use reth_optimism_chainspec::{OpChainSpecBuilder, OP_MAINNET, OP_SEPOLIA}; -use reth_optimism_node::{OpEngineTypes, OpNode}; -use reth_optimism_payload_builder::{OpPayloadTypes,OpPayloadBuilderAttributes}; +use reth_optimism_chainspec::{OpChainSpecBuilder, OP_SEPOLIA}; +use reth_optimism_node::{OpNode}; +use reth_optimism_payload_builder::{OpPayloadBuilderAttributes}; use reth_optimism_primitives::OpTransactionSigned; use reth_provider::BlockReaderIdExt; use std::sync::Arc; use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_payload_primitives::EngineApiMessageVersion; +use reth_revm::database::EvmStateProvider; +use reth_rpc_api::{EngineApiClient}; +use alloy_rpc_types_engine::ExecutionPayloadV3; #[tokio::test] async fn can_call_fcu_with_attributes_to_execute_next_block() -> eyre::Result<()> { @@ -59,12 +60,10 @@ async fn can_call_fcu_with_attributes_to_execute_next_block() -> eyre::Result<() let genesis_hash = node.block_hash(0); - // Create a wallet from genesis account and add a transaction to the txpool let wallet = Wallet::default(); let raw_tx = TransactionTestContext::transfer_tx_bytes(OP_SEPOLIA.chain.id(), wallet.inner).await; let _tx_hash = node.rpc.inject_tx(raw_tx).await?; - // Create payload attributes for the next block let current_head = provider.sealed_header_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest)?.unwrap(); let current_timestamp = current_head.timestamp; @@ -76,14 +75,12 @@ async fn can_call_fcu_with_attributes_to_execute_next_block() -> eyre::Result<() parent_beacon_block_root: Some(B256::ZERO), }; - // Call FCU with payload attributes let fcu_state = ForkchoiceState { head_block_hash: genesis_hash, safe_block_hash: genesis_hash, finalized_block_hash: genesis_hash, }; - // Wrap in 
OpPayloadAttributes let op_attrs = OpPayloadAttributes { payload_attributes: payload_attrs.clone(), transactions: None, @@ -95,24 +92,20 @@ async fn can_call_fcu_with_attributes_to_execute_next_block() -> eyre::Result<() let engine_api = node.inner.add_ons_handle.beacon_engine_handle.clone(); - // Use V3 because we included parent_beacon_block_root (Ecotone) - let fcu_result = engine_api - .fork_choice_updated( - fcu_state, - Some(op_attrs), - EngineApiMessageVersion::V3, - ) + let engine_client = node.inner.engine_http_client(); + let fcu_result = engine_client + .fork_choice_updated_v3(fcu_state, Some(op_attrs)) .await?; - - assert_eq!(fcu_result.payload_status.status, PayloadStatusEnum::Valid); - let payload_id = fcu_result - .payload_id - .expect("FCU with attributes should return a payload ID"); + let payload_id = fcu_result.payload_id.expect("payload id"); // Wait a bit for payload to be built tokio::time::sleep(std::time::Duration::from_millis(500)).await; - // Get the built payload from the builder service + let payload_v3 = engine_client.get_payload_v3(payload_id).await?; + assert_eq!(genesis_hash, payload_v3.execution_payload.payload_inner.payload_inner.parent_hash); + assert_eq!(21000, payload_v3.execution_payload.payload_inner.payload_inner.gas_used); + + // newPaylaod let payload_builder_handle = node.inner.payload_builder_handle.clone(); let built_payload = payload_builder_handle .best_payload(payload_id) @@ -121,31 +114,18 @@ async fn can_call_fcu_with_attributes_to_execute_next_block() -> eyre::Result<() .ok() .flatten() .expect("Payload should be built"); - - // Convert to ExecutionData for NewPayload - let execution_data = OpEngineTypes::::block_to_payload(built_payload.block().clone()); - - // Submit the payload via Engine API - let new_payload_result = engine_api.new_payload(execution_data).await?; + let block = Arc::new(built_payload.block().clone()); + let payload_v3 = ExecutionPayloadV3::from_block_unchecked( + block.hash(), + 
&Arc::unwrap_or_clone(block.clone()).into_block(), + ); + let versioned_hashes: Vec = Vec::new(); + let parent_beacon_block_root = block.parent_beacon_block_root.unwrap_or_default(); + + let new_payload_result = engine_client + .new_payload_v3(payload_v3, versioned_hashes, parent_beacon_block_root) + .await?; assert_eq!(new_payload_result.status, PayloadStatusEnum::Valid); - - let new_block_hash = built_payload.block().hash(); - - // Update forkchoice to make the new block canonical - engine_api.fork_choice_updated( - ForkchoiceState { - head_block_hash: new_block_hash, - safe_block_hash: new_block_hash, - finalized_block_hash: genesis_hash, - }, - None, - EngineApiMessageVersion::V3, - ).await?; - - // Verify the new block is now the head - let new_head = provider.sealed_header_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest)?.unwrap(); - assert_eq!(new_head.number, 1); - assert_eq!(new_head.hash(), new_block_hash); Ok(()) } From ede2ca5da67f3dd62f49879750c748aaa62f758e Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Tue, 9 Dec 2025 12:05:33 +0800 Subject: [PATCH 11/36] add state root overlay benchamrk --- Cargo.lock | 2 + crates/engine/tree/src/tree/mod.rs | 16 +- crates/optimism/node/tests/it/engine.rs | 160 +++++++++++++++++- crates/storage/db-common/Cargo.toml | 2 + crates/storage/db-common/README.md | 2 + .../benches/state_root_comparison.rs | 100 ++++++++++- crates/storage/db-common/benches/util.rs | 137 +++++++++++++++ 7 files changed, 404 insertions(+), 15 deletions(-) create mode 100644 crates/storage/db-common/benches/util.rs diff --git a/Cargo.lock b/Cargo.lock index 9cb54b54cef..27880006461 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8147,6 +8147,7 @@ dependencies = [ "alloy-consensus", "alloy-genesis", "alloy-primitives", + "alloy-trie 0.9.1", "boyer-moore-magiclen", "codspeed-criterion-compat", "eyre", @@ -8172,6 +8173,7 @@ dependencies = [ "tempdir", "thiserror 2.0.17", "tracing", + "triedb", ] [[package]] diff --git 
a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 5db698107c5..c28b05547cc 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1370,14 +1370,14 @@ where } } - if !self.persistence_state.in_progress() { - if let Some(new_tip_num) = self.find_disk_reorg()? { - self.remove_blocks(new_tip_num) - } else if self.should_persist() { - let blocks_to_persist = self.get_canonical_blocks_to_persist()?; - self.persist_blocks(blocks_to_persist); - } - } + // if !self.persistence_state.in_progress() { + // if let Some(new_tip_num) = self.find_disk_reorg()? { + // self.remove_blocks(new_tip_num) + // } else if self.should_persist() { + // let blocks_to_persist = self.get_canonical_blocks_to_persist()?; + // self.persist_blocks(blocks_to_persist); + // } + // } Ok(()) } diff --git a/crates/optimism/node/tests/it/engine.rs b/crates/optimism/node/tests/it/engine.rs index 150b6b7ef5d..2f1bbec5ac7 100644 --- a/crates/optimism/node/tests/it/engine.rs +++ b/crates/optimism/node/tests/it/engine.rs @@ -5,6 +5,7 @@ use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_e2e_test_utils::{ setup, transaction::TransactionTestContext, wallet::Wallet, }; +use alloy_primitives::{TxKind, U256}; use reth_node_api::PayloadTypes; use reth_optimism_chainspec::{OpChainSpecBuilder, OP_SEPOLIA}; use reth_optimism_node::{OpNode}; @@ -12,13 +13,15 @@ use reth_optimism_payload_builder::{OpPayloadBuilderAttributes}; use reth_optimism_primitives::OpTransactionSigned; use reth_provider::BlockReaderIdExt; use std::sync::Arc; +use alloy_eips::Encodable2718; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_revm::database::EvmStateProvider; use reth_rpc_api::{EngineApiClient}; use alloy_rpc_types_engine::ExecutionPayloadV3; +use alloy_rpc_types_eth::TransactionRequest; #[tokio::test] -async fn can_call_fcu_with_attributes_to_execute_next_block() -> eyre::Result<()> { +async fn full_engine_api_bock_building_get_validation() 
-> eyre::Result<()> { reth_tracing::init_test_tracing(); let chain_spec = OpChainSpecBuilder::default() @@ -90,7 +93,118 @@ async fn can_call_fcu_with_attributes_to_execute_next_block() -> eyre::Result<() min_base_fee: None, }; - let engine_api = node.inner.add_ons_handle.beacon_engine_handle.clone(); + let engine_client = node.inner.engine_http_client(); + let fcu_result = engine_client + .fork_choice_updated_v3(fcu_state, Some(op_attrs)) + .await?; + let payload_id = fcu_result.payload_id.expect("payload id"); + + // Wait a bit for payload to be built + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + let payload_v3 = engine_client.get_payload_v3(payload_id).await?; + assert_eq!(genesis_hash, payload_v3.execution_payload.payload_inner.payload_inner.parent_hash); + assert_eq!(21000, payload_v3.execution_payload.payload_inner.payload_inner.gas_used); + + // newPaylaod + let payload_builder_handle = node.inner.payload_builder_handle.clone(); + let built_payload = payload_builder_handle + .best_payload(payload_id) + .await + .transpose() + .ok() + .flatten() + .expect("Payload should be built"); + let block = Arc::new(built_payload.block().clone()); + let payload_v3 = ExecutionPayloadV3::from_block_unchecked( + block.hash(), + &Arc::unwrap_or_clone(block.clone()).into_block(), + ); + let versioned_hashes: Vec = Vec::new(); + let parent_beacon_block_root = block.parent_beacon_block_root.unwrap_or_default(); + + let new_payload_result = engine_client + .new_payload_v3(payload_v3, versioned_hashes, parent_beacon_block_root) + .await?; + assert_eq!(new_payload_result.status, PayloadStatusEnum::Valid); + + Ok(()) +} + + +#[tokio::test] +async fn full_engine_api_bock_building_continuously() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = OpChainSpecBuilder::default() + .chain(OP_SEPOLIA.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .regolith_activated() + .canyon_activated() + 
.ecotone_activated() + .build(); + + let (mut nodes, _tasks, _wallet) = setup::( + 1, + Arc::new(chain_spec.clone()), + false, + |timestamp| { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + // Construct Optimism-specific payload attributes + OpPayloadBuilderAttributes:: { + payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes), + transactions: vec![], // Empty vector of transactions for the builder + no_tx_pool: false, + gas_limit: Some(30_000_000), + eip_1559_params: None, + min_base_fee: None, + } + }, + ) + .await?; + + let mut node = nodes.pop().unwrap(); + let provider = node.inner.provider.clone(); + + let genesis_hash = node.block_hash(0); + + let wallet = Wallet::default(); + let signer = wallet.inner.clone(); + let raw_tx = TransactionTestContext::transfer_tx_bytes(OP_SEPOLIA.chain.id(), signer.clone()).await; + let _tx_hash = node.rpc.inject_tx(raw_tx).await?; + + let current_head = provider.sealed_header_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest)?.unwrap(); + let current_timestamp = current_head.timestamp; + + let payload_attrs = PayloadAttributes { + timestamp: current_timestamp + 2, // 2 seconds after current block (OP block time) + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + let fcu_state = ForkchoiceState { + head_block_hash: genesis_hash, + safe_block_hash: genesis_hash, + finalized_block_hash: genesis_hash, + }; + + let op_attrs = OpPayloadAttributes { + payload_attributes: payload_attrs.clone(), + transactions: None, + no_tx_pool: None, + gas_limit: Some(30_000_000), + eip_1559_params: None, + min_base_fee: None, + }; let engine_client = node.inner.engine_http_client(); let fcu_result = engine_client @@ -102,6 +216,7 @@ async fn 
can_call_fcu_with_attributes_to_execute_next_block() -> eyre::Result<() tokio::time::sleep(std::time::Duration::from_millis(500)).await; let payload_v3 = engine_client.get_payload_v3(payload_id).await?; + let block_1_hash = payload_v3.execution_payload.payload_inner.payload_inner.block_hash; assert_eq!(genesis_hash, payload_v3.execution_payload.payload_inner.payload_inner.parent_hash); assert_eq!(21000, payload_v3.execution_payload.payload_inner.payload_inner.gas_used); @@ -126,6 +241,47 @@ async fn can_call_fcu_with_attributes_to_execute_next_block() -> eyre::Result<() .new_payload_v3(payload_v3, versioned_hashes, parent_beacon_block_root) .await?; assert_eq!(new_payload_result.status, PayloadStatusEnum::Valid); + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + // Build block2 + // let raw_tx2 = TransactionTestContext::transfer_tx_bytes(chain_spec.chain.id(), signer.clone()).await; + let tx2 = TransactionRequest { + nonce: Some(1), + value: Some(U256::from(100)), + to: Some(TxKind::Call(Address::random())), + gas: Some(21000), + max_fee_per_gas: Some(25e9 as u128), // bump fee as needed + max_priority_fee_per_gas: Some(20e9 as u128), + chain_id: Some(chain_spec.chain.id()), + ..Default::default() + }; + let signed2 = TransactionTestContext::sign_tx(signer.clone(), tx2).await; + let raw_tx2 = signed2.encoded_2718().into(); + let _tx_hash2 = node.rpc.inject_tx(raw_tx2).await?; + let fcu_state_2 = ForkchoiceState { + head_block_hash: block_1_hash, + safe_block_hash: block_1_hash, + finalized_block_hash: genesis_hash, + }; + let payload_attrs_2 = PayloadAttributes { + timestamp: payload_attrs.timestamp + 2, + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + let op_attrs_2 = OpPayloadAttributes { + payload_attributes: payload_attrs_2.clone(), + transactions: None, + no_tx_pool: None, + gas_limit: Some(30_000_000), + eip_1559_params: None, + 
min_base_fee: None, + }; + let fcu_result_2 = engine_client + .fork_choice_updated_v3(fcu_state_2, Some(op_attrs_2)) + .await?; + assert_eq!(fcu_result_2.payload_status.status, PayloadStatusEnum::Valid); + let payload_id_2 = fcu_result_2.payload_id.expect("second payload id"); Ok(()) } diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 774e36cd2fa..c4cbecb3976 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -58,6 +58,8 @@ reth-storage-api.workspace = true tempdir = "0.3.7" rand = "0.8" criterion = { workspace = true } +triedb.workspace = true +alloy-trie.workspace = true [[bench]] name = "state_root_comparison" diff --git a/crates/storage/db-common/README.md b/crates/storage/db-common/README.md index b02f44f5c6a..fd3c4857903 100644 --- a/crates/storage/db-common/README.md +++ b/crates/storage/db-common/README.md @@ -8,6 +8,8 @@ cargo test -p reth-db-common --features trie-db-ext test_triedb_state_root -- -- ## bench ```aiignore cargo bench -p reth-db-common --features trie-db-ext +cargo bench -p reth-db-common --features trie-db-ext --bench state_root_comparison -- state_root_with_account_overlay +cargo bench -p reth-db-common --features trie-db-ext --bench state_root_comparison -- bench_state_root_mdbx_with_overlay ``` cargo run --release -p reth-db-common --features trie-db-ext --bin state_root_runner -- traditional 100000 5 \ No newline at end of file diff --git a/crates/storage/db-common/benches/state_root_comparison.rs b/crates/storage/db-common/benches/state_root_comparison.rs index 022f44fabf7..244f155bf24 100644 --- a/crates/storage/db-common/benches/state_root_comparison.rs +++ b/crates/storage/db-common/benches/state_root_comparison.rs @@ -1,21 +1,32 @@ #![allow(missing_docs, unreachable_pub)] -use alloy_primitives::{Address, B256, U256}; + +mod util; + +use alloy_primitives::{keccak256,Address, B256, U256}; use criterion::{criterion_group, criterion_main, BenchmarkId, 
Criterion}; -use rand::Rng; +use rand::prelude::*; +use rand::{Rng, SeedableRng}; +use rand::rngs::StdRng; use reth_chainspec::MAINNET; use reth_primitives_traits::{Account, StorageEntry}; +use reth_provider::LatestStateProvider; use reth_provider::{ test_utils::create_test_provider_factory_with_chain_spec, DatabaseProviderFactory, DBProvider, HashingWriter, ProviderFactory, TrieWriter, }; -use reth_storage_api::TrieWriter as _; -use reth_trie::StateRoot as StateRootComputer; +use reth_storage_api::{StateRootProvider, TrieWriter as _}; +use reth_trie::{HashedPostState, StateRoot as StateRootComputer}; use reth_trie_db::DatabaseHashedCursorFactory; use reth_trie::{StateRootTrieDb, TrieExtDatabase}; use std::path::PathBuf; +use std::time::Duration; +use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; use tempdir::TempDir; +use triedb::overlay::{OverlayStateMut, OverlayValue}; +use triedb::{path::AddressPath, account::Account as TrieDBAccount, Database}; use reth_db_common::init::compute_state_root; use reth_db_common::init_triedb::calculate_state_root_with_triedb; +use crate::util::{get_flat_trie_database, copy_files, DEFAULT_SETUP_DB_CONTRACT_SIZE, DEFAULT_SETUP_DB_EOA_SIZE, DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, SEED_CONTRACT, BATCH_SIZE, generate_random_address}; fn generate_random_accounts_and_storage( num_accounts: usize, @@ -120,9 +131,88 @@ pub fn bench_state_root_comparison(c: &mut Criterion) { group.finish(); } +fn bench_state_root_with_overlay(c: &mut Criterion) { + let mut group = c.benchmark_group("state_root_with_overlay"); + let base_dir = get_flat_trie_database( + DEFAULT_SETUP_DB_EOA_SIZE, + DEFAULT_SETUP_DB_CONTRACT_SIZE, + DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, + ); + let dir = TempDir::new("triedb_bench_state_root_with_overlay").unwrap(); + let file_name = base_dir.main_file_name.clone(); + copy_files(&base_dir, dir.path()).unwrap(); + + let mut rng = StdRng::seed_from_u64(SEED_CONTRACT); + let addresses: Vec = + (0..BATCH_SIZE).map(|_| 
generate_random_address(&mut rng)).collect(); + + let mut account_overlay_mut = OverlayStateMut::new(); + addresses.iter().enumerate().for_each(|(i, addr)| { + let new_account = + TrieDBAccount::new(i as u64, U256::from(i as u64), EMPTY_ROOT_HASH, KECCAK_EMPTY); + account_overlay_mut.insert(addr.clone().into(), Some(OverlayValue::Account(new_account))); + }); + let account_overlay = account_overlay_mut.freeze(); + + group.throughput(criterion::Throughput::Elements(BATCH_SIZE as u64)); + group.measurement_time(Duration::from_secs(30)); + group.bench_function(BenchmarkId::new("state_root_with_account_overlay", BATCH_SIZE), |b| { + b.iter_with_setup( + || { + let db_path = dir.path().join(&file_name); + Database::open(db_path).unwrap() + }, + |db| { + let tx = db.begin_ro().unwrap(); + + // Compute the root hash with the overlay + let _root_result = tx.compute_root_with_overlay(account_overlay.clone()).unwrap(); + + tx.commit().unwrap(); + }, + ); + }); + + group.finish(); + +} + +fn bench_state_root_mdbx_with_overlay(c: &mut Criterion) { + let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); + let db_provider_ro = provider_factory.database_provider_ro().unwrap(); + let latest_ro = LatestStateProvider::new(db_provider_ro); + let mut rng = rand::thread_rng(); + let hashed_accounts: Vec<(B256, Option)> = (0..1000).map(|_| { + let mut addr = [0u8; 20]; + rng.fill(&mut addr); + let hashed = keccak256(addr); + let acct = Account { + nonce: rng.gen_range(0..=u64::MAX), + balance: U256::from(rng.gen_range(0u128..=u128::MAX)), + bytecode_hash: { + let mut hash_bytes = [0u8; 32]; + rng.fill(&mut hash_bytes); + Some(B256::from(hash_bytes)) + }, + }; + (hashed, Some(acct)) + }).collect(); + let hashed_state = HashedPostState::default().with_accounts(hashed_accounts); + c.bench_with_input( + BenchmarkId::new("bench_state_root_mdbx_with_overlay", 1000), + &hashed_state, + |b, hs| { + b.iter(|| { + let _ = 
latest_ro.state_root_with_updates(hs.clone()); + }) + }, + ); +} + + criterion_group! { name = benches; config = Criterion::default(); - targets = bench_state_root_comparison + targets = bench_state_root_comparison, bench_state_root_with_overlay, bench_state_root_mdbx_with_overlay } criterion_main!(benches); diff --git a/crates/storage/db-common/benches/util.rs b/crates/storage/db-common/benches/util.rs new file mode 100644 index 00000000000..6520ed10042 --- /dev/null +++ b/crates/storage/db-common/benches/util.rs @@ -0,0 +1,137 @@ +use std::path::{Path, PathBuf}; +use tempdir::TempDir; +use rand::prelude::*; +use rand::RngCore; +use alloy_primitives::{Address, StorageKey, StorageValue, U256}; +use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; +use triedb::{ + account::Account, + path::{AddressPath, StoragePath}, + transaction::TransactionError, + Database, +}; +use std::{ + fs, io, + sync::{Arc, Barrier}, + thread, + time::Duration, +}; +pub const BATCH_SIZE: usize = 10_000; + +pub fn generate_random_address(rng: &mut StdRng) -> AddressPath { + let mut bytes = [0u8; 20]; + rng.fill_bytes(&mut bytes); + let addr = Address::from_slice(&bytes); + AddressPath::for_address(addr) +} + +pub const DEFAULT_SETUP_DB_EOA_SIZE: usize = 1_000_000; +pub const DEFAULT_SETUP_DB_CONTRACT_SIZE: usize = 100_000; +pub const DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT: usize = 10; +pub const SEED_EOA: u64 = 42; // EOA seeding value +pub const SEED_CONTRACT: u64 = 43; // contract account seeding value + + +#[derive(Debug)] +#[allow(dead_code)] +pub struct FlatTrieDatabase { + _base_dir: Option, + pub main_file_name: String, + pub file_name_path: PathBuf, + pub meta_file_name: String, + pub meta_file_name_path: PathBuf, +} +pub fn get_flat_trie_database( + fallback_eoa_size: usize, + fallback_contract_size: usize, + fallback_storage_per_contract: usize, +) -> FlatTrieDatabase { + let base_dir = std::env::var("BASE_DIR").ok(); + if let Some(base_dir) = base_dir { + let file_name = + 
std::env::var("FILE_NAME").expect("FILE_NAME must be set when using BASE_DIR"); + let main_file_name = file_name.to_string(); + let meta_file_name = format!("{file_name}.meta"); + let file_name_path = Path::new(&base_dir).join(&main_file_name); + let meta_file_name_path = Path::new(&base_dir).join(&meta_file_name); + + return FlatTrieDatabase { + _base_dir: None, + main_file_name, + meta_file_name, + file_name_path, + meta_file_name_path, + }; + } + let dir = TempDir::new("triedb_bench_base").unwrap(); + + let main_file_name_path = dir.path().join("triedb"); + let meta_file_name_path = dir.path().join("triedb.meta"); + let db = Database::create_new(&main_file_name_path).unwrap(); + + setup_database(&db, fallback_eoa_size, fallback_contract_size, fallback_storage_per_contract) + .unwrap(); + + FlatTrieDatabase { + _base_dir: Some(dir), + main_file_name: "triedb".to_string(), + file_name_path: main_file_name_path, + meta_file_name: "triedb.meta".to_string(), + meta_file_name_path, + } +} + +fn setup_database( + db: &Database, + eoa_count: usize, + contract_count: usize, + storage_per_contract: usize, +) -> Result<(), TransactionError> { + // Populate database with initial accounts + let mut eoa_rng = StdRng::seed_from_u64(SEED_EOA); + let mut contract_rng = StdRng::seed_from_u64(SEED_CONTRACT); + { + let mut tx = db.begin_rw()?; + for i in 1..=eoa_count { + let address = generate_random_address(&mut eoa_rng); + let account = + Account::new(i as u64, U256::from(i as u64), EMPTY_ROOT_HASH, KECCAK_EMPTY); + + tx.set_account(address, Some(account))?; + } + + for i in 1..=contract_count { + let address = generate_random_address(&mut contract_rng); + let account = + Account::new(i as u64, U256::from(i as u64), EMPTY_ROOT_HASH, KECCAK_EMPTY); + + tx.set_account(address.clone(), Some(account))?; + + // add random storage to each account + for key in 1..=storage_per_contract { + let storage_key = StorageKey::from(U256::from(key)); + let storage_path = + 
StoragePath::for_address_path_and_slot(address.clone(), storage_key); + let storage_value = + StorageValue::from_be_slice(storage_path.get_slot().pack().as_slice()); + + tx.set_storage_slot(storage_path, Some(storage_value))?; + } + } + + tx.commit()?; + } + + Ok(()) +} + +pub fn copy_files(from: &FlatTrieDatabase, to: &Path) -> Result<(), io::Error> { + for (file, from_path) in [ + (&from.main_file_name, &from.file_name_path), + (&from.meta_file_name, &from.meta_file_name_path), + ] { + let to_path = to.join(file); + fs::copy(from_path, &to_path)?; + } + Ok(()) +} \ No newline at end of file From 42405e40c4d8a6d7954121f682ecb494d9ba7219 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Tue, 9 Dec 2025 12:56:18 +0800 Subject: [PATCH 12/36] refactor triedb and mdbx bench overlay --- crates/storage/db-common/README.md | 4 +- .../benches/state_root_comparison.rs | 164 ++++++++---- crates/storage/db-common/benches/util.rs | 249 ++++++++++++++---- 3 files changed, 320 insertions(+), 97 deletions(-) diff --git a/crates/storage/db-common/README.md b/crates/storage/db-common/README.md index fd3c4857903..15829884655 100644 --- a/crates/storage/db-common/README.md +++ b/crates/storage/db-common/README.md @@ -8,8 +8,8 @@ cargo test -p reth-db-common --features trie-db-ext test_triedb_state_root -- -- ## bench ```aiignore cargo bench -p reth-db-common --features trie-db-ext -cargo bench -p reth-db-common --features trie-db-ext --bench state_root_comparison -- state_root_with_account_overlay -cargo bench -p reth-db-common --features trie-db-ext --bench state_root_comparison -- bench_state_root_mdbx_with_overlay +cargo bench -p reth-db-common --features trie-db-ext --bench state_root_comparison -- state_root_with_overlay_triedb +cargo bench -p reth-db-common --features trie-db-ext --bench state_root_comparison -- state_root_with_overlay_mdbx ``` cargo run --release -p reth-db-common --features trie-db-ext --bin state_root_runner -- traditional 100000 5 \ No newline at end of 
file diff --git a/crates/storage/db-common/benches/state_root_comparison.rs b/crates/storage/db-common/benches/state_root_comparison.rs index 244f155bf24..6c972e846fe 100644 --- a/crates/storage/db-common/benches/state_root_comparison.rs +++ b/crates/storage/db-common/benches/state_root_comparison.rs @@ -2,7 +2,7 @@ mod util; -use alloy_primitives::{keccak256,Address, B256, U256}; +use alloy_primitives::{keccak256, Address, StorageKey, StorageValue, B256, U256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use rand::prelude::*; use rand::{Rng, SeedableRng}; @@ -15,15 +15,17 @@ use reth_provider::{ DatabaseProviderFactory, DBProvider, HashingWriter, ProviderFactory, TrieWriter, }; use reth_storage_api::{StateRootProvider, TrieWriter as _}; -use reth_trie::{HashedPostState, StateRoot as StateRootComputer}; +use reth_trie::{HashedPostState, HashedStorage, StateRoot as StateRootComputer}; use reth_trie_db::DatabaseHashedCursorFactory; use reth_trie::{StateRootTrieDb, TrieExtDatabase}; use std::path::PathBuf; use std::time::Duration; +use alloy_primitives::map::{B256Map, HashMap}; use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; use tempdir::TempDir; use triedb::overlay::{OverlayStateMut, OverlayValue}; use triedb::{path::AddressPath, account::Account as TrieDBAccount, Database}; +use triedb::path::StoragePath; use reth_db_common::init::compute_state_root; use reth_db_common::init_triedb::calculate_state_root_with_triedb; use crate::util::{get_flat_trie_database, copy_files, DEFAULT_SETUP_DB_CONTRACT_SIZE, DEFAULT_SETUP_DB_EOA_SIZE, DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, SEED_CONTRACT, BATCH_SIZE, generate_random_address}; @@ -130,33 +132,57 @@ pub fn bench_state_root_comparison(c: &mut Criterion) { group.finish(); } - -fn bench_state_root_with_overlay(c: &mut Criterion) { +fn bench_state_root_with_overlay_triedb(c: &mut Criterion) { let mut group = c.benchmark_group("state_root_with_overlay"); - let base_dir = get_flat_trie_database( + let 
(base_dir, (overlay_acct, overlay_storage)) = get_flat_trie_database( DEFAULT_SETUP_DB_EOA_SIZE, DEFAULT_SETUP_DB_CONTRACT_SIZE, DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, + BATCH_SIZE ); let dir = TempDir::new("triedb_bench_state_root_with_overlay").unwrap(); let file_name = base_dir.main_file_name.clone(); copy_files(&base_dir, dir.path()).unwrap(); - let mut rng = StdRng::seed_from_u64(SEED_CONTRACT); - let addresses: Vec = - (0..BATCH_SIZE).map(|_| generate_random_address(&mut rng)).collect(); - + // Generate overlay from the returned overlay data (accounts + storage) let mut account_overlay_mut = OverlayStateMut::new(); - addresses.iter().enumerate().for_each(|(i, addr)| { - let new_account = - TrieDBAccount::new(i as u64, U256::from(i as u64), EMPTY_ROOT_HASH, KECCAK_EMPTY); - account_overlay_mut.insert(addr.clone().into(), Some(OverlayValue::Account(new_account))); - }); + + // Add account overlays + for (address, account) in &overlay_acct { + let address_path = AddressPath::for_address(*address); + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, + KECCAK_EMPTY, + ); + account_overlay_mut.insert(address_path.clone().into(), Some(OverlayValue::Account(trie_account))); + } + + // Add storage overlays + for (address, storage) in &overlay_storage { + let address_path = AddressPath::for_address(*address); + for (storage_key, storage_value) in storage { + let storage_path = StoragePath::for_address_path_and_slot( + address_path.clone(), + StorageKey::from(*storage_key), + ); + account_overlay_mut.insert( + storage_path.clone().into(), + Some(OverlayValue::Storage(StorageValue::from_be_slice( + storage_path.get_slot().pack().as_slice() + ))), + ); + } + } + let account_overlay = account_overlay_mut.freeze(); - group.throughput(criterion::Throughput::Elements(BATCH_SIZE as u64)); + let overlay_count = overlay_acct.len() + overlay_storage.values().map(|s| s.len()).sum::(); + + 
group.throughput(criterion::Throughput::Elements(overlay_count as u64)); group.measurement_time(Duration::from_secs(30)); - group.bench_function(BenchmarkId::new("state_root_with_account_overlay", BATCH_SIZE), |b| { + group.bench_function(BenchmarkId::new("state_root_with_overlay_triedb", overlay_count), |b| { b.iter_with_setup( || { let db_path = dir.path().join(&file_name); @@ -165,7 +191,6 @@ fn bench_state_root_with_overlay(c: &mut Criterion) { |db| { let tx = db.begin_ro().unwrap(); - // Compute the root hash with the overlay let _root_result = tx.compute_root_with_overlay(account_overlay.clone()).unwrap(); tx.commit().unwrap(); @@ -174,45 +199,90 @@ fn bench_state_root_with_overlay(c: &mut Criterion) { }); group.finish(); - } -fn bench_state_root_mdbx_with_overlay(c: &mut Criterion) { +fn bench_state_root_with_overlay_mdbx(c: &mut Criterion) { + let mut group = c.benchmark_group("state_root_mdbx_with_overlay"); + + // Generate random data and overlay + let (addresses, accounts_map, storage_map, overlay_acct, overlay_storage) = + util::generate_shared_test_data( + DEFAULT_SETUP_DB_EOA_SIZE, // eoa_count + DEFAULT_SETUP_DB_CONTRACT_SIZE, // contract_count + DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, + BATCH_SIZE, // overlay_count + ); + + // Write base data into database using provider_rw let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); + { + let mut provider_rw = provider_factory.provider_rw().unwrap(); + + // Convert base accounts to vector format + let accounts: Vec<(Address, Account)> = accounts_map.into_iter().collect(); + let storage_entries: Vec<(Address, Vec)> = storage_map.into_iter() + .map(|(address, storage)| { + let entries: Vec = storage.into_iter() + .map(|(key, value)| StorageEntry { key, value }) + .collect(); + (address, entries) + }) + .collect(); + + let accounts_for_hashing = accounts.iter().map(|(address, account)| (*address, Some(*account))); + 
provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); + provider_rw.insert_storage_for_hashing(storage_entries).unwrap(); + provider_rw.commit().unwrap(); + } + + // Create HashedPostState from overlay data + let mut hashed_accounts: Vec<(B256, Option)> = overlay_acct.iter() + .map(|(address, account)| { + let hashed = keccak256(address); + (hashed, Some(*account)) + }) + .collect(); + + // Build HashedStorage for overlay storage + let mut hashed_storages: B256Map = HashMap::default(); + for (address, storage) in &overlay_storage { + let hashed_address = keccak256(address); + let hashed_storage = HashedStorage::from_iter( + false, // wiped = false + storage.iter().map(|(key, value)| { + // key is a raw storage slot (B256), need to hash it + let hashed_slot = keccak256(*key); + (hashed_slot, *value) + }), + ); + hashed_storages.insert(hashed_address, hashed_storage); + } + + let hashed_state = HashedPostState { + accounts: hashed_accounts.into_iter().collect(), + storages: hashed_storages, + }; + + // Use provider_ro for state_root_with_updates let db_provider_ro = provider_factory.database_provider_ro().unwrap(); let latest_ro = LatestStateProvider::new(db_provider_ro); - let mut rng = rand::thread_rng(); - let hashed_accounts: Vec<(B256, Option)> = (0..1000).map(|_| { - let mut addr = [0u8; 20]; - rng.fill(&mut addr); - let hashed = keccak256(addr); - let acct = Account { - nonce: rng.gen_range(0..=u64::MAX), - balance: U256::from(rng.gen_range(0u128..=u128::MAX)), - bytecode_hash: { - let mut hash_bytes = [0u8; 32]; - rng.fill(&mut hash_bytes); - Some(B256::from(hash_bytes)) - }, - }; - (hashed, Some(acct)) - }).collect(); - let hashed_state = HashedPostState::default().with_accounts(hashed_accounts); - c.bench_with_input( - BenchmarkId::new("bench_state_root_mdbx_with_overlay", 1000), - &hashed_state, - |b, hs| { - b.iter(|| { - let _ = latest_ro.state_root_with_updates(hs.clone()); - }) - }, - ); -} + let overlay_count = overlay_acct.len() + 
overlay_storage.values().map(|s| s.len()).sum::(); + + group.throughput(criterion::Throughput::Elements(overlay_count as u64)); + group.measurement_time(Duration::from_secs(30)); + group.bench_function(BenchmarkId::new("state_root_with_overlay_mdbx", overlay_count), |b| { + b.iter(|| { + let _ = latest_ro.state_root_with_updates(hashed_state.clone()); + }) + }); + + group.finish(); +} criterion_group! { name = benches; config = Criterion::default(); - targets = bench_state_root_comparison, bench_state_root_with_overlay, bench_state_root_mdbx_with_overlay + targets = bench_state_root_comparison, bench_state_root_with_overlay_triedb, bench_state_root_with_overlay_mdbx } criterion_main!(benches); diff --git a/crates/storage/db-common/benches/util.rs b/crates/storage/db-common/benches/util.rs index 6520ed10042..1646e287bcb 100644 --- a/crates/storage/db-common/benches/util.rs +++ b/crates/storage/db-common/benches/util.rs @@ -2,10 +2,11 @@ use std::path::{Path, PathBuf}; use tempdir::TempDir; use rand::prelude::*; use rand::RngCore; -use alloy_primitives::{Address, StorageKey, StorageValue, U256}; +use alloy_primitives::{Address, StorageKey, StorageValue, U256, B256}; +use reth_primitives_traits::{Account, StorageEntry}; use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; use triedb::{ - account::Account, + account::Account as TrieDBAccount, path::{AddressPath, StoragePath}, transaction::TransactionError, Database, @@ -16,6 +17,8 @@ use std::{ thread, time::Duration, }; +use std::collections::HashMap; + pub const BATCH_SIZE: usize = 10_000; pub fn generate_random_address(rng: &mut StdRng) -> AddressPath { @@ -45,40 +48,25 @@ pub fn get_flat_trie_database( fallback_eoa_size: usize, fallback_contract_size: usize, fallback_storage_per_contract: usize, -) -> FlatTrieDatabase { - let base_dir = std::env::var("BASE_DIR").ok(); - if let Some(base_dir) = base_dir { - let file_name = - std::env::var("FILE_NAME").expect("FILE_NAME must be set when using BASE_DIR"); - let 
main_file_name = file_name.to_string(); - let meta_file_name = format!("{file_name}.meta"); - let file_name_path = Path::new(&base_dir).join(&main_file_name); - let meta_file_name_path = Path::new(&base_dir).join(&meta_file_name); - - return FlatTrieDatabase { - _base_dir: None, - main_file_name, - meta_file_name, - file_name_path, - meta_file_name_path, - }; - } + overlay_size: usize, +) -> (FlatTrieDatabase,(HashMap, HashMap>) ){ + let dir = TempDir::new("triedb_bench_base").unwrap(); let main_file_name_path = dir.path().join("triedb"); let meta_file_name_path = dir.path().join("triedb.meta"); let db = Database::create_new(&main_file_name_path).unwrap(); - setup_database(&db, fallback_eoa_size, fallback_contract_size, fallback_storage_per_contract) + let ret = setup_database(&db, fallback_eoa_size, fallback_contract_size, fallback_storage_per_contract, overlay_size) .unwrap(); - FlatTrieDatabase { + (FlatTrieDatabase { _base_dir: Some(dir), main_file_name: "triedb".to_string(), file_name_path: main_file_name_path, meta_file_name: "triedb.meta".to_string(), meta_file_name_path, - } + }, ret) } fn setup_database( @@ -86,43 +74,208 @@ fn setup_database( eoa_count: usize, contract_count: usize, storage_per_contract: usize, -) -> Result<(), TransactionError> { - // Populate database with initial accounts - let mut eoa_rng = StdRng::seed_from_u64(SEED_EOA); - let mut contract_rng = StdRng::seed_from_u64(SEED_CONTRACT); + overlay_size: usize, +) -> Result<(HashMap, HashMap>), TransactionError> { + // Generate shared test data (overlay not used, so pass 0) + let (addresses, accounts_map, storage_map, overlay_acct, overlay_storage) = generate_shared_test_data( + eoa_count, + contract_count, + storage_per_contract, + overlay_size, // overlay_count not used + ); { let mut tx = db.begin_rw()?; - for i in 1..=eoa_count { - let address = generate_random_address(&mut eoa_rng); - let account = - Account::new(i as u64, U256::from(i as u64), EMPTY_ROOT_HASH, KECCAK_EMPTY); - 
tx.set_account(address, Some(account))?; + // Set accounts from the generated data + for address in &addresses { + if let Some(account) = accounts_map.get(address) { + let address_path = AddressPath::for_address(*address); + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, + KECCAK_EMPTY, + ); + tx.set_account(address_path, Some(trie_account))?; + } } - for i in 1..=contract_count { - let address = generate_random_address(&mut contract_rng); - let account = - Account::new(i as u64, U256::from(i as u64), EMPTY_ROOT_HASH, KECCAK_EMPTY); + // Set storage from the generated data (only for contracts) + for (address, storage) in &storage_map { + let address_path = AddressPath::for_address(*address); + for (storage_key, storage_value) in storage { + let storage_path = StoragePath::for_address_path_and_slot( + address_path.clone(), + StorageKey::from(*storage_key), + ); + let storage_value_triedb = StorageValue::from_be_slice( + storage_path.get_slot().pack().as_slice() + ); + tx.set_storage_slot(storage_path, Some(storage_value_triedb))?; + } + } - tx.set_account(address.clone(), Some(account))?; + tx.commit()?; + } - // add random storage to each account - for key in 1..=storage_per_contract { - let storage_key = StorageKey::from(U256::from(key)); - let storage_path = - StoragePath::for_address_path_and_slot(address.clone(), storage_key); - let storage_value = - StorageValue::from_be_slice(storage_path.get_slot().pack().as_slice()); + Ok((overlay_acct, overlay_storage)) +} - tx.set_storage_slot(storage_path, Some(storage_value))?; - } +// Helper function to generate shared test data using alloy primitives +pub fn generate_shared_test_data( + eoa_count: usize, + contract_count: usize, + storage_per_contract: usize, + overlay_count: usize, // total number of overlay addresses (can include duplicates and new ones) +) -> ( + Vec
, // all base addresses (EOA + contracts) + HashMap, // base accounts map + HashMap>, // base storage map: address -> storage_key -> value + HashMap, // overlay accounts map (can have duplicates with base + new addresses) + HashMap>, // overlay storage map +) { + let mut rng = StdRng::seed_from_u64(SEED_CONTRACT); + + // Generate EOA addresses + let eoa_addresses: Vec
= (0..eoa_count).map(|_| { + let mut addr_bytes = [0u8; 20]; + rng.fill(&mut addr_bytes); + Address::from_slice(&addr_bytes) + }).collect(); + + // Generate contract addresses + let contract_addresses: Vec
= (0..contract_count).map(|_| { + let mut addr_bytes = [0u8; 20]; + rng.fill(&mut addr_bytes); + Address::from_slice(&addr_bytes) + }).collect(); + + // Combine all base addresses + let mut addresses = eoa_addresses.clone(); + addresses.extend(contract_addresses.clone()); + + // Generate base accounts map + let mut accounts_map = HashMap::new(); + for (i, address) in addresses.iter().enumerate() { + let account = Account { + nonce: i as u64, + balance: U256::from(i as u64), + bytecode_hash: if contract_addresses.contains(address) { + // Contracts have bytecode hash + Some(EMPTY_ROOT_HASH) + } else { + // EOAs have no bytecode + None + }, + }; + accounts_map.insert(*address, account); + } + + // Generate base storage map (only for contracts) + let mut storage_map: HashMap> = HashMap::new(); + for address in &contract_addresses { + let mut contract_storage = HashMap::new(); + for key in 1..=storage_per_contract { + let storage_key = B256::from(U256::from(key)); + let storage_value = U256::from(key); + contract_storage.insert(storage_key, storage_value); } + storage_map.insert(*address, contract_storage); + } - tx.commit()?; + // Generate overlay states + // Some addresses can be duplicates (updates to existing), some can be new + let mut overlay_accounts_map = HashMap::new(); + let mut overlay_storage_map: HashMap> = HashMap::new(); + + for i in 0..overlay_count { + // Randomly decide: duplicate existing address or new address + let address = if rng.gen_bool(0.5) && !addresses.is_empty() { + // 50% chance to update existing account + addresses[rng.gen_range(0..addresses.len())] + } else { + // 50% chance to create new account + let mut addr_bytes = [0u8; 20]; + rng.fill(&mut addr_bytes); + Address::from_slice(&addr_bytes) + }; + + // Generate overlay account (with different values) + let overlay_account = Account { + nonce: (i + 1000) as u64, // different nonce + balance: U256::from((i + 2000) as u64), // different balance + bytecode_hash: if rng.gen_bool(0.3) { + // 
30% chance to be a contract + Some(EMPTY_ROOT_HASH) + } else { + None + }, + }; + overlay_accounts_map.insert(address, overlay_account); + + // Generate overlay storage (only for contracts) + if overlay_account.bytecode_hash.is_some() { + let mut contract_storage = HashMap::new(); + + // Random number of storage changes (max half of storage_per_contract) + let max_changes = (storage_per_contract / 2).max(1); + let num_changes = rng.gen_range(1..=max_changes); + + // Get existing storage if this address exists in base storage_map + let existing_storage = storage_map.get(&address); + + for _ in 0..num_changes { + let change_type = rng.gen_range(0..3); // 0: new, 1: delete, 2: update + + match change_type { + 0 => { + // New storage slot + let storage_key = B256::from(U256::from(rng.gen_range(1000..2000))); + let storage_value = U256::from(rng.gen_range(5000..10000)); + contract_storage.insert(storage_key, storage_value); + } + 1 => { + // Delete existing storage (value = 0) + if let Some(existing) = existing_storage { + if !existing.is_empty() { + let keys: Vec = existing.keys().copied().collect(); + if !keys.is_empty() { + let key_to_delete = keys[rng.gen_range(0..keys.len())]; + contract_storage.insert(key_to_delete, U256::ZERO); + } + } + } + } + 2 => { + // Update existing storage + if let Some(existing) = existing_storage { + if !existing.is_empty() { + let keys: Vec = existing.keys().copied().collect(); + if !keys.is_empty() { + let key_to_update = keys[rng.gen_range(0..keys.len())]; + let new_value = U256::from(rng.gen_range(10000..20000)); + contract_storage.insert(key_to_update, new_value); + } + } + } + } + _ => unreachable!(), + } + } + + if !contract_storage.is_empty() { + overlay_storage_map.insert(address, contract_storage); + } + } } - Ok(()) + ( + addresses, + accounts_map, + storage_map, + overlay_accounts_map, + overlay_storage_map, + ) } pub fn copy_files(from: &FlatTrieDatabase, to: &Path) -> Result<(), io::Error> { From 
e5bcef9e2637e1981fb8bc66ffc14d0303e0ea5a Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Tue, 16 Dec 2025 12:11:59 +0800 Subject: [PATCH 13/36] add bench tdb and mdbx state root --- Cargo.lock | 2 + crates/optimism/node/Cargo.toml | 2 +- .../optimism/node/tests/assets/genesis.json | 108 ++++++- .../node/tests/assets/genesis_token.json | 107 +++++++ crates/optimism/node/tests/it/engine.rs | 286 +++++++++++++++++- crates/storage/db-common/Cargo.toml | 7 +- crates/storage/db-common/README.md | 3 +- crates/storage/db-common/benches/util.rs | 136 ++++++--- .../db-common/src/bin/state_root_overlay.rs | 181 +++++++++++ .../src/bin/state_root_overlay_min.rs | 98 ++++++ 10 files changed, 882 insertions(+), 48 deletions(-) create mode 100644 crates/optimism/node/tests/assets/genesis_token.json create mode 100644 crates/storage/db-common/src/bin/state_root_overlay.rs create mode 100644 crates/storage/db-common/src/bin/state_root_overlay_min.rs diff --git a/Cargo.lock b/Cargo.lock index 27880006461..af82943c6d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8167,6 +8167,7 @@ dependencies = [ "reth-static-file-types", "reth-storage-api", "reth-trie", + "reth-trie-common", "reth-trie-db", "serde", "serde_json", @@ -9865,6 +9866,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", "alloy-rpc-types-eth", + "alloy-sol-types", "clap", "eyre", "futures", diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 47d95da46ec..71d5e2dbd48 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -31,7 +31,7 @@ reth-node-core.workspace = true reth-rpc-engine-api.workspace = true reth-engine-local = { workspace = true, features = ["op"] } reth-rpc-api.workspace = true - +alloy-sol-types.workspace = true # op-reth reth-optimism-payload-builder.workspace = true reth-optimism-evm = { workspace = true, features = ["rpc"] } diff --git a/crates/optimism/node/tests/assets/genesis.json 
b/crates/optimism/node/tests/assets/genesis.json index e59d90f4ff1..19036e6df1d 100644 --- a/crates/optimism/node/tests/assets/genesis.json +++ b/crates/optimism/node/tests/assets/genesis.json @@ -1 +1,107 @@ -{"config":{"chainId":8453,"homesteadBlock":0,"eip150Block":0,"eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"berlinBlock":0,"londonBlock":0,"arrowGlacierBlock":0,"grayGlacierBlock":0,"mergeNetsplitBlock":0,"bedrockBlock":0,"regolithTime":0,"terminalTotalDifficulty":0,"terminalTotalDifficultyPassed":true,"optimism":{"eip1559Elasticity":6,"eip1559Denominator":50}},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x1c9c380","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0x14dc79964da2c08b23698b3d3cc7ca32193d9955":{"balance":"0xd3c21bcecceda1000000"},"0x15d34aaf54267db7d7c367839aaf71a00a2c6a65":{"balance":"0xd3c21bcecceda1000000"},"0x1cbd3b2770909d4e10f157cabc84c7264073c9ec":{"balance":"0xd3c21bcecceda1000000"},"0x23618e81e3f5cdf7f54c3d65f7fbc0abf5b21e8f":{"balance":"0xd3c21bcecceda1000000"},"0x2546bcd3c84621e976d8185a91a922ae77ecec30":{"balance":"0xd3c21bcecceda1000000"},"0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc":{"balance":"0xd3c21bcecceda1000000"},"0x70997970c51812dc3a010c7d01b50e0d17dc79c8":{"balance":"0xd3c21bcecceda1000000"},"0x71be63f3384f5fb98995898a86b02fb2426c5788":{"balance":"0xd3c21bcecceda1000000"},"0x8626f6940e2eb28930efb4cef49b2d1f2c9c1199":{"balance":"0xd3c21bcecceda1000000"},"0x90f79bf6eb2c4f870365e785982e1f101e93b906":{"balance":"0xd3c21bcecceda1000000"},"0x976ea74026e726554db657fa54763abd0c3a0aa9":{"balance":"0xd3c21bcecceda1000000"},"0x9965507d1a55bcc2695c58ba16fb37d819b0a4dc":{"balance":"0xd3c21bcecceda1000000"},"0x9c41de96b2088cdc640c6182dfcf5491dc574a57":{"balance":"0xd3c21bcecceda1000000"},"0xa0ee7a142d267c1f36714e4a8f75612f
20a79720":{"balance":"0xd3c21bcecceda1000000"},"0xbcd4042de499d14e55001ccbb24a551f3b954096":{"balance":"0xd3c21bcecceda1000000"},"0xbda5747bfd65f08deb54cb465eb87d40e51b197e":{"balance":"0xd3c21bcecceda1000000"},"0xcd3b766ccdd6ae721141f452c550ca635964ce71":{"balance":"0xd3c21bcecceda1000000"},"0xdd2fd4581271e230360230f9337d5c0430bf44c0":{"balance":"0xd3c21bcecceda1000000"},"0xdf3e18d64bc6a983f673ab319ccae4f1a57c7097":{"balance":"0xd3c21bcecceda1000000"},"0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266":{"balance":"0xd3c21bcecceda1000000"},"0xfabb0ac9d68b0b445fb7357272ff202c5651694a":{"balance":"0xd3c21bcecceda1000000"}},"number":"0x0"} \ No newline at end of file +{ + "config": { + "chainId": 8453, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "arrowGlacierBlock": 0, + "grayGlacierBlock": 0, + "mergeNetsplitBlock": 0, + "bedrockBlock": 0, + "regolithTime": 0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true, + "optimism": { + "eip1559Elasticity": 6, + "eip1559Denominator": 50 + } + }, + "nonce": "0x0", + "timestamp": "0x0", + "extraData": "0x00", + "gasLimit": "0x1c9c380", + "difficulty": "0x0", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "0x14dc79964da2c08b23698b3d3cc7ca32193d9955": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x15d34aaf54267db7d7c367839aaf71a00a2c6a65": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x1cbd3b2770909d4e10f157cabc84c7264073c9ec": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x23618e81e3f5cdf7f54c3d65f7fbc0abf5b21e8f": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x2546bcd3c84621e976d8185a91a922ae77ecec30": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc": 
{ + "balance": "0xd3c21bcecceda1000000" + }, + "0x70997970c51812dc3a010c7d01b50e0d17dc79c8": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x71be63f3384f5fb98995898a86b02fb2426c5788": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x8626f6940e2eb28930efb4cef49b2d1f2c9c1199": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x90f79bf6eb2c4f870365e785982e1f101e93b906": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x976ea74026e726554db657fa54763abd0c3a0aa9": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x9965507d1a55bcc2695c58ba16fb37d819b0a4dc": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x9c41de96b2088cdc640c6182dfcf5491dc574a57": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xa0ee7a142d267c1f36714e4a8f75612f20a79720": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xbcd4042de499d14e55001ccbb24a551f3b954096": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xbda5747bfd65f08deb54cb465eb87d40e51b197e": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xcd3b766ccdd6ae721141f452c550ca635964ce71": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xdd2fd4581271e230360230f9337d5c0430bf44c0": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xdf3e18d64bc6a983f673ab319ccae4f1a57c7097": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xfabb0ac9d68b0b445fb7357272ff202c5651694a": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x5FbDB2315678afecb367f032d93F642f64180aa3": { + "balance": "0x0", + "code": 
"0x608060405234801561000f575f5ffd5b5060043610610060575f3560e01c806306fdde031461006457806318160ddd14610082578063313ce567146100a057806370a08231146100be57806395d89b41146100ee578063a9059cbb1461010c575b5f5ffd5b61006c61013c565b60405161007991906103c1565b60405180910390f35b61008a610175565b60405161009791906103f9565b60405180910390f35b6100a861017a565b6040516100b5919061042d565b60405180910390f35b6100d860048036038101906100d391906104a4565b61017f565b6040516100e591906103f9565b60405180910390f35b6100f6610194565b60405161010391906103c1565b60405180910390f35b610126600480360381019061012191906104f9565b6101cd565b6040516101339190610551565b60405180910390f35b6040518060400160405280600781526020017f4d79546f6b656e0000000000000000000000000000000000000000000000000081525081565b5f5481565b601281565b6001602052805f5260405f205f915090505481565b6040518060400160405280600381526020017f4d544b000000000000000000000000000000000000000000000000000000000081525081565b5f8160015f3373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f2054101561024e576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610245906105b4565b60405180910390fd5b8160015f3373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f205f82825403925050819055508160015f8573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f205f82825401925050819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8460405161033f91906103f9565b60405180910390a36001905092915050565b5f81519050919050565b5f82825260208201905092915050565b8281835e5f83830152505050565b5f601f19601f8301169050919050565b5f61039382610351565b61039d818561035b565b93506103ad81856020860161036b565b6103b681610379565b840191505092915050565b5f6020820190508181035f8301526103d98184610389565b905092915050565b5f8190509
19050565b6103f3816103e1565b82525050565b5f60208201905061040c5f8301846103ea565b92915050565b5f60ff82169050919050565b61042781610412565b82525050565b5f6020820190506104405f83018461041e565b92915050565b5f5ffd5b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f6104738261044a565b9050919050565b61048381610469565b811461048d575f5ffd5b50565b5f8135905061049e8161047a565b92915050565b5f602082840312156104b9576104b8610446565b5b5f6104c684828501610490565b91505092915050565b6104d8816103e1565b81146104e2575f5ffd5b50565b5f813590506104f3816104cf565b92915050565b5f5f6040838503121561050f5761050e610446565b5b5f61051c85828601610490565b925050602061052d858286016104e5565b9150509250929050565b5f8115159050919050565b61054b81610537565b82525050565b5f6020820190506105645f830184610542565b92915050565b7f696e73756666696369656e7400000000000000000000000000000000000000005f82015250565b5f61059e600c8361035b565b91506105a98261056a565b602082019050919050565b5f6020820190508181035f8301526105cb81610592565b905091905056fea26469706673582212207fc7a19cb674d2b14161fa2594a527523f58b654fdda0568a842bdb287ff2a9b64736f6c634300081e0033", + "storage": { + "0xa3c1274aadd82e4d12c8004c33fb244ca686dad4fcc8957fc5668588c11d9502": "0x1000000000000000000000000" + } + } + }, + "number": "0x0" +} \ No newline at end of file diff --git a/crates/optimism/node/tests/assets/genesis_token.json b/crates/optimism/node/tests/assets/genesis_token.json new file mode 100644 index 00000000000..18fbf4364a9 --- /dev/null +++ b/crates/optimism/node/tests/assets/genesis_token.json @@ -0,0 +1,107 @@ +{ + "config": { + "chainId": 8453, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "arrowGlacierBlock": 0, + "grayGlacierBlock": 0, + "mergeNetsplitBlock": 0, + "bedrockBlock": 0, + "regolithTime": 0, + "terminalTotalDifficulty": 0, + 
"terminalTotalDifficultyPassed": true, + "optimism": { + "eip1559Elasticity": 6, + "eip1559Denominator": 50 + } + }, + "nonce": "0x0", + "timestamp": "0x0", + "extraData": "0x00", + "gasLimit": "0x1c9c380", + "difficulty": "0x0", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "0x14dc79964da2c08b23698b3d3cc7ca32193d9955": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x15d34aaf54267db7d7c367839aaf71a00a2c6a65": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x1cbd3b2770909d4e10f157cabc84c7264073c9ec": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x23618e81e3f5cdf7f54c3d65f7fbc0abf5b21e8f": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x2546bcd3c84621e976d8185a91a922ae77ecec30": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x70997970c51812dc3a010c7d01b50e0d17dc79c8": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x71be63f3384f5fb98995898a86b02fb2426c5788": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x8626f6940e2eb28930efb4cef49b2d1f2c9c1199": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x90f79bf6eb2c4f870365e785982e1f101e93b906": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x976ea74026e726554db657fa54763abd0c3a0aa9": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x9965507d1a55bcc2695c58ba16fb37d819b0a4dc": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x9c41de96b2088cdc640c6182dfcf5491dc574a57": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xa0ee7a142d267c1f36714e4a8f75612f20a79720": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xbcd4042de499d14e55001ccbb24a551f3b954096": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xbda5747bfd65f08deb54cb465eb87d40e51b197e": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xcd3b766ccdd6ae721141f452c550ca635964ce71": { + "balance": "0xd3c21bcecceda1000000" + }, + 
"0xdd2fd4581271e230360230f9337d5c0430bf44c0": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xdf3e18d64bc6a983f673ab319ccae4f1a57c7097": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xfabb0ac9d68b0b445fb7357272ff202c5651694a": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x5FbDB2315678afecb367f032d93F642f64180aa3": { + "balance": "0x0", + "code": "0x583560e01c806370a082311461001f578063a9059cbb14610039576100a6575b6004355f6000526000602001526040600020545f5260205ff35b60043573ffffffffffffffffffffffffffffffffffffffff1633602435815f6000526000602001526040600020549003825f600052600060200152604060002055815f60005260006020015260406000205401825f60005260006020015260406000205560015f5260205ff35b5f80fd", + "storage": { + "0xa3c1274aadd82e4d12c8004c33fb244ca686dad4fcc8957fc5668588c11d9502": "0x0000000000000000000000000000000000000001000000000000000000000000" + } + } + }, + "number": "0x0" +} \ No newline at end of file diff --git a/crates/optimism/node/tests/it/engine.rs b/crates/optimism/node/tests/it/engine.rs index 2f1bbec5ac7..f9eba7898fa 100644 --- a/crates/optimism/node/tests/it/engine.rs +++ b/crates/optimism/node/tests/it/engine.rs @@ -1,24 +1,29 @@ use std::hash::Hash; -use alloy_primitives::{Address, B256}; +use alloy_primitives::{keccak256,hex, Bytes, Address, B256}; use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes, PayloadStatusEnum}; use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_e2e_test_utils::{ setup, transaction::TransactionTestContext, wallet::Wallet, }; use alloy_primitives::{TxKind, U256}; +use alloy_sol_types::sol; use reth_node_api::PayloadTypes; +use alloy_network::{EthereumWallet, TransactionBuilder}; use reth_optimism_chainspec::{OpChainSpecBuilder, OP_SEPOLIA}; use reth_optimism_node::{OpNode}; use reth_optimism_payload_builder::{OpPayloadBuilderAttributes}; use reth_optimism_primitives::OpTransactionSigned; use 
reth_provider::BlockReaderIdExt; use std::sync::Arc; -use alloy_eips::Encodable2718; +use alloy_eips::{BlockId, BlockNumberOrTag, Encodable2718}; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_revm::database::EvmStateProvider; use reth_rpc_api::{EngineApiClient}; use alloy_rpc_types_engine::ExecutionPayloadV3; -use alloy_rpc_types_eth::TransactionRequest; +use alloy_rpc_types_eth::{BlockTransactions, TransactionRequest}; +use alloy_rpc_types_eth::transaction::request::TransactionInput; +use reth_rpc_api::EthApiServer; +use alloy_sol_types::{SolCall, SolValue}; #[tokio::test] async fn full_engine_api_bock_building_get_validation() -> eyre::Result<()> { @@ -285,3 +290,278 @@ async fn full_engine_api_bock_building_continuously() -> eyre::Result<()> { Ok(()) } + +#[tokio::test] +async fn full_engine_api_multi_address() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = OpChainSpecBuilder::default() + .chain(OP_SEPOLIA.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis_token.json")).unwrap()) + .regolith_activated() + .canyon_activated() + .ecotone_activated() + .build(); + + let (mut nodes, _tasks, _wallet) = setup::( + 1, + Arc::new(chain_spec.clone()), + false, + |timestamp| { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + // Construct Optimism-specific payload attributes + OpPayloadBuilderAttributes:: { + payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes), + transactions: vec![], // Empty vector of transactions for the builder + no_tx_pool: false, + gas_limit: Some(30_000_000), + eip_1559_params: None, + min_base_fee: None, + } + }, + ) + .await?; + + let mut node = nodes.pop().unwrap(); + let provider = node.inner.provider.clone(); + let genesis_hash = node.block_hash(0); + + sol! 
{ + function balanceOf(address) view returns (uint256); + } + + let token: Address = "0x5FbDB2315678afecb367f032d93F642f64180aa3".parse().unwrap(); + let receiver: Address = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265".parse().unwrap(); + + let calldata = balanceOfCall(receiver).abi_encode(); + + let call_req = TransactionRequest { + to: Some(TxKind::Call(token)), + input: TransactionInput::from(calldata), // not wrapped in Option + ..Default::default() + }; + let res_bytes = node + .rpc + .inner + .eth_api() + .call(call_req.clone().into(), Some(BlockId::latest()), None, None) + .await?; + + if res_bytes.is_empty() { + println!("Call returned empty bytes (revert or no code)"); + } else { + let balance = balanceOfCall::abi_decode_returns(&res_bytes).expect("decode failed"); + println!("balanceOf receiver {receiver:x} = {balance}"); + } + + // let provider = node.inner.provider.clone(); + // + // let genesis_hash = node.block_hash(0); + // + let wallet = Wallet::default(); + let sender_address = wallet.inner.address(); + + + let calldata: Bytes = hex!("a9059cbb000000000000000000000011f39Fd6e51aad88F6F4ce6aB8827279cffFb922650000000000000000000000000000000000000000000000000000000000000001").into(); + + // Build tx + let nonce = node.rpc.inner.eth_api().transaction_count(wallet.inner.address(), None).await.unwrap(); + let tx_request = TransactionRequest { + from: Some(sender_address), + to: Some(token.into()), // TxKind::Call + value: Some(U256::ZERO), + gas: Some(300_000), // sufficient gas + gas_price: Some(1_000_000_000), // 1 gwei + nonce: Some(nonce.to::()), + input: calldata.into(), // TransactionInput + chain_id: Some(OP_SEPOLIA.chain.id()), + ..Default::default() + }; + + let signer = wallet.inner.clone(); + let wallet_wrapper = EthereumWallet::from(signer); // Wrap the signer + let envelope = tx_request.build(&wallet_wrapper).await.unwrap(); + let raw_tx = envelope.encoded_2718(); + + // Inject + let tx_hash = node.rpc.inject_tx(raw_tx.into()).await?; + 
println!("Injected transfer tx: {tx_hash}"); + + let current_head = provider.sealed_header_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest)?.unwrap(); + let current_timestamp = current_head.timestamp; + + let payload_attrs = PayloadAttributes { + timestamp: current_timestamp + 2, // 2 seconds after current block (OP block time) + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + let fcu_state = ForkchoiceState { + head_block_hash: genesis_hash, + safe_block_hash: genesis_hash, + finalized_block_hash: genesis_hash, + }; + + let op_attrs = OpPayloadAttributes { + payload_attributes: payload_attrs.clone(), + transactions: None, + no_tx_pool: None, + gas_limit: Some(30_000_000), + eip_1559_params: None, + min_base_fee: None, + }; + + let engine_client = node.inner.engine_http_client(); + let fcu_result = engine_client + .fork_choice_updated_v3(fcu_state, Some(op_attrs)) + .await?; + let payload_id = fcu_result.payload_id.expect("payload id"); + + // Wait a bit for payload to be built + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + let payload_v3 = engine_client.get_payload_v3(payload_id).await?; + assert_eq!(genesis_hash, payload_v3.execution_payload.payload_inner.payload_inner.parent_hash); + // assert_eq!(68354, payload_v3.execution_payload.payload_inner.payload_inner.gas_used); + + // newPaylaod + let payload_builder_handle = node.inner.payload_builder_handle.clone(); + let built_payload = payload_builder_handle + .best_payload(payload_id) + .await + .transpose() + .ok() + .flatten() + .expect("Payload should be built"); + let block = Arc::new(built_payload.block().clone()); + let payload2_v3 = ExecutionPayloadV3::from_block_unchecked( + block.hash(), + &Arc::unwrap_or_clone(block.clone()).into_block(), + ); + let versioned_hashes: Vec = Vec::new(); + let parent_beacon_block_root = 
block.parent_beacon_block_root.unwrap_or_default(); + println!("payload2_v3: {:?}", payload2_v3); + let new_payload_result = engine_client + .new_payload_v3(payload2_v3.clone(), versioned_hashes, parent_beacon_block_root) + .await?; + assert_eq!(new_payload_result.status, PayloadStatusEnum::Valid); + + let head = payload_v3.execution_payload.payload_inner.payload_inner.block_hash; + let fcu_state = ForkchoiceState { + head_block_hash: head, + safe_block_hash: head, + finalized_block_hash: head, + }; + let ret = engine_client.fork_choice_updated_v3(fcu_state, None).await?; + print!("ret: {:?}", ret); + + let latest_block = node + .rpc + .inner + .eth_api() + .block_by_number(BlockNumberOrTag::Pending, false) // false = no full tx objects + .await? + .expect("latest block should exist"); + // + println!("Unsafe/latest block number: {}", latest_block.header.number); + // + match &latest_block.transactions { + BlockTransactions::Full(txs) => { + for tx in txs { + println!("Tx hash: {:?}", tx); + // println!("Tx input: {:?}", tx.input); + } + } + BlockTransactions::Hashes(hashes) => { + println!("Block has {} tx hashes", hashes.len()); + // for h in hashes { + // if let Some(receipt) = node + // .rpc + // .inner + // .eth_api() + // .transaction_receipt(*h) + // .await? 
+ // { + // println!("Tx {:?}", h); + // } else { + // println!("Tx {h:?} has no receipt yet"); + // } + // } + } + BlockTransactions::Uncle => { + unreachable!() + } + } + + let res_bytes = node + .rpc + .inner + .eth_api() + .call(call_req.into(), Some(BlockId::latest()), None, None) + .await?; + + if res_bytes.is_empty() { + println!("Call returned empty bytes (revert or no code)"); + } else { + let balance = balanceOfCall::abi_decode_returns(&res_bytes).expect("decode failed"); + println!("balanceOf receiver {receiver:x} = {balance}"); + } + + // for tx_bytes in &payload_v3.execution_payload.payload_inner.payload_inner.transactions { + // // Calculate hash of the transaction + // let hash = keccak256(tx_bytes); + // + // // Get receipt + // let receipt = node + // .rpc + // .inner + // .eth_api() + // .transaction_receipt(hash) + // .await + // .expect("should not error") + // .expect("receipt should exist"); + // + // // Ensure success (status == 1) + // // assert!(receipt.inner.status_code.expect("status code").to_bool()); + // println!("receipt: {:?}", receipt); + // } + + + Ok(()) +} + + +#[test] +fn test_slot() { + use alloy_primitives::{keccak256, Address, B256, U256}; + + fn mapping_slot_balance_of(holder: Address) -> B256 { + let slot = U256::from(1u64); + + // Left-pad the 20-byte address to 32 bytes + let mut addr_word = [0u8; 32]; + addr_word[12..].copy_from_slice(holder.as_slice()); + + // Build abi.encode(key, slot) + let mut buf = [0u8; 64]; + buf[0..32].copy_from_slice(&addr_word); + buf[32..64].copy_from_slice(&slot.to_be_bytes::<32>()); + + keccak256(buf) + } + + + let holder: Address = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266".parse().unwrap(); + let storage_slot = mapping_slot_balance_of(holder); + println!("{storage_slot:#x}"); + +} diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index c4cbecb3976..2a994701e55 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml 
@@ -24,7 +24,9 @@ reth-fs-util.workspace = true reth-node-types.workspace = true reth-static-file-types.workspace = true reth-execution-errors.workspace = true - +reth-trie-common.workspace = true +alloy-trie.workspace = true +triedb.workspace = true # eth alloy-consensus.workspace = true alloy-genesis.workspace = true @@ -58,8 +60,7 @@ reth-storage-api.workspace = true tempdir = "0.3.7" rand = "0.8" criterion = { workspace = true } -triedb.workspace = true -alloy-trie.workspace = true + [[bench]] name = "state_root_comparison" diff --git a/crates/storage/db-common/README.md b/crates/storage/db-common/README.md index 15829884655..2ca66d32937 100644 --- a/crates/storage/db-common/README.md +++ b/crates/storage/db-common/README.md @@ -12,4 +12,5 @@ cargo bench -p reth-db-common --features trie-db-ext --bench state_root_comparis cargo bench -p reth-db-common --features trie-db-ext --bench state_root_comparison -- state_root_with_overlay_mdbx ``` -cargo run --release -p reth-db-common --features trie-db-ext --bin state_root_runner -- traditional 100000 5 \ No newline at end of file +cargo run --release -p reth-db-common --features trie-db-ext --bin state_root_runner -- traditional 100000 5 +cargo run --release -p reth-db-common --features trie-db-ext --bin state_root_overlay \ No newline at end of file diff --git a/crates/storage/db-common/benches/util.rs b/crates/storage/db-common/benches/util.rs index 1646e287bcb..61f4a6cfc6a 100644 --- a/crates/storage/db-common/benches/util.rs +++ b/crates/storage/db-common/benches/util.rs @@ -57,7 +57,10 @@ pub fn get_flat_trie_database( let meta_file_name_path = dir.path().join("triedb.meta"); let db = Database::create_new(&main_file_name_path).unwrap(); - let ret = setup_database(&db, fallback_eoa_size, fallback_contract_size, fallback_storage_per_contract, overlay_size) + let (addresses, accounts_map, storage_map, overlay_acct, overlay_storage) = + generate_shared_test_data(fallback_eoa_size, fallback_contract_size, 
fallback_storage_per_contract, overlay_size); + + let ret = setup_tdb_database(&db, &addresses, &accounts_map, &storage_map) .unwrap(); (FlatTrieDatabase { @@ -66,50 +69,42 @@ pub fn get_flat_trie_database( file_name_path: main_file_name_path, meta_file_name: "triedb.meta".to_string(), meta_file_name_path, - }, ret) + }, (overlay_acct, overlay_storage )) } - -fn setup_database( +pub fn setup_tdb_database( db: &Database, - eoa_count: usize, - contract_count: usize, - storage_per_contract: usize, - overlay_size: usize, -) -> Result<(HashMap, HashMap>), TransactionError> { - // Generate shared test data (overlay not used, so pass 0) - let (addresses, accounts_map, storage_map, overlay_acct, overlay_storage) = generate_shared_test_data( - eoa_count, - contract_count, - storage_per_contract, - overlay_size, // overlay_count not used - ); + addresses: &[Address], + accounts_map: &HashMap, + storage_map: &HashMap>, +) -> Result<(), TransactionError> { { let mut tx = db.begin_rw()?; - // Set accounts from the generated data - for address in &addresses { + // Set accounts from the provided data + for address in addresses { if let Some(account) = accounts_map.get(address) { let address_path = AddressPath::for_address(*address); let trie_account = TrieDBAccount::new( account.nonce, account.balance, EMPTY_ROOT_HASH, - KECCAK_EMPTY, + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), ); tx.set_account(address_path, Some(trie_account))?; } } - // Set storage from the generated data (only for contracts) - for (address, storage) in &storage_map { + // Set storage from the provided data (only for contracts) + for (address, storage) in storage_map { let address_path = AddressPath::for_address(*address); for (storage_key, storage_value) in storage { let storage_path = StoragePath::for_address_path_and_slot( address_path.clone(), StorageKey::from(*storage_key), ); + // Fix: Use the actual storage value, not the slot let storage_value_triedb = StorageValue::from_be_slice( - 
storage_path.get_slot().pack().as_slice() + storage_value.to_be_bytes::<32>().as_slice() ); tx.set_storage_slot(storage_path, Some(storage_value_triedb))?; } @@ -118,7 +113,7 @@ fn setup_database( tx.commit()?; } - Ok((overlay_acct, overlay_storage)) + Ok(()) } // Helper function to generate shared test data using alloy primitives @@ -190,31 +185,49 @@ pub fn generate_shared_test_data( for i in 0..overlay_count { // Randomly decide: duplicate existing address or new address - let address = if rng.gen_bool(0.5) && !addresses.is_empty() { - // 50% chance to update existing account + let is_existing = rng.gen_bool(0.5) && !addresses.is_empty(); + let address = if is_existing { + // Update existing account (only storage, no account update) addresses[rng.gen_range(0..addresses.len())] } else { - // 50% chance to create new account + // Create new account let mut addr_bytes = [0u8; 20]; rng.fill(&mut addr_bytes); Address::from_slice(&addr_bytes) }; - // Generate overlay account (with different values) - let overlay_account = Account { - nonce: (i + 1000) as u64, // different nonce - balance: U256::from((i + 2000) as u64), // different balance - bytecode_hash: if rng.gen_bool(0.3) { - // 30% chance to be a contract - Some(EMPTY_ROOT_HASH) - } else { - None - }, - }; - overlay_accounts_map.insert(address, overlay_account); + // Only generate overlay account for newly created accounts + if !is_existing { + // Generate overlay account (with different values) + let overlay_account = Account { + nonce: (i + 1000) as u64, // different nonce + balance: U256::from((i + 2000) as u64), // different balance + bytecode_hash: if rng.gen_bool(0.3) { + // 30% chance to be a contract + Some(EMPTY_ROOT_HASH) + } else { + None + }, + }; + overlay_accounts_map.insert(address, overlay_account); + } // Generate overlay storage (only for contracts) - if overlay_account.bytecode_hash.is_some() { + // For existing addresses, check if they're contracts in base data + // For new addresses, check 
if the overlay account is a contract + let is_contract = if is_existing { + // Check if existing address is a contract in base data + accounts_map.get(&address) + .map(|acc| acc.bytecode_hash.is_some()) + .unwrap_or(false) + } else { + // Check if new overlay account is a contract + overlay_accounts_map.get(&address) + .map(|acc| acc.bytecode_hash.is_some()) + .unwrap_or(false) + }; + + if is_contract { let mut contract_storage = HashMap::new(); // Random number of storage changes (max half of storage_per_contract) @@ -287,4 +300,49 @@ pub fn copy_files(from: &FlatTrieDatabase, to: &Path) -> Result<(), io::Error> { fs::copy(from_path, &to_path)?; } Ok(()) +} + + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_generate_shared_test_data_single_eoa() { + let (addresses, accounts_map, storage_map, overlay_accounts_map, overlay_storage_map) = + generate_shared_test_data(1, 0, 0, 0); + + // Should have exactly 1 base address (EOA) + assert_eq!(addresses.len(), 1, "Should have exactly 1 base address"); + + // Should have exactly 1 account in base accounts map + assert_eq!(accounts_map.len(), 1, "Should have exactly 1 account in base accounts map"); + + // Verify the account properties + let address = &addresses[0]; + let account = accounts_map.get(address).expect("Address should exist in accounts_map"); + assert_eq!(account.nonce, 0, "EOA should have nonce 0"); + assert_eq!(account.balance, U256::from(0), "EOA should have balance 0"); + assert_eq!(account.bytecode_hash, None, "EOA should have no bytecode hash"); + + // Storage map should be empty (no contracts) + assert!(storage_map.is_empty(), "Storage map should be empty when contract_count is 0"); + + // Overlay maps should be empty (overlay_count is 0) + assert!(overlay_accounts_map.is_empty(), "Overlay accounts map should be empty when overlay_count is 0"); + assert!(overlay_storage_map.is_empty(), "Overlay storage map should be empty when overlay_count is 0"); + } + #[test] + fn 
test_generate_shared_test_data_single_eoa_single_contract() { + let (addresses, accounts_map, storage_map, overlay_accounts_map, overlay_storage_map) = + generate_shared_test_data(1, 1, 0, 0); + + // Should have exactly 1 base address (EOA) + assert_eq!(addresses.len(), 2, "Should have exactly 1 base address"); + + // Should have exactly 1 account in base accounts map + assert_eq!(accounts_map.len(), 2, "Should have exactly 1 account in base accounts map"); + + + } } \ No newline at end of file diff --git a/crates/storage/db-common/src/bin/state_root_overlay.rs b/crates/storage/db-common/src/bin/state_root_overlay.rs new file mode 100644 index 00000000000..129a1994258 --- /dev/null +++ b/crates/storage/db-common/src/bin/state_root_overlay.rs @@ -0,0 +1,181 @@ +use alloy_primitives::{keccak256, Address, B256, U256, StorageKey, StorageValue}; +use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_primitives_traits::Account; +use reth_provider::{ + test_utils::create_test_provider_factory_with_chain_spec, + DatabaseProviderFactory, HashingWriter, LatestStateProvider, TrieWriter, +}; +use reth_storage_api::{StateRootProvider, TrieWriter as _}; +use reth_trie_common::{HashedPostState, HashedStorage}; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Instant; +use alloy_genesis::Genesis; +use alloy_primitives::map::B256Map; +use tempdir::TempDir; +use triedb::{ + account::Account as TrieDBAccount, + overlay::{OverlayStateMut, OverlayValue}, + path::{AddressPath, StoragePath}, + Database, +}; +use crate::util::{setup_tdb_database}; + +#[path = "../../benches/util.rs"] +mod util; + +fn main() -> eyre::Result<()> { + println!("Testing overlay state root calculation methods..."); + + // Generate shared test data + let (base_addresses, base_accounts_map, base_storage_map, overlay_acct, overlay_storage) = + util::generate_shared_test_data( + util::DEFAULT_SETUP_DB_EOA_SIZE, + 
util::DEFAULT_SETUP_DB_CONTRACT_SIZE, + util::DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, + util::BATCH_SIZE, + ); + + println!("Generated {} base addresses, {} overlay accounts", + base_addresses.len(), overlay_acct.len()); + + let dir = TempDir::new("triedb_overlay_base").unwrap(); + let main_file_name_path = dir.path().join("triedb"); + let triedb = Database::create_new(&main_file_name_path).unwrap(); + + // let tdb_pre_root = triedb.state_root(); + + setup_tdb_database(&triedb, &base_addresses, &base_accounts_map, &base_storage_map).unwrap(); + + let mut account_overlay_mut = OverlayStateMut::new(); + + for (address, account) in &overlay_acct { + let address_path = AddressPath::for_address(*address); + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + account_overlay_mut.insert(address_path.clone().into(), Some(OverlayValue::Account(trie_account))); + } + + // Add overlay storage + for (address, storage) in &overlay_storage { + let address_path = AddressPath::for_address(*address); + for (storage_key, storage_value) in storage { + // Convert B256 back to U256 to get the raw storage slot + let raw_slot = U256::from_be_slice(storage_key.as_slice()); + let storage_path = StoragePath::for_address_path_and_slot( + address_path.clone(), + StorageKey::from(raw_slot), + ); + + if storage_value.is_zero() { + // Zero value means delete the storage slot + account_overlay_mut.insert( + storage_path.clone().into(), + None, // ✅ Delete slot for zero values + ); + } else { + // Non-zero value: insert the storage entry + account_overlay_mut.insert( + storage_path.clone().into(), + Some(OverlayValue::Storage(StorageValue::from_be_slice( + storage_value.to_be_bytes::<32>().as_slice() + ))), + ); + } + } + } + let account_overlay = account_overlay_mut.freeze(); + + let start = Instant::now(); + let tx = triedb.begin_ro()?; + let triedb_root = 
tx.compute_root_with_overlay(account_overlay.clone())?; + println!("triedb_root = {:?}, overlay state root elapsed = {:?} ms", triedb_root.root, start.elapsed().as_millis()); + + let start = Instant::now(); + tx.commit()?; + println!("triedb commit elapsed = {:?} ns", start.elapsed().as_nanos()); + + // ===== Setup MDBX ===== + println!("\nSetting up MDBX..."); + // Create a chain spec with empty genesis allocation but keep MAINNET hardforks + let empty_chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(Genesis::default()) // Empty genesis with no alloc + .with_forks(MAINNET.hardforks.clone()) // Keep MAINNET hardforks + .build(), + ); + let provider_factory = create_test_provider_factory_with_chain_spec(empty_chain_spec); + + // let db_provider_ro_pre = provider_factory.database_provider_ro()?; + // let latest_ro_pre = LatestStateProvider::new(db_provider_ro_pre); + // let empty_state = HashedPostState::default(); + // let (mdbx_pre_root, _) = latest_ro_pre.state_root_with_updates(empty_state)?; + + // Insert base data + { + let mut provider_rw = provider_factory.provider_rw()?; + let accounts: Vec<(Address, Account)> = base_accounts_map.iter().map(|(a, acc)| (*a, *acc)).collect(); + let storage_entries: Vec<(Address, Vec)> = base_storage_map + .iter() + .map(|(address, storage)| { + let entries: Vec = storage + .iter() + .map(|(key, value)| reth_primitives_traits::StorageEntry { + key: *key, + value: *value, + }) + .collect(); + (*address, entries) + }) + .collect(); + + let accounts_for_hashing = accounts.iter().map(|(address, account)| (*address, Some(*account))); + provider_rw.insert_account_for_hashing(accounts_for_hashing)?; + provider_rw.insert_storage_for_hashing(storage_entries)?; + provider_rw.commit()?; + } + + // Build HashedPostState from overlay + let mut hashed_accounts: Vec<(B256, Option)> = overlay_acct + .iter() + .map(|(address, account)| { + let hashed = keccak256(address); + (hashed, Some(*account)) + }) + 
.collect(); + + let mut hashed_storages: B256Map = HashMap::default(); + for (address, storage) in &overlay_storage { + let hashed_address = keccak256(address); + let hashed_storage = HashedStorage::from_iter( + false, + storage.iter().map(|(key, value)| { + let hashed_slot = keccak256(*key); + (hashed_slot, *value) + }), + ); + hashed_storages.insert(hashed_address, hashed_storage); + } + + let hashed_state = HashedPostState { + accounts: hashed_accounts.into_iter().collect(), + storages: hashed_storages, + }; + + let db_provider_ro = provider_factory.database_provider_ro()?; + let latest_ro = LatestStateProvider::new(db_provider_ro); + + let start = Instant::now(); + let (mdbx_root, _updates) = latest_ro.state_root_with_updates(hashed_state)?; + + println!("MDBX state root: {:?}, overlay state root elapsed {:?} ms", mdbx_root, start.elapsed().as_millis()); + assert_eq!(mdbx_root, triedb_root.root); + + Ok(()) +} \ No newline at end of file diff --git a/crates/storage/db-common/src/bin/state_root_overlay_min.rs b/crates/storage/db-common/src/bin/state_root_overlay_min.rs new file mode 100644 index 00000000000..08dd6add030 --- /dev/null +++ b/crates/storage/db-common/src/bin/state_root_overlay_min.rs @@ -0,0 +1,98 @@ +use alloy_primitives::{address, keccak256, B256, U256}; +use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_primitives_traits::Account; +use reth_provider::{ + test_utils::create_test_provider_factory_with_chain_spec,DatabaseProviderFactory, + LatestStateProvider, ProviderFactory, +}; +use reth_storage_api::StateRootProvider; +use reth_trie_common::HashedPostState; +use std::sync::Arc; +use alloy_genesis::Genesis; +use tempdir::TempDir; +use triedb::{ + account::Account as TrieDBAccount, + overlay::{OverlayStateMut, OverlayValue}, + path::AddressPath, + Database, +}; + +fn main() -> eyre::Result<()> { + println!("Testing overlay state root calculation with single account..."); + + // ===== 
Setup TrieDB ===== + let dir = TempDir::new("triedb_overlay_min").unwrap(); + let main_file_name_path = dir.path().join("triedb"); + let triedb = Database::create_new(&main_file_name_path).unwrap(); + + let tdb_pre_root = triedb.state_root(); + println!("TrieDB pre state root: {:?}", tdb_pre_root); + + // Create overlay with single account + let mut overlay_mut = OverlayStateMut::new(); + let address = address!("0xd8da6bf26964af9d7eed9e03e53415d37aa96045"); + let address_path = AddressPath::for_address(address); + let trie_account = TrieDBAccount::new( + 1, // nonce + U256::from(100), // balance + EMPTY_ROOT_HASH, // storage_root + KECCAK_EMPTY, // code_hash + ); + overlay_mut.insert(address_path.clone().into(), Some(OverlayValue::Account(trie_account))); + let account_overlay = overlay_mut.freeze(); + + // Calculate state root with TrieDB + let tx = triedb.begin_ro()?; + let triedb_root = tx.compute_root_with_overlay(account_overlay.clone())?; + println!("TrieDB state root with overlay: {:?}", triedb_root.root); + tx.commit()?; + + // ===== Setup MDBX ===== + println!("\nSetting up MDBX..."); + let empty_chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(Genesis::default()) + .with_forks(MAINNET.hardforks.clone()) + .build(), + ); + let provider_factory = create_test_provider_factory_with_chain_spec(empty_chain_spec); + + let db_provider_ro_pre = provider_factory.database_provider_ro()?; + let latest_ro_pre = LatestStateProvider::new(db_provider_ro_pre); + let empty_state = HashedPostState::default(); + let (mdbx_pre_root, _) = latest_ro_pre.state_root_with_updates(empty_state)?; + println!("MDBX pre state root: {:?}", mdbx_pre_root); + + // Build HashedPostState from overlay (single account) + let account = Account { + nonce: 1, + balance: U256::from(100), + bytecode_hash: None, // No bytecode + }; + let hashed_address = keccak256(address); + let hashed_state = HashedPostState { + accounts: vec![(hashed_address, 
Some(account))].into_iter().collect(), + storages: Default::default(), + }; + + // Calculate state root with MDBX + let db_provider_ro = provider_factory.database_provider_ro()?; + let latest_ro = LatestStateProvider::new(db_provider_ro); + let (mdbx_root, _updates) = latest_ro.state_root_with_updates(hashed_state)?; + println!("MDBX state root with overlay: {:?}", mdbx_root); + + // ===== Compare Results ===== + println!("\n=== Comparison ==="); + println!("TrieDB root: {:?}", triedb_root.root); + println!("MDBX root: {:?}", mdbx_root); + + if triedb_root.root == mdbx_root { + println!("\n✅ SUCCESS: Both methods produce the same state root!"); + Ok(()) + } else { + println!("\n❌ FAILURE: State roots differ!"); + eyre::bail!("State root mismatch: TrieDB={:?}, MDBX={:?}", triedb_root.root, mdbx_root) + } +} \ No newline at end of file From 2632dd2630cf87506d75d57af1fef51086a69575 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Wed, 17 Dec 2025 16:28:01 +0800 Subject: [PATCH 14/36] add triedb integration into providerFactory --- Cargo.lock | 3 + Cargo.toml | 1 + crates/cli/commands/src/common.rs | 8 +- .../cli/commands/src/stage/dump/execution.rs | 2 + .../src/stage/dump/hashing_account.rs | 2 + .../src/stage/dump/hashing_storage.rs | 2 + crates/cli/commands/src/stage/dump/merkle.rs | 2 + crates/node/builder/src/launch/common.rs | 2 + crates/node/core/src/args/datadir_args.rs | 9 ++ crates/node/core/src/dirs.rs | 12 ++ crates/storage/db/src/lib.rs | 8 + crates/storage/provider/Cargo.toml | 3 + .../src/providers/database/builder.rs | 75 ++++++++-- .../provider/src/providers/database/mod.rs | 14 +- crates/storage/provider/src/providers/mod.rs | 2 + .../provider/src/providers/triedb/mod.rs | 141 ++++++++++++++++++ crates/storage/provider/src/test_utils/mod.rs | 4 + 17 files changed, 273 insertions(+), 17 deletions(-) create mode 100644 crates/storage/provider/src/providers/triedb/mod.rs diff --git a/Cargo.lock b/Cargo.lock index af82943c6d1..b3534f0e3ba 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -10238,6 +10238,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", + "alloy-trie 0.9.1", "assert_matches", "dashmap 6.1.0", "eyre", @@ -10273,9 +10274,11 @@ dependencies = [ "revm-database-interface", "revm-state", "strum 0.27.2", + "tempdir", "tempfile", "tokio", "tracing", + "triedb", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 46d8ddb5140..e2850db7fa1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -671,6 +671,7 @@ proptest = "1.7" proptest-derive = "0.5" similar-asserts = { version = "1.5.0", features = ["serde"] } tempfile = "3.20" +tempdir = "0.3.7" test-fuzz = "7" rstest = "0.24.0" test-case = "3" diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index a8b5b3c8efd..6018c24ffd9 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -21,7 +21,7 @@ use reth_node_core::{ dirs::{ChainPath, DataDirPath}, }; use reth_provider::{ - providers::{BlockchainProvider, NodeTypesForProvider, StaticFileProvider}, + providers::{BlockchainProvider, NodeTypesForProvider, StaticFileProvider, triedb::TriedbProvider}, ProviderFactory, StaticFileProviderFactory, }; use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; @@ -103,7 +103,8 @@ impl EnvironmentArgs { ), }; - let provider_factory = self.create_provider_factory(&config, db, sfp)?; + let triedb_provider = TriedbProvider::new(data_dir.triedb()); + let provider_factory = self.create_provider_factory(&config, db, sfp, triedb_provider)?; if access.is_read_write() { debug!(target: "reth::cli", chain=%self.chain.chain(), genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(&provider_factory)?; @@ -122,16 +123,19 @@ impl EnvironmentArgs { config: &Config, db: Arc, static_file_provider: StaticFileProvider, + triedb_provider: TriedbProvider ) -> eyre::Result>>> where C: ChainSpecParser, { let has_receipt_pruning = config.prune.has_receipts_pruning(); let 
prune_modes = config.prune.segments.clone(); + let factory = ProviderFactory::>>::new( db, self.chain.clone(), static_file_provider, + triedb_provider ) .with_prune_modes(prune_modes.clone()) .with_genesis_block_number(self.chain.genesis().number.unwrap()); diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 9e8e68e9800..022d51721c3 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -15,6 +15,7 @@ use reth_provider::{ use reth_stages::{stages::ExecutionStage, Stage, StageCheckpoint, UnwindInput}; use std::sync::Arc; use tracing::info; +use reth_provider::providers::triedb::TriedbProvider; pub(crate) async fn dump_execution_stage( db_tool: &DbTool, @@ -42,6 +43,7 @@ where Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, + TriedbProvider::new(output_datadir.triedb()), ), to, from, diff --git a/crates/cli/commands/src/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs index 8b9ba5e937e..079402ad456 100644 --- a/crates/cli/commands/src/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -12,6 +12,7 @@ use reth_provider::{ use reth_stages::{stages::AccountHashingStage, Stage, StageCheckpoint, UnwindInput}; use std::sync::Arc; use tracing::info; +use reth_provider::providers::triedb::TriedbProvider; pub(crate) async fn dump_hashing_account_stage>>( db_tool: &DbTool, @@ -39,6 +40,7 @@ pub(crate) async fn dump_hashing_account_stage>>( db_tool: &DbTool, @@ -29,6 +30,7 @@ pub(crate) async fn dump_hashing_storage_stage( db_tool: &DbTool, @@ -62,6 +63,7 @@ where Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, + TriedbProvider::new(output_datadir.triedb()), ), to, from, diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs 
index b865d742736..7bc15615538 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -89,6 +89,7 @@ use tokio::sync::{ use futures::{future::Either, stream, Stream, StreamExt}; use reth_node_ethstats::EthStatsService; use reth_node_events::{cl::ConsensusLayerHealthEvents, node::NodeEvent}; +use reth_provider::providers::triedb::TriedbProvider; /// Reusable setup for launching a node. /// @@ -468,6 +469,7 @@ where self.right().clone(), self.chain_spec(), StaticFileProvider::read_write(self.data_dir().static_files())?, + TriedbProvider::new(self.data_dir().triedb()) ) .with_prune_modes(self.prune_modes()) .with_static_files_metrics() diff --git a/crates/node/core/src/args/datadir_args.rs b/crates/node/core/src/args/datadir_args.rs index cb0590f1779..d8e392281d1 100644 --- a/crates/node/core/src/args/datadir_args.rs +++ b/crates/node/core/src/args/datadir_args.rs @@ -27,6 +27,15 @@ pub struct DatadirArgs { verbatim_doc_comment )] pub static_files_path: Option, + + /// The absolute path to the triedb directory. + #[arg( + long = "datadir.triedb", + alias = "datadir.triedb", + value_name = "PATH", + verbatim_doc_comment + )] + pub triedb_path: Option, } impl DatadirArgs { diff --git a/crates/node/core/src/dirs.rs b/crates/node/core/src/dirs.rs index 4f8507c4e68..0c30c227438 100644 --- a/crates/node/core/src/dirs.rs +++ b/crates/node/core/src/dirs.rs @@ -301,6 +301,18 @@ impl ChainPath { } } + /// Returns the path to the `TrieDB` database directory for this chain. + /// + /// `//triedb` + pub fn triedb(&self) -> PathBuf { + let datadir_args = &self.2; + if let Some(triedb_path) = &datadir_args.triedb_path { + triedb_path.clone() + } else { + self.data_dir().join("triedb") + } + } + /// Returns the path to the reth p2p secret key for this chain.
/// /// `//discovery-secret` diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index a6306723847..4257c32fb0f 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -159,6 +159,14 @@ pub mod test_utils { (temp_dir, path) } + /// Create `triedb` path for testing + #[track_caller] + pub fn create_test_triedb_dir() -> (TempDir, PathBuf) { + let temp_dir = TempDir::with_prefix("reth-test-triedb-").expect(ERROR_TEMPDIR); + let path = temp_dir.path().to_path_buf(); + (temp_dir, path) + } + /// Get a temporary directory path to use for the database pub fn tempdir_path() -> PathBuf { let builder = tempfile::Builder::new().prefix("reth-test-").rand_bytes(8).tempdir(); diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index e8599a89706..8c158e4e07d 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -37,9 +37,11 @@ alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true +alloy-trie.workspace = true revm-database.workspace = true revm-state = { workspace = true, optional = true } +triedb.workspace = true # tracing tracing.workspace = true @@ -75,6 +77,7 @@ revm-database-interface.workspace = true revm-state.workspace = true tempfile.workspace = true +tempdir.workspace = true assert_matches.workspace = true rand.workspace = true diff --git a/crates/storage/provider/src/providers/database/builder.rs b/crates/storage/provider/src/providers/database/builder.rs index 4bc8569432e..61536264c00 100644 --- a/crates/storage/provider/src/providers/database/builder.rs +++ b/crates/storage/provider/src/providers/database/builder.rs @@ -15,6 +15,7 @@ use std::{ path::{Path, PathBuf}, sync::Arc, }; +use crate::providers::triedb::TriedbProvider; /// Helper type to create a [`ProviderFactory`].
/// @@ -105,12 +106,13 @@ impl ProviderFactoryBuilder { where N: NodeTypes, { - let ReadOnlyConfig { db_dir, db_args, static_files_dir, watch_static_files } = + let ReadOnlyConfig { db_dir, db_args, static_files_dir, triedb_dir, watch_static_files } = config.into(); Ok(self .db(Arc::new(open_db_read_only(db_dir, db_args)?)) .chainspec(chainspec) .static_file(StaticFileProvider::read_only(static_files_dir, watch_static_files)?) + .triedb_provider(TriedbProvider::new(triedb_dir)) .build_provider_factory()) } } @@ -133,6 +135,8 @@ pub struct ReadOnlyConfig { pub db_args: DatabaseArguments, /// The path to the static file dir pub static_files_dir: PathBuf, + /// The path to the `TrieDB` directory + pub triedb_dir: PathBuf, /// Whether the static files should be watched for changes. pub watch_static_files: bool, } @@ -146,13 +150,14 @@ impl ReadOnlyConfig { /// -`datadir` /// |__db /// |__static_files + /// |__triedb /// ``` /// /// By default this watches the static file directory for changes, see also /// [`StaticFileProvider::read_only`] pub fn from_datadir(datadir: impl AsRef) -> Self { let datadir = datadir.as_ref(); - Self::from_dirs(datadir.join("db"), datadir.join("static_files")) + Self::from_dirs(datadir.join("db"), datadir.join("static_files"), datadir.join("triedb")) } /// Disables long-lived read transaction safety guarantees. @@ -181,13 +186,10 @@ impl ReadOnlyConfig { /// If the path does not exist pub fn from_db_dir(db_dir: impl AsRef) -> Self { let db_dir = db_dir.as_ref(); - let static_files_dir = std::fs::canonicalize(db_dir) - .unwrap() - .parent() - .unwrap() - .to_path_buf() - .join("static_files"); - Self::from_dirs(db_dir, static_files_dir) + let datadir = std::fs::canonicalize(db_dir).unwrap().parent().unwrap().to_path_buf(); + let static_files_dir = datadir.join("static_files"); + let triedb_dir = datadir.join("triedb"); + Self::from_dirs(db_dir, static_files_dir, triedb_dir) } /// Creates the config for the given paths.
@@ -195,9 +197,10 @@ impl ReadOnlyConfig { /// /// By default this watches the static file directory for changes, see also /// [`StaticFileProvider::read_only`] - pub fn from_dirs(db_dir: impl AsRef, static_files_dir: impl AsRef) -> Self { + pub fn from_dirs(db_dir: impl AsRef, static_files_dir: impl AsRef, triedb_dir: impl AsRef) -> Self { Self { static_files_dir: static_files_dir.as_ref().into(), + triedb_dir: triedb_dir.as_ref().into(), db_dir: db_dir.as_ref().into(), db_args: Default::default(), watch_static_files: true, @@ -318,14 +321,58 @@ impl TypesAnd3 { } } -impl TypesAnd3, StaticFileProvider> +impl TypesAnd3, StaticFileProvider> +where + N: NodeTypes, +{ + /// Configures the `TrieDB` provider. + pub fn triedb_provider( + self, + triedb_provider: TriedbProvider, + ) -> TypesAnd4, StaticFileProvider, TriedbProvider> { + TypesAnd4::new(self.val_1, self.val_2, self.val_3, triedb_provider) + } +} + +// impl TypesAnd3, StaticFileProvider> +// where +// N: NodeTypes, +// DB: Database + DatabaseMetrics + Clone + Unpin + 'static, +// { +// /// Creates the [`ProviderFactory`]. +// pub fn build_provider_factory(self) -> ProviderFactory> { +// let Self { _types, val_1, val_2, val_3 } = self; +// ProviderFactory::new(val_1, val_2, val_3) +// } +// } + +/// This is staging type that contains the configured types and _four_ values. +#[derive(Debug)] +pub struct TypesAnd4 { + _types: PhantomData, + val_1: Val1, + val_2: Val2, + val_3: Val3, + val_4: Val4, +} + +impl TypesAnd4 { + /// Creates a new instance with the given types and four values. + pub fn new(val_1: Val1, val_2: Val2, val_3: Val3, val_4: Val4) -> Self { + Self { _types: Default::default(), val_1, val_2, val_3, val_4 } + } +} + +impl TypesAnd4, StaticFileProvider, TriedbProvider> where N: NodeTypes, DB: Database + DatabaseMetrics + Clone + Unpin + 'static, { /// Creates the [`ProviderFactory`]. 
- pub fn build_provider_factory(self) -> ProviderFactory> { - let Self { _types, val_1, val_2, val_3 } = self; - ProviderFactory::new(val_1, val_2, val_3) + pub fn build_provider_factory( + self, + ) -> ProviderFactory> { + let Self { _types, val_1, val_2, val_3, val_4 } = self; + ProviderFactory::new(val_1, val_2, val_3, val_4) } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index d670836d2d6..6b9da018a80 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -49,6 +49,7 @@ mod metrics; mod chain; pub use chain::*; +use crate::providers::triedb::TriedbProvider; /// A common provider that fetches data from a database or static file. /// @@ -64,6 +65,8 @@ pub struct ProviderFactory { prune_modes: PruneModes, /// The node storage handler. storage: Arc, + + triedb_provider: TriedbProvider } impl ProviderFactory>> { @@ -79,6 +82,7 @@ impl ProviderFactory { db: N::DB, chain_spec: Arc, static_file_provider: StaticFileProvider, + triedb_provider: TriedbProvider ) -> Self { Self { db, @@ -86,6 +90,7 @@ impl ProviderFactory { static_file_provider, prune_modes: PruneModes::default(), storage: Default::default(), + triedb_provider } } @@ -127,6 +132,7 @@ impl>> ProviderFactory { chain_spec: Arc, args: DatabaseArguments, static_file_provider: StaticFileProvider, + triedb_provider: TriedbProvider ) -> RethResult { Ok(Self { db: Arc::new(init_db(path, args).map_err(RethError::msg)?), @@ -134,6 +140,7 @@ impl>> ProviderFactory { static_file_provider, prune_modes: PruneModes::default(), storage: Default::default(), + triedb_provider }) } } @@ -551,11 +558,12 @@ where N: NodeTypesWithDB, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let Self { db, chain_spec, static_file_provider, prune_modes, storage } = self; + let Self { db, chain_spec, static_file_provider, prune_modes, storage, triedb_provider } = self; 
f.debug_struct("ProviderFactory") .field("db", &db) .field("chain_spec", &chain_spec) .field("static_file_provider", &static_file_provider) + .field("triedb_provider", &triedb_provider) .field("prune_modes", &prune_modes) .field("storage", &storage) .finish() @@ -568,6 +576,7 @@ impl Clone for ProviderFactory { db: self.db.clone(), chain_spec: self.chain_spec.clone(), static_file_provider: self.static_file_provider.clone(), + triedb_provider: self.triedb_provider.clone(), prune_modes: self.prune_modes.clone(), storage: self.storage.clone(), } @@ -596,6 +605,7 @@ mod tests { use reth_storage_errors::provider::ProviderError; use reth_testing_utils::generators::{self, random_block, random_header, BlockParams}; use std::{ops::RangeInclusive, sync::Arc}; + use reth_db::test_utils::create_test_triedb_dir; #[test] fn common_history_provider() { @@ -627,11 +637,13 @@ mod tests { fn provider_factory_with_database_path() { let chain_spec = ChainSpecBuilder::mainnet().build(); let (_static_dir, static_dir_path) = create_test_static_files_dir(); + let (_trie_dir, trie_dir_path) = create_test_triedb_dir(); let factory = ProviderFactory::>::new_with_database_path( tempfile::TempDir::new().expect(ERROR_TEMPDIR).keep(), Arc::new(chain_spec), DatabaseArguments::new(Default::default()), StaticFileProvider::read_write(static_dir_path).unwrap(), + TriedbProvider::new(trie_dir_path) ) .unwrap(); let provider = factory.provider().unwrap(); diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 41e8121991b..9cb3ae15f81 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -13,6 +13,8 @@ pub use static_file::{ StaticFileProviderRWRefMut, StaticFileWriter, }; +pub mod triedb; + mod state; pub use state::{ historical::{HistoricalStateProvider, HistoricalStateProviderRef, LowestAvailableBlocks}, diff --git a/crates/storage/provider/src/providers/triedb/mod.rs 
b/crates/storage/provider/src/providers/triedb/mod.rs new file mode 100644 index 00000000000..ef90dd54636 --- /dev/null +++ b/crates/storage/provider/src/providers/triedb/mod.rs @@ -0,0 +1,141 @@ +use std::{path::Path, sync::Arc}; +use alloy_primitives::{Address, B256, U256}; +use alloy_trie::EMPTY_ROOT_HASH; +use alloy_consensus::constants::KECCAK_EMPTY; +use reth_primitives_traits::Account; +use triedb::{Database as TrieDbDatabase, path::{AddressPath, StoragePath}, account::Account as TrieDBAccount, + transaction::TransactionError, Database}; +#[derive(Debug, Clone)] +pub struct TriedbProvider { + inner: Arc +} + +impl TriedbProvider { + pub fn new(db_path: impl AsRef) -> Self { + let db_path = db_path.as_ref(); + let db = if db_path.exists() { + TrieDbDatabase::open(db_path).unwrap() + } else { + TrieDbDatabase::create_new(db_path).unwrap() + }; + Self { + inner: Arc::new(db), + } + } + + pub fn set_account( + &self, + address: Address, + account: Account, + storage_root: Option, + ) -> Result<(), TransactionError> { + let mut tx = self.inner.begin_rw()?; + + let address_path = AddressPath::for_address(address); + let storage_root = storage_root.unwrap_or(EMPTY_ROOT_HASH); + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + storage_root, + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + + tx.set_account(address_path, Some(trie_account))?; + tx.commit()?; + Ok(()) + } + + pub fn get_account(&self, address: Address) -> Result, TransactionError> { + let mut tx = self.inner.begin_ro()?; + let address_path = AddressPath::for_address(address); + + let trie_account_opt = tx.get_account(&address_path)?; + + let account_opt = trie_account_opt.map(|trie_account| { + Account { + nonce: trie_account.nonce, + balance: trie_account.balance, + bytecode_hash: if trie_account.code_hash == KECCAK_EMPTY { + None + } else { + Some(trie_account.code_hash) + }, + } + }); + + Ok(account_opt) + } + +} + +#[cfg(test)] +mod tests { + use super::*; + use 
tempdir::TempDir; + + #[test] + fn test_triedb_provider_new_set_get() { + let dir = TempDir::new("triedb_test").unwrap(); + let db_path = dir.path().join("triedb"); + let provider = TriedbProvider::new(&db_path); + + let address = Address::with_last_byte(1); + let account = Account { + nonce: 42, + balance: U256::from(1000), + bytecode_hash: None, + }; + + provider.set_account(address, account, None).unwrap(); + + let provider2 = TriedbProvider::new(&db_path); + + let retrieved_account = provider2.get_account(address).unwrap(); + + assert!(retrieved_account.is_some(), "Account should exist"); + let acc = retrieved_account.unwrap(); + assert_eq!(acc.nonce, 42, "Nonce should match"); + assert_eq!(acc.balance, U256::from(1000), "Balance should match"); + assert_eq!(acc.bytecode_hash, None, "Bytecode hash should be None for EOA"); + } + + #[test] + fn test_triedb_provider_with_contract() { + let dir = TempDir::new("triedb_test_contract").unwrap(); + let db_path = dir.path().join("triedb"); + + let provider = TriedbProvider::new(&db_path); + + let address = Address::with_last_byte(2); + let code_hash = B256::with_last_byte(0xFF); + let account = Account { + nonce: 1, + balance: U256::from(5000), + bytecode_hash: Some(code_hash), + }; + + provider.set_account(address, account, None).unwrap(); + + let provider2 = TriedbProvider::new(&db_path); + let retrieved_account = provider2.get_account(address).unwrap(); + + assert!(retrieved_account.is_some(), "Contract account should exist"); + let acc = retrieved_account.unwrap(); + assert_eq!(acc.nonce, 1, "Nonce should match"); + assert_eq!(acc.balance, U256::from(5000), "Balance should match"); + assert_eq!(acc.bytecode_hash, Some(code_hash), "Code hash should match"); + } + + #[test] + fn test_triedb_provider_nonexistent_account() { + let dir = TempDir::new("triedb_test_nonexistent").unwrap(); + let db_path = dir.path().join("triedb"); + + let provider = TriedbProvider::new(&db_path); + + let nonexistent_address = 
Address::with_last_byte(99); + let result = provider.get_account(nonexistent_address).unwrap(); + + assert!(result.is_none(), "Nonexistent account should return None"); + } +} \ No newline at end of file diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index ccda2d60e85..3cffa3e9c06 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -23,6 +23,8 @@ mod noop; pub use mock::{ExtendedAccount, MockEthProvider}; pub use noop::NoopProvider; pub use reth_chain_state::test_utils::TestCanonStateSubscriptions; +use reth_db::test_utils::create_test_triedb_dir; +use crate::providers::triedb::TriedbProvider; /// Mock [`reth_node_types::NodeTypes`] for testing. pub type MockNodeTypes = reth_node_types::AnyNodeTypesWithEngine< @@ -54,11 +56,13 @@ pub fn create_test_provider_factory_with_node_types( chain_spec: Arc, ) -> ProviderFactory>>> { let (static_dir, _) = create_test_static_files_dir(); + let (triedb_dir, _) = create_test_triedb_dir(); let db = create_test_rw_db(); ProviderFactory::new( db, chain_spec, StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"), + TriedbProvider::new(triedb_dir), ) } From 2c0bcf2bd4402e4fc85a7190895b464b47b4a869 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Thu, 18 Dec 2025 11:03:19 +0800 Subject: [PATCH 15/36] add state_root_with_updates_triedb for latest staterootprovider --- Cargo.lock | 4 + crates/chain-state/Cargo.toml | 1 + crates/chain-state/src/memory_overlay.rs | 11 +- crates/cli/commands/src/common.rs | 2 +- .../cli/commands/src/stage/dump/execution.rs | 2 +- .../src/stage/dump/hashing_account.rs | 2 +- .../src/stage/dump/hashing_storage.rs | 2 +- crates/cli/commands/src/stage/dump/merkle.rs | 2 +- crates/e2e-test-utils/src/setup_import.rs | 11 +- crates/engine/tree/Cargo.toml | 2 + crates/engine/tree/src/tree/cached_state.rs | 8 ++ .../tree/src/tree/instrumented_state.rs | 8 ++ 
crates/evm/evm/Cargo.toml | 1 - crates/evm/evm/src/execute.rs | 54 ++++----- crates/node/builder/src/launch/common.rs | 2 +- .../optimism/node/tests/assets/genesis.json | 108 +----------------- crates/revm/src/test_utils.rs | 11 +- crates/rpc/rpc-eth-types/Cargo.toml | 1 + crates/rpc/rpc-eth-types/src/cache/db.rs | 7 ++ .../stages/stages/src/test_utils/test_db.rs | 9 +- .../src/providers/database/builder.rs | 6 +- .../provider/src/providers/database/mod.rs | 16 ++- .../src/providers/database/provider.rs | 25 +++- .../src/providers/state/historical.rs | 10 +- .../provider/src/providers/state/latest.rs | 103 ++++++++++++++++- .../provider/src/providers/state/macros.rs | 1 + .../provider/src/providers/triedb/mod.rs | 2 +- .../storage/provider/src/test_utils/mock.rs | 11 +- crates/storage/provider/src/test_utils/mod.rs | 2 +- crates/storage/rpc-provider/src/lib.rs | 9 +- crates/storage/storage-api/Cargo.toml | 1 + crates/storage/storage-api/src/noop.rs | 15 ++- crates/storage/storage-api/src/trie.rs | 19 ++- 33 files changed, 294 insertions(+), 174 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b3534f0e3ba..e787cac588b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7827,6 +7827,7 @@ dependencies = [ "tokio", "tokio-stream", "tracing", + "triedb", ] [[package]] @@ -8549,6 +8550,7 @@ dependencies = [ "thiserror 2.0.17", "tokio", "tracing", + "triedb", ] [[package]] @@ -10765,6 +10767,7 @@ dependencies = [ "tokio", "tokio-stream", "tracing", + "triedb", ] [[package]] @@ -10983,6 +10986,7 @@ dependencies = [ "reth-storage-errors", "reth-trie-common", "revm-database", + "triedb", ] [[package]] diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index d21c83ae7c4..d1942791f05 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -28,6 +28,7 @@ alloy-primitives = { workspace = true, features = ["std"] } alloy-consensus.workspace = true revm-database.workspace = true revm-state = { workspace = true, optional = true } 
+triedb.workspace=true # async tokio = { workspace = true, default-features = false, features = ["sync", "macros"] } diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index 254edb248b4..c4da8f2f5ec 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -4,8 +4,8 @@ use alloy_primitives::{keccak256, Address, BlockNumber, Bytes, StorageKey, Stora use reth_errors::ProviderResult; use reth_primitives_traits::{Account, Bytecode, NodePrimitives}; use reth_storage_api::{ - AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider, - StateProvider, StateRootProvider, StorageRootProvider, + AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, PlainPostState, + StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, @@ -147,6 +147,13 @@ impl StateRootProvider for MemoryOverlayStateProviderRef<'_, input.prepend_self(self.trie_input().clone()); self.historical.state_root_from_nodes_with_updates(input) } + + fn state_root_with_updates_triedb( + &self, + plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + self.historical.state_root_with_updates_triedb(plain_state) + } } impl StorageRootProvider for MemoryOverlayStateProviderRef<'_, N> { diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 6018c24ffd9..7029c5fa113 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -135,7 +135,7 @@ impl EnvironmentArgs { db, self.chain.clone(), static_file_provider, - triedb_provider + Arc::new(triedb_provider) ) .with_prune_modes(prune_modes.clone()) .with_genesis_block_number(self.chain.genesis().number.unwrap()); diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 
022d51721c3..98255db3a91 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -43,7 +43,7 @@ where Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, - TriedbProvider::new(output_datadir.triedb()), + Arc::new(TriedbProvider::new(output_datadir.triedb())), ), to, from, diff --git a/crates/cli/commands/src/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs index 079402ad456..e532b01f227 100644 --- a/crates/cli/commands/src/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -40,7 +40,7 @@ pub(crate) async fn dump_hashing_account_stage = ProviderFactory::new( db.clone(), chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_write(static_files_path).unwrap(), + Arc::new(TriedbProvider::new(triedb_dir)), ); // Initialize genesis diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 351e112857a..e4b521aee93 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -67,6 +67,8 @@ tracing.workspace = true derive_more.workspace = true parking_lot.workspace = true crossbeam-channel.workspace = true +triedb.workspace=true +reth-storage-api.workspace = true # optional deps for test-utils reth-prune-types = { workspace = true, optional = true } diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index fd9999b9eba..907f677bbc3 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -9,6 +9,7 @@ use reth_provider::{ AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; +use reth_storage_api::PlainPostState; use reth_revm::db::BundleState; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, 
MultiProof, @@ -201,6 +202,13 @@ impl StateRootProvider for CachedStateProvider { ) -> ProviderResult<(B256, TrieUpdates)> { self.state_provider.state_root_from_nodes_with_updates(input) } + + fn state_root_with_updates_triedb( + &self, + plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + self.state_provider.state_root_with_updates_triedb(plain_state) + } } impl StateProofProvider for CachedStateProvider { diff --git a/crates/engine/tree/src/tree/instrumented_state.rs b/crates/engine/tree/src/tree/instrumented_state.rs index 9d96aca3a2e..3cf80202ab0 100644 --- a/crates/engine/tree/src/tree/instrumented_state.rs +++ b/crates/engine/tree/src/tree/instrumented_state.rs @@ -8,6 +8,7 @@ use reth_provider::{ AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; +use reth_storage_api::PlainPostState; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, @@ -224,6 +225,13 @@ impl StateRootProvider for InstrumentedStateProvider { ) -> ProviderResult<(B256, TrieUpdates)> { self.state_provider.state_root_from_nodes_with_updates(input) } + + fn state_root_with_updates_triedb( + &self, + plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + self.state_provider.state_root_with_updates_triedb(plain_state) + } } impl StateProofProvider for InstrumentedStateProvider { diff --git a/crates/evm/evm/Cargo.toml b/crates/evm/evm/Cargo.toml index 4bc8ef06dbb..99439f892c1 100644 --- a/crates/evm/evm/Cargo.toml +++ b/crates/evm/evm/Cargo.toml @@ -19,7 +19,6 @@ reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-storage-errors.workspace = true reth-trie-common.workspace = true - revm.workspace = true # alloy diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index f2dec426f2e..b3bcb29b257 100644 
--- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -18,9 +18,11 @@ pub use reth_execution_types::{BlockExecutionOutput, ExecutionOutcome}; use reth_primitives_traits::{ Block, HeaderTy, NodePrimitives, ReceiptTy, Recovered, RecoveredBlock, SealedHeader, TxTy, }; -use reth_storage_api::StateProvider; +use reth_storage_api::{PlainPostState, StateProvider}; pub use reth_storage_errors::provider::ProviderError; use reth_trie_common::{updates::TrieUpdates, HashedPostState}; +use std::collections::HashMap; +use alloy_primitives::U256; use revm::{ context::result::ExecutionResult, database::{states::bundle_state::BundleRetention, BundleState, State}, @@ -515,31 +517,31 @@ where // calculate the state root let hashed_state = state.hashed_post_state(&db.bundle_state); - use std::any::type_name_of_val; - use std::any::{Any, TypeId}; -// use reth_provider::{ -// HistoricalStateProviderRef, -// LatestStateProviderRef, -// providers::state::overlay::OverlayStateProvider, -// }; - - let type_name = type_name_of_val(&state); - println!("State type: {}", type_name); - // let type_id = state.type_id(); - // if TypeId::of::>() == type_id { - // println!("It's LatestStateProviderRef"); - // } else if TypeId::of::>() == type_id { - // println!("It's HistoricalStateProviderRef"); - // } else if TypeId::of::>() == type_id { - // println!("It's OverlayStateProvider"); - // } else if TypeId::of::>() == type_id { - // println!("It's MemoryOverlayStateProvider"); - // }else if TypeId::of::>() == type_id { - // println!("It's CachedStateProvider"); - // } else { - // println!("Unknown type: {:?}", type_id); - // } - let pr = state.state_root_with_updates(hashed_state.clone()); + + // Convert BundleState to PlainPostState for triedb computation + let mut plain_state = PlainPostState::default(); + for (address, bundle_account) in db.bundle_state.state() { + // Convert account - None if destroyed, Some(Account) if exists/updated + let account = if 
bundle_account.was_destroyed() || bundle_account.info.is_none() { + None + } else { + bundle_account.info.as_ref().map(|info| reth_primitives_traits::Account::from(info)) + }; + plain_state.accounts.insert(*address, account); + + // Convert storage (BundleState uses U256 keys, PlainPostState uses B256 keys) + let mut storage_map = HashMap::new(); + for (slot, storage_slot) in &bundle_account.storage { + // Convert U256 slot to B256 (32-byte representation) + let slot_b256 = B256::from_slice(&slot.to_be_bytes::<32>()); + storage_map.insert(slot_b256, storage_slot.present_value); + } + if !storage_map.is_empty() { + plain_state.storages.insert(*address, storage_map); + } + } + + let pr = state.state_root_with_updates_triedb(plain_state); let (state_root, trie_updates) = pr.map_err(BlockExecutionError::other)?; diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 7bc15615538..6200b3c9932 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -469,7 +469,7 @@ where self.right().clone(), self.chain_spec(), StaticFileProvider::read_write(self.data_dir().static_files())?, - TriedbProvider::new(self.data_dir().triedb()) + Arc::new(TriedbProvider::new(self.data_dir().triedb())) ) .with_prune_modes(self.prune_modes()) .with_static_files_metrics() diff --git a/crates/optimism/node/tests/assets/genesis.json b/crates/optimism/node/tests/assets/genesis.json index 19036e6df1d..e59d90f4ff1 100644 --- a/crates/optimism/node/tests/assets/genesis.json +++ b/crates/optimism/node/tests/assets/genesis.json @@ -1,107 +1 @@ -{ - "config": { - "chainId": 8453, - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "muirGlacierBlock": 0, - "berlinBlock": 0, - "londonBlock": 0, - "arrowGlacierBlock": 0, - "grayGlacierBlock": 0, - "mergeNetsplitBlock": 0, - 
"bedrockBlock": 0, - "regolithTime": 0, - "terminalTotalDifficulty": 0, - "terminalTotalDifficultyPassed": true, - "optimism": { - "eip1559Elasticity": 6, - "eip1559Denominator": 50 - } - }, - "nonce": "0x0", - "timestamp": "0x0", - "extraData": "0x00", - "gasLimit": "0x1c9c380", - "difficulty": "0x0", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "coinbase": "0x0000000000000000000000000000000000000000", - "alloc": { - "0x14dc79964da2c08b23698b3d3cc7ca32193d9955": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x15d34aaf54267db7d7c367839aaf71a00a2c6a65": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x1cbd3b2770909d4e10f157cabc84c7264073c9ec": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x23618e81e3f5cdf7f54c3d65f7fbc0abf5b21e8f": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x2546bcd3c84621e976d8185a91a922ae77ecec30": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x70997970c51812dc3a010c7d01b50e0d17dc79c8": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x71be63f3384f5fb98995898a86b02fb2426c5788": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x8626f6940e2eb28930efb4cef49b2d1f2c9c1199": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x90f79bf6eb2c4f870365e785982e1f101e93b906": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x976ea74026e726554db657fa54763abd0c3a0aa9": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x9965507d1a55bcc2695c58ba16fb37d819b0a4dc": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x9c41de96b2088cdc640c6182dfcf5491dc574a57": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xa0ee7a142d267c1f36714e4a8f75612f20a79720": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xbcd4042de499d14e55001ccbb24a551f3b954096": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xbda5747bfd65f08deb54cb465eb87d40e51b197e": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xcd3b766ccdd6ae721141f452c550ca635964ce71": { - 
"balance": "0xd3c21bcecceda1000000" - }, - "0xdd2fd4581271e230360230f9337d5c0430bf44c0": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xdf3e18d64bc6a983f673ab319ccae4f1a57c7097": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266": { - "balance": "0xd3c21bcecceda1000000" - }, - "0xfabb0ac9d68b0b445fb7357272ff202c5651694a": { - "balance": "0xd3c21bcecceda1000000" - }, - "0x5FbDB2315678afecb367f032d93F642f64180aa3": { - "balance": "0x0", - "code": "0x608060405234801561000f575f5ffd5b5060043610610060575f3560e01c806306fdde031461006457806318160ddd14610082578063313ce567146100a057806370a08231146100be57806395d89b41146100ee578063a9059cbb1461010c575b5f5ffd5b61006c61013c565b60405161007991906103c1565b60405180910390f35b61008a610175565b60405161009791906103f9565b60405180910390f35b6100a861017a565b6040516100b5919061042d565b60405180910390f35b6100d860048036038101906100d391906104a4565b61017f565b6040516100e591906103f9565b60405180910390f35b6100f6610194565b60405161010391906103c1565b60405180910390f35b610126600480360381019061012191906104f9565b6101cd565b6040516101339190610551565b60405180910390f35b6040518060400160405280600781526020017f4d79546f6b656e0000000000000000000000000000000000000000000000000081525081565b5f5481565b601281565b6001602052805f5260405f205f915090505481565b6040518060400160405280600381526020017f4d544b000000000000000000000000000000000000000000000000000000000081525081565b5f8160015f3373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f2054101561024e576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610245906105b4565b60405180910390fd5b8160015f3373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f205f82825403925050819055508160015f8573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f205f82825401925050819055508273ffffffffffffffffffffff
ffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8460405161033f91906103f9565b60405180910390a36001905092915050565b5f81519050919050565b5f82825260208201905092915050565b8281835e5f83830152505050565b5f601f19601f8301169050919050565b5f61039382610351565b61039d818561035b565b93506103ad81856020860161036b565b6103b681610379565b840191505092915050565b5f6020820190508181035f8301526103d98184610389565b905092915050565b5f819050919050565b6103f3816103e1565b82525050565b5f60208201905061040c5f8301846103ea565b92915050565b5f60ff82169050919050565b61042781610412565b82525050565b5f6020820190506104405f83018461041e565b92915050565b5f5ffd5b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f6104738261044a565b9050919050565b61048381610469565b811461048d575f5ffd5b50565b5f8135905061049e8161047a565b92915050565b5f602082840312156104b9576104b8610446565b5b5f6104c684828501610490565b91505092915050565b6104d8816103e1565b81146104e2575f5ffd5b50565b5f813590506104f3816104cf565b92915050565b5f5f6040838503121561050f5761050e610446565b5b5f61051c85828601610490565b925050602061052d858286016104e5565b9150509250929050565b5f8115159050919050565b61054b81610537565b82525050565b5f6020820190506105645f830184610542565b92915050565b7f696e73756666696369656e7400000000000000000000000000000000000000005f82015250565b5f61059e600c8361035b565b91506105a98261056a565b602082019050919050565b5f6020820190508181035f8301526105cb81610592565b905091905056fea26469706673582212207fc7a19cb674d2b14161fa2594a527523f58b654fdda0568a842bdb287ff2a9b64736f6c634300081e0033", - "storage": { - "0xa3c1274aadd82e4d12c8004c33fb244ca686dad4fcc8957fc5668588c11d9502": "0x1000000000000000000000000" - } - } - }, - "number": "0x0" -} \ No newline at end of file 
+{"config":{"chainId":8453,"homesteadBlock":0,"eip150Block":0,"eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"berlinBlock":0,"londonBlock":0,"arrowGlacierBlock":0,"grayGlacierBlock":0,"mergeNetsplitBlock":0,"bedrockBlock":0,"regolithTime":0,"terminalTotalDifficulty":0,"terminalTotalDifficultyPassed":true,"optimism":{"eip1559Elasticity":6,"eip1559Denominator":50}},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x1c9c380","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0x14dc79964da2c08b23698b3d3cc7ca32193d9955":{"balance":"0xd3c21bcecceda1000000"},"0x15d34aaf54267db7d7c367839aaf71a00a2c6a65":{"balance":"0xd3c21bcecceda1000000"},"0x1cbd3b2770909d4e10f157cabc84c7264073c9ec":{"balance":"0xd3c21bcecceda1000000"},"0x23618e81e3f5cdf7f54c3d65f7fbc0abf5b21e8f":{"balance":"0xd3c21bcecceda1000000"},"0x2546bcd3c84621e976d8185a91a922ae77ecec30":{"balance":"0xd3c21bcecceda1000000"},"0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc":{"balance":"0xd3c21bcecceda1000000"},"0x70997970c51812dc3a010c7d01b50e0d17dc79c8":{"balance":"0xd3c21bcecceda1000000"},"0x71be63f3384f5fb98995898a86b02fb2426c5788":{"balance":"0xd3c21bcecceda1000000"},"0x8626f6940e2eb28930efb4cef49b2d1f2c9c1199":{"balance":"0xd3c21bcecceda1000000"},"0x90f79bf6eb2c4f870365e785982e1f101e93b906":{"balance":"0xd3c21bcecceda1000000"},"0x976ea74026e726554db657fa54763abd0c3a0aa9":{"balance":"0xd3c21bcecceda1000000"},"0x9965507d1a55bcc2695c58ba16fb37d819b0a4dc":{"balance":"0xd3c21bcecceda1000000"},"0x9c41de96b2088cdc640c6182dfcf5491dc574a57":{"balance":"0xd3c21bcecceda1000000"},"0xa0ee7a142d267c1f36714e4a8f75612f20a79720":{"balance":"0xd3c21bcecceda1000000"},"0xbcd4042de499d14e55001ccbb24a551f3b954096":{"balance":"0xd3c21bcecceda1000000"},"0xbda5747bfd65f08deb54cb465eb87d40e51b197e":{"balance":"0xd3c21bcecceda1000000"
},"0xcd3b766ccdd6ae721141f452c550ca635964ce71":{"balance":"0xd3c21bcecceda1000000"},"0xdd2fd4581271e230360230f9337d5c0430bf44c0":{"balance":"0xd3c21bcecceda1000000"},"0xdf3e18d64bc6a983f673ab319ccae4f1a57c7097":{"balance":"0xd3c21bcecceda1000000"},"0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266":{"balance":"0xd3c21bcecceda1000000"},"0xfabb0ac9d68b0b445fb7357272ff202c5651694a":{"balance":"0xd3c21bcecceda1000000"}},"number":"0x0"} \ No newline at end of file diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index e0d40070878..1c4eaae47ec 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -4,8 +4,8 @@ use alloy_primitives::{ }; use reth_primitives_traits::{Account, Bytecode}; use reth_storage_api::{ - AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider, - StateProvider, StateRootProvider, StorageRootProvider, + AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, PlainPostState, + StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ @@ -91,6 +91,13 @@ impl StateRootProvider for StateProviderTest { ) -> ProviderResult<(B256, TrieUpdates)> { unimplemented!("state root computation is not supported") } + + fn state_root_with_updates_triedb( + &self, + _plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + todo!() + } } impl StorageRootProvider for StateProviderTest { diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index b52b30eb518..e95ee1c112e 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -52,6 +52,7 @@ futures.workspace = true tokio.workspace = true tokio-stream.workspace = true reqwest = { workspace = true, features = ["rustls-tls-native-roots"] } +triedb.workspace=true # metrics metrics.workspace = true diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs 
b/crates/rpc/rpc-eth-types/src/cache/db.rs index 8209af0fa53..7bd4909334d 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -50,6 +50,13 @@ impl reth_storage_api::StateRootProvider for StateProviderTraitObjWrapper<'_> { ) -> reth_errors::ProviderResult<(B256, reth_trie::updates::TrieUpdates)> { self.0.state_root_from_nodes_with_updates(input) } + + fn state_root_with_updates_triedb( + &self, + plain_state: reth_storage_api::PlainPostState, + ) -> reth_errors::ProviderResult<(B256, reth_trie::updates::TrieUpdates)> { + self.0.state_root_with_updates_triedb(plain_state) + } } impl reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper<'_> { diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 3fe1c7f1f97..26642132bc5 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -1,7 +1,7 @@ use alloy_primitives::{keccak256, Address, BlockNumber, TxHash, TxNumber, B256}; use reth_chainspec::MAINNET; use reth_db::{ - test_utils::{create_test_rw_db, create_test_rw_db_with_path, create_test_static_files_dir}, + test_utils::{create_test_rw_db, create_test_rw_db_with_path, create_test_static_files_dir, create_test_triedb_dir}, DatabaseEnv, }; use reth_db_api::{ @@ -17,7 +17,7 @@ use reth_db_api::{ use reth_ethereum_primitives::{Block, EthPrimitives, Receipt}; use reth_primitives_traits::{Account, SealedBlock, SealedHeader, StorageEntry}; use reth_provider::{ - providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, + providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter, triedb::TriedbProvider}, test_utils::MockNodeTypesWithDB, HistoryWriter, ProviderError, ProviderFactory, StaticFileProviderFactory, StatsReader, }; @@ -26,6 +26,7 @@ use reth_storage_errors::provider::ProviderResult; use reth_testing_utils::generators::ChangeSet; use 
std::{collections::BTreeMap, fmt::Debug, path::Path}; use tempfile::TempDir; +use std::sync::Arc; /// Test database that is used for testing stage implementations. #[derive(Debug)] @@ -38,12 +39,14 @@ impl Default for TestStageDB { /// Create a new instance of [`TestStageDB`] fn default() -> Self { let (static_dir, static_dir_path) = create_test_static_files_dir(); + let (triedb_dir, _) = create_test_triedb_dir(); Self { temp_static_files_dir: static_dir, factory: ProviderFactory::new( create_test_rw_db(), MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), + Arc::new(TriedbProvider::new(triedb_dir)), ), } } @@ -52,6 +55,7 @@ impl Default for TestStageDB { impl TestStageDB { pub fn new(path: &Path) -> Self { let (static_dir, static_dir_path) = create_test_static_files_dir(); + let (triedb_dir, _) = create_test_triedb_dir(); Self { temp_static_files_dir: static_dir, @@ -59,6 +63,7 @@ impl TestStageDB { create_test_rw_db_with_path(path), MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), + Arc::new(TriedbProvider::new(triedb_dir)), ), } } diff --git a/crates/storage/provider/src/providers/database/builder.rs b/crates/storage/provider/src/providers/database/builder.rs index 61536264c00..d532370543c 100644 --- a/crates/storage/provider/src/providers/database/builder.rs +++ b/crates/storage/provider/src/providers/database/builder.rs @@ -329,8 +329,8 @@ where pub fn triedb_provider( self, triedb_provider: TriedbProvider, - ) -> TypesAnd4, StaticFileProvider, TriedbProvider> { - TypesAnd4::new(self.val_1, self.val_2, self.val_3, triedb_provider) + ) -> TypesAnd4, StaticFileProvider, Arc> { + TypesAnd4::new(self.val_1, self.val_2, self.val_3, Arc::new(triedb_provider)) } } @@ -363,7 +363,7 @@ impl TypesAnd4 { } } -impl TypesAnd4, StaticFileProvider, TriedbProvider> +impl TypesAnd4, StaticFileProvider, Arc> where N: NodeTypes, DB: Database + DatabaseMetrics + Clone + Unpin + 'static, diff --git 
a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 6b9da018a80..40f6b1c3e68 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -66,7 +66,7 @@ pub struct ProviderFactory { /// The node storage handler. storage: Arc, - triedb_provider: TriedbProvider + triedb_provider: Arc } impl ProviderFactory>> { @@ -82,8 +82,11 @@ impl ProviderFactory { db: N::DB, chain_spec: Arc, static_file_provider: StaticFileProvider, - triedb_provider: TriedbProvider + triedb_provider: Arc ) -> Self { + // Initialize the static triedb_provider + let _ = crate::providers::state::latest::set_triedb_provider(triedb_provider.clone()); + Self { db, chain_spec, @@ -132,8 +135,11 @@ impl>> ProviderFactory { chain_spec: Arc, args: DatabaseArguments, static_file_provider: StaticFileProvider, - triedb_provider: TriedbProvider + triedb_provider: Arc ) -> RethResult { + // Initialize the static triedb_provider + let _ = crate::providers::state::latest::set_triedb_provider(triedb_provider.clone()); + Ok(Self { db: Arc::new(init_db(path, args).map_err(RethError::msg)?), chain_spec, @@ -160,6 +166,7 @@ impl ProviderFactory { self.static_file_provider.clone(), self.prune_modes.clone(), self.storage.clone(), + Some(self.triedb_provider.clone()), )) } @@ -175,6 +182,7 @@ impl ProviderFactory { self.static_file_provider.clone(), self.prune_modes.clone(), self.storage.clone(), + Some(self.triedb_provider.clone()), ))) } @@ -643,7 +651,7 @@ mod tests { Arc::new(chain_spec), DatabaseArguments::new(Default::default()), StaticFileProvider::read_write(static_dir_path).unwrap(), - TriedbProvider::new(trie_dir_path) + Arc::new(TriedbProvider::new(trie_dir_path)) ) .unwrap(); let provider = factory.provider().unwrap(); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 
1f0a0aa391a..0af95a93140 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -153,6 +153,8 @@ pub struct DatabaseProvider { prune_modes: PruneModes, /// Node storage handler. storage: Arc, + /// TrieDB provider for triedb operations + triedb_provider: Option>, } impl DatabaseProvider { @@ -160,6 +162,17 @@ impl DatabaseProvider { pub const fn prune_modes_ref(&self) -> &PruneModes { &self.prune_modes } + + /// Returns reference to TrieDB provider if available + pub fn triedb_provider(&self) -> Option<&Arc> { + self.triedb_provider.as_ref() + } +} + +impl crate::providers::state::latest::TriedbProviderAccess for DatabaseProvider { + fn triedb_provider(&self) -> Option<&Arc> { + self.triedb_provider.as_ref() + } } impl DatabaseProvider { @@ -242,14 +255,15 @@ impl> C impl DatabaseProvider { /// Creates a provider with an inner read-write transaction. - pub const fn new_rw( + pub fn new_rw( tx: TX, chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, storage: Arc, + triedb_provider: Option>, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes, storage } + Self { tx, chain_spec, static_file_provider, prune_modes, storage, triedb_provider } } } @@ -488,14 +502,15 @@ where impl DatabaseProvider { /// Creates a provider with an inner read-only transaction. - pub const fn new( + pub fn new( tx: TX, chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, storage: Arc, + triedb_provider: Option>, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes, storage } + Self { tx, chain_spec, static_file_provider, prune_modes, storage, triedb_provider } } /// Consume `DbTx` or `DbTxMut`. 
@@ -3124,7 +3139,7 @@ impl DBProvider for DatabaseProvider fn tx_ref(&self) -> &Self::Tx { &self.tx } - + fn tx_mut(&mut self) -> &mut Self::Tx { &mut self.tx } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 666138fae7b..4c246ca4e98 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -14,7 +14,8 @@ use reth_db_api::{ }; use reth_primitives_traits::{Account, Bytecode}; use reth_storage_api::{ - BlockNumReader, BytecodeReader, DBProvider, StateProofProvider, StorageRootProvider, + BlockNumReader, BytecodeReader, DBProvider, PlainPostState, StateProofProvider, + StorageRootProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ @@ -316,6 +317,13 @@ impl StateRootProvider StateRoot::overlay_root_from_nodes_with_updates(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } + + fn state_root_with_updates_triedb( + &self, + _plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + todo!() + } } impl StorageRootProvider diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 092feb37c43..71f01b146d0 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -2,11 +2,12 @@ use crate::{ providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, HashedPostStateProvider, StateProvider, StateRootProvider, }; -use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; +use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, U256}; use reth_db_api::{cursor::DbDupCursorRO, tables, transaction::DbTx}; use reth_primitives_traits::{Account, Bytecode}; -use reth_storage_api::{BytecodeReader, DBProvider, StateProofProvider, 
StorageRootProvider}; +use reth_storage_api::{BytecodeReader, DBProvider, PlainPostState, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use std::sync::{Arc, OnceLock}; use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, @@ -18,6 +19,26 @@ use reth_trie_db::{ DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, }; +use alloy_consensus::constants::KECCAK_EMPTY; +use alloy_trie::EMPTY_ROOT_HASH; +use triedb::{ + account::Account as TrieDBAccount, + overlay::{OverlayStateMut, OverlayValue}, + path::{AddressPath, StoragePath}, +}; + +/// Static storage for the triedb provider instance +static TRIEDB_PROVIDER: OnceLock> = OnceLock::new(); + +/// Initialize the static triedb provider +pub fn set_triedb_provider(provider: Arc) -> Result<(), Arc> { + TRIEDB_PROVIDER.set(provider) +} + +/// Get the static triedb provider +pub fn get_triedb_provider() -> Option<&'static Arc> { + TRIEDB_PROVIDER.get() +} /// State provider over latest state that takes tx reference. 
/// @@ -84,8 +105,72 @@ impl StateRootProvider for LatestStateProviderRef<' StateRoot::overlay_root_from_nodes_with_updates(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } + + fn state_root_with_updates_triedb( + &self, + plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + let triedb_provider = get_triedb_provider() + .ok_or_else(|| ProviderError::UnsupportedProvider)?; + + let mut overlay_mut = OverlayStateMut::new(); + + for (address, account_opt) in &plain_state.accounts { + let address_path = AddressPath::for_address(*address); + + if let Some(account) = account_opt { + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, // Storage root will be computed from storage overlay + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + overlay_mut.insert(address_path.clone().into(), Some(OverlayValue::Account(trie_account))); + } else { + // Account is being destroyed + overlay_mut.insert(address_path.clone().into(), None); + } + } + + for (address, storage) in &plain_state.storages { + let address_path = AddressPath::for_address(*address); + + for (storage_key, storage_value) in storage { + let raw_slot = U256::from_be_slice(storage_key.as_slice()); + let storage_path = StoragePath::for_address_path_and_slot( + address_path.clone(), + StorageKey::from(raw_slot), + ); + + if storage_value.is_zero() { + overlay_mut.insert(storage_path.clone().into(), None); + } else { + overlay_mut.insert( + storage_path.clone().into(), + Some(OverlayValue::Storage(StorageValue::from_be_slice( + storage_value.to_be_bytes::<32>().as_slice() + ))), + ); + } + } + } + + let overlay = overlay_mut.freeze(); + + let mut tx = triedb_provider.inner.begin_ro() + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to begin triedb transaction: {e:?}")))?; + + let result = tx.compute_root_with_overlay(overlay) + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to compute triedb root: 
{e:?}")))?; + + tx.commit() + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to commit triedb transaction: {e:?}")))?; + + Ok((result.root, TrieUpdates::default())) + } } + impl StorageRootProvider for LatestStateProviderRef<'_, Provider> { fn storage_root( &self, @@ -178,6 +263,12 @@ impl BytecodeReader } } +/// Trait for accessing TrieDB provider +pub trait TriedbProviderAccess { + /// Returns reference to TrieDB provider if available + fn triedb_provider(&self) -> Option<&Arc>; +} + /// State provider for the latest state. #[derive(Debug)] pub struct LatestStateProvider(Provider); @@ -193,6 +284,14 @@ impl LatestStateProvider { const fn as_ref(&self) -> LatestStateProviderRef<'_, Provider> { LatestStateProviderRef::new(&self.0) } + + /// Returns reference to TrieDB provider if available + pub fn triedb_provider(&self) -> Option<&Arc> + where + Provider: TriedbProviderAccess, + { + self.0.triedb_provider() + } } // Delegates all provider impls to [LatestStateProviderRef] diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index 74bb371819f..35f0cfee49b 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -48,6 +48,7 @@ macro_rules! 
delegate_provider_impls { fn state_root_from_nodes(&self, input: reth_trie::TrieInput) -> reth_storage_errors::provider::ProviderResult; fn state_root_with_updates(&self, state: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult<(alloy_primitives::B256, reth_trie::updates::TrieUpdates)>; fn state_root_from_nodes_with_updates(&self, input: reth_trie::TrieInput) -> reth_storage_errors::provider::ProviderResult<(alloy_primitives::B256, reth_trie::updates::TrieUpdates)>; + fn state_root_with_updates_triedb(&self, plain_state: reth_storage_api::PlainPostState) -> reth_storage_errors::provider::ProviderResult<(alloy_primitives::B256, reth_trie::updates::TrieUpdates)>; } StorageRootProvider $(where [$($generics)*])? { fn storage_root(&self, address: alloy_primitives::Address, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; diff --git a/crates/storage/provider/src/providers/triedb/mod.rs b/crates/storage/provider/src/providers/triedb/mod.rs index ef90dd54636..89ca0c47c3d 100644 --- a/crates/storage/provider/src/providers/triedb/mod.rs +++ b/crates/storage/provider/src/providers/triedb/mod.rs @@ -7,7 +7,7 @@ use triedb::{Database as TrieDbDatabase, path::{AddressPath, StoragePath}, ac transaction::TransactionError, Database}; #[derive(Debug, Clone)] pub struct TriedbProvider { - inner: Arc + pub inner: Arc } impl TriedbProvider { diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 16388de91ae..d20a52b7aa6 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -33,8 +33,8 @@ use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, BytecodeReader, DBProvider, DatabaseProviderFactory, - HashedPostStateProvider, NodePrimitivesProvider, StageCheckpointReader, StateProofProvider, - 
StorageRootProvider, TrieReader, + HashedPostStateProvider, NodePrimitivesProvider, PlainPostState, StageCheckpointReader, + StateProofProvider, StorageRootProvider, TrieReader, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ @@ -799,6 +799,13 @@ where let state_root = self.state_roots.lock().pop().unwrap_or_default(); Ok((state_root, Default::default())) } + + fn state_root_with_updates_triedb( + &self, + _plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + todo!() + } } impl StorageRootProvider for MockEthProvider diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index 3cffa3e9c06..ed1365baef4 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -62,7 +62,7 @@ pub fn create_test_provider_factory_with_node_types( db, chain_spec, StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"), - TriedbProvider::new(triedb_dir), + Arc::new(TriedbProvider::new(triedb_dir)), ) } diff --git a/crates/storage/rpc-provider/src/lib.rs b/crates/storage/rpc-provider/src/lib.rs index 6e5bd17218b..a52c6d0e2e1 100644 --- a/crates/storage/rpc-provider/src/lib.rs +++ b/crates/storage/rpc-provider/src/lib.rs @@ -57,7 +57,7 @@ use reth_rpc_convert::{TryFromBlockResponse, TryFromReceiptResponse, TryFromTran use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockReaderIdExt, BlockSource, DBProvider, NodePrimitivesProvider, - ReceiptProviderIdExt, StatsReader, + PlainPostState, ReceiptProviderIdExt, StatsReader, }; use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState, MultiProof, TrieInput}; use std::{ @@ -1197,6 +1197,13 @@ where warn!("state_root_from_nodes_with_updates is not implemented and will return zero"); Ok((B256::ZERO, TrieUpdates::default())) } + + fn state_root_with_updates_triedb( + &self, 
+ _plain_state: PlainPostState, + ) -> Result<(B256, TrieUpdates), ProviderError> { + todo!() + } } impl StorageReader for RpcBlockchainStateProvider diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index a62193a5dd8..ca11dc5ad55 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -32,6 +32,7 @@ alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true auto_impl.workspace = true +triedb.workspace = true [features] default = ["std"] diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index e538e1216e8..16fe844aaed 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -3,10 +3,10 @@ use crate::{ AccountReader, BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, BytecodeReader, ChangeSetReader, - HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, PruneCheckpointReader, - ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProofProvider, - StateProvider, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, - StorageRootProvider, TransactionVariant, TransactionsProvider, TrieReader, + HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, PlainPostState, + PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, + StateProofProvider, StateProvider, StateProviderBox, StateProviderFactory, StateReader, + StateRootProvider, StorageRootProvider, TransactionVariant, TransactionsProvider, TrieReader, }; #[cfg(feature = "db-api")] @@ -424,6 +424,13 @@ impl StateRootProvider for NoopProvider ) -> ProviderResult<(B256, TrieUpdates)> { Ok((B256::default(), TrieUpdates::default())) } + + fn state_root_with_updates_triedb( + &self, + _plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + todo!() + } } impl 
StorageRootProvider for NoopProvider { diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index 9ff02c106e5..d77f95119b4 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -1,12 +1,22 @@ use alloc::vec::Vec; -use alloy_primitives::{Address, BlockNumber, Bytes, B256}; +use alloy_primitives::{Address, BlockNumber, Bytes, B256, U256}; use reth_storage_errors::provider::ProviderResult; use reth_trie_common::{ updates::{StorageTrieUpdatesSorted, TrieUpdates, TrieUpdatesSorted}, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; - +use reth_primitives_traits::Account; +use std::collections::HashMap; + +/// Plain (unhashed) post state updates for TrieDB computation +#[derive(Debug, Clone, Default)] +pub struct PlainPostState { + /// Mapping of address to account info, `None` if destroyed + pub accounts: HashMap>, + /// Mapping of address to storage entries (slot -> value) + pub storages: HashMap>, +} /// A type that can compute the state root of a given post state. #[auto_impl::auto_impl(&, Box, Arc)] pub trait StateRootProvider: Send + Sync { @@ -37,6 +47,11 @@ pub trait StateRootProvider: Send + Sync { &self, input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)>; + + fn state_root_with_updates_triedb( + &self, + plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)>; } /// A type that can compute the storage root for a given account. 
From 44e9cc990c693daecf8bf844aa7baf819ea878ce Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Thu, 18 Dec 2025 16:02:24 +0800 Subject: [PATCH 16/36] complete init_genesis using triedb --- Cargo.lock | 1 + crates/engine/primitives/src/config.rs | 2 +- crates/engine/tree/src/tree/mod.rs | 27 +++-- crates/evm/evm/Cargo.toml | 1 + crates/evm/evm/src/execute.rs | 28 ++++- crates/optimism/node/tests/it/engine.rs | 2 +- crates/storage/db-common/src/init.rs | 107 +++++++++++++++--- crates/storage/provider/src/providers/mod.rs | 2 +- .../provider/src/providers/state/mod.rs | 2 +- triedb.md | 6 + 10 files changed, 144 insertions(+), 34 deletions(-) create mode 100644 triedb.md diff --git a/Cargo.lock b/Cargo.lock index e787cac588b..696135eafa7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8917,6 +8917,7 @@ dependencies = [ "reth-storage-errors", "reth-trie-common", "revm", + "tracing", ] [[package]] diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 0b9b7d9f821..b5a520728e5 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -1,7 +1,7 @@ //! Engine tree configuration. /// Triggers persistence when the number of canonical blocks in memory exceeds this threshold. -pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; +pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 0; /// How close to the canonical head we persist blocks. pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0; diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index c28b05547cc..4399b754e19 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1337,6 +1337,7 @@ where /// If we're currently awaiting a response this will try to receive the response (non-blocking) /// or send a new persistence action if necessary. 
fn advance_persistence(&mut self) -> Result<(), AdvancePersistenceError> { + debug!("start advance persistence"); if self.persistence_state.in_progress() { let (mut rx, start_time, current_action) = self .persistence_state @@ -1370,14 +1371,17 @@ where } } - // if !self.persistence_state.in_progress() { - // if let Some(new_tip_num) = self.find_disk_reorg()? { - // self.remove_blocks(new_tip_num) - // } else if self.should_persist() { - // let blocks_to_persist = self.get_canonical_blocks_to_persist()?; - // self.persist_blocks(blocks_to_persist); - // } - // } + if !self.persistence_state.in_progress() { + if let Some(new_tip_num) = self.find_disk_reorg()? { + self.remove_blocks(new_tip_num) + } else if self.should_persist() { + debug!("start persist blocks"); + let blocks_to_persist = self.get_canonical_blocks_to_persist()?; + self.persist_blocks(blocks_to_persist); + } else { + debug!("start not persist blocks"); + } + } Ok(()) } @@ -1731,9 +1735,10 @@ where return false } - let min_block = self.persistence_state.last_persisted_block.number; - self.state.tree_state.canonical_block_number().saturating_sub(min_block) > - self.config.persistence_threshold() + // let min_block = self.persistence_state.last_persisted_block.number; + // self.state.tree_state.canonical_block_number().saturating_sub(min_block) > + // self.config.persistence_threshold() + return true } /// Returns a batch of consecutive canonical blocks to persist in the range diff --git a/crates/evm/evm/Cargo.toml b/crates/evm/evm/Cargo.toml index 99439f892c1..32bd742c2a9 100644 --- a/crates/evm/evm/Cargo.toml +++ b/crates/evm/evm/Cargo.toml @@ -31,6 +31,7 @@ auto_impl.workspace = true derive_more.workspace = true futures-util.workspace = true metrics = { workspace = true, optional = true } +tracing.workspace = true [dev-dependencies] reth-ethereum-primitives.workspace = true diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index b3bcb29b257..49c785b7df8 100644 --- 
a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -518,6 +518,11 @@ where // calculate the state root let hashed_state = state.hashed_post_state(&db.bundle_state); + // Calculate state root using the previous method (mdbx) + let (mdbx_state_root, mdbx_trie_updates) = state + .state_root_with_updates(hashed_state.clone()) + .map_err(BlockExecutionError::other)?; + // Convert BundleState to PlainPostState for triedb computation let mut plain_state = PlainPostState::default(); for (address, bundle_account) in db.bundle_state.state() { @@ -541,10 +546,31 @@ where } } + // Calculate state root using triedb method let pr = state.state_root_with_updates_triedb(plain_state); - let (state_root, trie_updates) = + let (triedb_state_root, triedb_trie_updates) = pr.map_err(BlockExecutionError::other)?; + // Compare the two state roots + if mdbx_state_root != triedb_state_root { + tracing::debug!( + "reth::evm - State root mismatch! MDBX: {:?}, TrieDB: {:?}", + mdbx_state_root, + triedb_state_root + ); + // For now, use the triedb root, but log the mismatch + // You may want to panic or handle this differently based on your needs + } else { + tracing::debug!( + "reth::evm - State roots match: {:?}", + triedb_state_root + ); + } + + // Use triedb state root (or you can choose to use mdbx_state_root) + let state_root = triedb_state_root; + let trie_updates = triedb_trie_updates; + let (transactions, senders) = self.transactions.into_iter().map(|tx| tx.into_parts()).unzip(); diff --git a/crates/optimism/node/tests/it/engine.rs b/crates/optimism/node/tests/it/engine.rs index f9eba7898fa..ec081638416 100644 --- a/crates/optimism/node/tests/it/engine.rs +++ b/crates/optimism/node/tests/it/engine.rs @@ -287,7 +287,7 @@ async fn full_engine_api_bock_building_continuously() -> eyre::Result<()> { .await?; assert_eq!(fcu_result_2.payload_status.status, PayloadStatusEnum::Valid); let payload_id_2 = fcu_result_2.payload_id.expect("second payload id"); - + 
tokio::time::sleep(std::time::Duration::from_millis(500)).await; Ok(()) } diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index a543579c1e2..a3107daa701 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -2,7 +2,7 @@ use alloy_consensus::BlockHeader; use alloy_genesis::GenesisAccount; -use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256}; +use alloy_primitives::{keccak256, map::HashMap, Address, StorageValue, StorageKey, B256, U256}; use reth_chainspec::EthChainSpec; use reth_codecs::Compact; use reth_config::config::EtlConfig; @@ -32,6 +32,14 @@ use tracing::{debug, error, info, trace}; use reth_trie::{trie_cursor::{TrieCursor, TrieCursorFactory}}; #[cfg(feature = "trie-db-ext")] use crate::init_triedb::calculate_state_root_with_triedb; +use reth_provider::providers::state::latest::get_triedb_provider; +use triedb::{ + account::Account as TrieDBAccount, + path::{AddressPath, StoragePath}, + transaction::TransactionError, +}; +use alloy_trie::EMPTY_ROOT_HASH; +use alloy_consensus::constants::KECCAK_EMPTY; /// Default soft limit for number of bytes to read from state dump file, before inserting into /// database. @@ -105,6 +113,7 @@ where + AsRef, PF::ChainSpec: EthChainSpec
::BlockHeader>, { + println!("init_genesis"); let chain = factory.chain_spec(); let genesis = chain.genesis(); @@ -156,23 +165,25 @@ where insert_genesis_state(&provider_rw, alloc.iter())?; // compute state root to populate trie tables - #[cfg(feature = "trie-db-ext")] - { - use std::path::PathBuf; - use reth_trie::{hashed_cursor::{HashedCursorFactory, HashedCursor}, StateRootTrieDb, TrieExtDatabase}; - let trie_db_path = std::env::var("RETH_TRIEDB_PATH") - .map(PathBuf::from) - .unwrap_or_else(|_| { - PathBuf::from("../triedb") - }); - let file_path = trie_db_path.join("test.db"); - let trie_ext_db = TrieExtDatabase::new(file_path); - let trie_db_path = std::env::temp_dir().join("reth_triedb_init"); - calculate_state_root_with_triedb(&provider_rw, trie_db_path, None)?; - } - #[cfg(not(feature = "trie-db-ext"))] - { - compute_state_root(&provider_rw, None)?; + // #[cfg(feature = "trie-db-ext")] + // { + + // } + // #[cfg(not(feature = "trie-db-ext"))] + println!("start compute_state_root"); + let ret = compute_state_root(&provider_rw, None)?; + println!("compute_state_root done {:?}", ret); + + // Calculate state root using triedb + println!("start compute_state_root_triedb"); + match compute_state_root_triedb(alloc.iter()) { + Ok(triedb_state_root) => { + println!("compute_state_root_triedb done: {:?}", triedb_state_root); + } + Err(e) => { + println!("compute_state_root_triedb failed: {:?}", e); + // Don't fail genesis init if triedb fails, just log it + } } // set stage checkpoint to genesis block number for all stages @@ -703,6 +714,66 @@ where } } +/// Computes the state root using triedb by inserting all genesis accounts and storage. 
+pub fn compute_state_root_triedb<'a, 'b>( + alloc: impl Iterator, +) -> Result { + let triedb_provider = get_triedb_provider() + .ok_or_else(|| InitStorageError::Provider(ProviderError::UnsupportedProvider))?; + + let mut tx = triedb_provider.inner.begin_rw() + .map_err(|e| InitStorageError::Provider(ProviderError::TrieWitnessError(format!("Failed to begin triedb transaction: {e:?}"))))?; + + // Insert all genesis accounts and storage into triedb + for (address, genesis_account) in alloc { + let address_path = AddressPath::for_address(*address); + + // Convert GenesisAccount to Account + let account = Account { + nonce: genesis_account.nonce.unwrap_or(0), + balance: genesis_account.balance, + bytecode_hash: genesis_account.code.as_ref().map(|code| keccak256(code)), + }; + + // Insert storage first (if exists), so storage root can be computed + if let Some(ref storage) = genesis_account.storage { + for (storage_key, storage_value) in storage { + let raw_slot = U256::from_be_slice(storage_key.as_slice()); + let storage_path = StoragePath::for_address_path_and_slot( + address_path.clone(), + StorageKey::from(raw_slot), + ); + + let storage_value_u256 = U256::from_be_slice(storage_value.as_slice()); + if !storage_value_u256.is_zero() { + let storage_value_triedb = StorageValue::from_be_slice( + storage_value_u256.to_be_bytes::<32>().as_slice() + ); + tx.set_storage_slot(storage_path, Some(storage_value_triedb)).unwrap(); + } + } + } + + // Insert account (storage root will be computed by triedb when we commit) + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, // Will be computed by triedb + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + + tx.set_account(address_path, Some(trie_account)).unwrap(); + } + + // Commit - this computes the state root + tx.commit() + .map_err(|e| InitStorageError::Provider(ProviderError::TrieWitnessError(format!("Failed to commit triedb transaction: {e:?}"))))?; + + // Get the computed 
state root + let triedb_state_root = triedb_provider.inner.state_root(); + Ok(triedb_state_root) +} + /// Type to deserialize state root from state dump file. #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] struct StateRoot { diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 9cb3ae15f81..772e891f1dd 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -15,7 +15,7 @@ pub use static_file::{ pub mod triedb; -mod state; +pub mod state; pub use state::{ historical::{HistoricalStateProvider, HistoricalStateProviderRef, LowestAvailableBlocks}, latest::{LatestStateProvider, LatestStateProviderRef}, diff --git a/crates/storage/provider/src/providers/state/mod.rs b/crates/storage/provider/src/providers/state/mod.rs index f26302531eb..f5e83d23a5f 100644 --- a/crates/storage/provider/src/providers/state/mod.rs +++ b/crates/storage/provider/src/providers/state/mod.rs @@ -1,5 +1,5 @@ //! 
[`StateProvider`](crate::StateProvider) implementations pub(crate) mod historical; -pub(crate) mod latest; +pub mod latest; pub(crate) mod macros; pub(crate) mod overlay; diff --git a/triedb.md b/triedb.md new file mode 100644 index 00000000000..2c90adbeb0a --- /dev/null +++ b/triedb.md @@ -0,0 +1,6 @@ +rm -rf ~/Library/Application\ Support/reth/dev && rm -rf logs \ +&& cargo run --package op-reth --bin op-reth -- node --dev \ + -vvvv \ + --log.file.filter debug \ + --log.file.directory /Users/cliffyang/dev/okx/reth/logs \ + --log.file.name op-reth.log \ No newline at end of file From 43d3fc250b0e4fed772757b1f52f13e8ce294fc1 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Thu, 18 Dec 2025 17:26:59 +0800 Subject: [PATCH 17/36] add debug log --- crates/engine/tree/src/tree/mod.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 4399b754e19..575976e80fe 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -591,7 +591,8 @@ where } else { self.try_buffer_payload(payload)? 
}; - + debug!("try_insert_payload status: {:?}", status); + debug!("is_sync_target_head: {:?}", self.is_sync_target_head(block_hash)); let mut outcome = TreeOutcome::new(status); // if the block is valid and it is the current sync target head, make it canonical if outcome.outcome.is_valid() && self.is_sync_target_head(block_hash) { @@ -1933,7 +1934,14 @@ where /// See [`ForkchoiceStateTracker::sync_target_state`] fn is_sync_target_head(&self, block_hash: B256) -> bool { if let Some(target) = self.state.forkchoice_state_tracker.sync_target_state() { + debug!( + target: "engine::tree", + ?block_hash, + head_block_hash = ?target.head_block_hash, + ); return target.head_block_hash == block_hash + } else { + debug!(target: "engine::tree", "no sync target state"); } false } From 74540b836761149aa8400a10232ddfac7a287f1e Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Thu, 18 Dec 2025 17:50:32 +0800 Subject: [PATCH 18/36] fix localMiner --- crates/engine/local/src/miner.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index aab19e04965..b71a033a72f 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -141,7 +141,7 @@ where /// Runs the [`LocalMiner`] in a loop, polling the miner and building payloads. pub async fn run(mut self) { - let mut fcu_interval = tokio::time::interval(Duration::from_secs(10000)); + let mut fcu_interval = tokio::time::interval(Duration::from_secs(1)); loop { tokio::select! 
{ // Wait for the interval or the pool to receive a transaction @@ -225,8 +225,9 @@ where let block = payload.block(); let payload = T::block_to_payload(payload.block().clone()); + tracing::debug!("start new_payload"); let res = self.to_engine.new_payload(payload).await?; - + tracing::debug!("end new_payload"); if !res.is_valid() { eyre::bail!("Invalid payload") } From d7a0f6ec5ef20c3d6640b39be078afaf3729765b Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Fri, 19 Dec 2025 10:50:13 +0800 Subject: [PATCH 19/36] fix memory overlay state root with triedb --- Cargo.toml | 2 +- crates/chain-state/src/memory_overlay.rs | 34 ++++++++++++++++++- crates/evm/evm/src/execute.rs | 4 +-- crates/storage/db-common/src/init.rs | 4 --- .../src/providers/state/historical.rs | 1 + .../provider/src/providers/state/latest.rs | 1 + 6 files changed, 37 insertions(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e2850db7fa1..66e418d115f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -292,7 +292,7 @@ too_long_first_doc_paragraph = "allow" # Uncomment this section if you're using a debugger. [profile.dev] # https://davidlattimore.github.io/posts/2024/02/04/speeding-up-the-rust-edit-build-run-cycle.html -debug = "full" +debug = "line-tables-only" split-debuginfo = "unpacked" # Speed up tests. 
diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index c4da8f2f5ec..8edfee00f31 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -152,7 +152,39 @@ impl StateRootProvider for MemoryOverlayStateProviderRef<'_, &self, plain_state: PlainPostState, ) -> ProviderResult<(B256, TrieUpdates)> { - self.historical.state_root_with_updates_triedb(plain_state) + use std::collections::HashMap; + let mut cached_plain_state = PlainPostState::default(); + + for block in &self.in_memory { + let bundle_state = &block.execution_output.bundle; + for (address, bundle_account) in bundle_state.state() { + let account = if bundle_account.was_destroyed() || bundle_account.info.is_none() { + None + } else { + bundle_account.info.as_ref().map(|info| reth_primitives_traits::Account::from(info)) + }; + cached_plain_state.accounts.insert(*address, account); + + let storage_map = cached_plain_state.storages.entry(*address).or_insert_with(HashMap::new); + for (slot, storage_slot) in &bundle_account.storage { + let slot_b256 = B256::from_slice(&slot.to_be_bytes::<32>()); + storage_map.insert(slot_b256, storage_slot.present_value); + } + } + } + + let mut merged_state = cached_plain_state; + + for (address, account) in plain_state.accounts { + merged_state.accounts.insert(address, account); + } + + for (address, storage) in plain_state.storages { + let merged_storage = merged_state.storages.entry(address).or_insert_with(HashMap::new); + merged_storage.extend(storage); + } + + self.historical.state_root_with_updates_triedb(merged_state) } } diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 49c785b7df8..489da87fb3a 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -558,11 +558,9 @@ where mdbx_state_root, triedb_state_root ); - // For now, use the triedb root, but log the mismatch - // You may want to panic or handle this differently based 
on your needs } else { tracing::debug!( - "reth::evm - State roots match: {:?}", + "reth::evm - State root match: {:?}", triedb_state_root ); } diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index a3107daa701..dc98a1aeb42 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -113,7 +113,6 @@ where + AsRef, PF::ChainSpec: EthChainSpec
::BlockHeader>, { - println!("init_genesis"); let chain = factory.chain_spec(); let genesis = chain.genesis(); @@ -170,12 +169,9 @@ where // } // #[cfg(not(feature = "trie-db-ext"))] - println!("start compute_state_root"); let ret = compute_state_root(&provider_rw, None)?; - println!("compute_state_root done {:?}", ret); // Calculate state root using triedb - println!("start compute_state_root_triedb"); match compute_state_root_triedb(alloc.iter()) { Ok(triedb_state_root) => { println!("compute_state_root_triedb done: {:?}", triedb_state_root); diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 4c246ca4e98..3ea8ba386b6 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -322,6 +322,7 @@ impl StateRootProvider &self, _plain_state: PlainPostState, ) -> ProviderResult<(B256, TrieUpdates)> { + tracing::debug!("latest_state_provider state_root_with_updates_triedb"); todo!() } } diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 71f01b146d0..1c24f1cdbbd 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -110,6 +110,7 @@ impl StateRootProvider for LatestStateProviderRef<' &self, plain_state: PlainPostState, ) -> ProviderResult<(B256, TrieUpdates)> { + tracing::debug!("latest_state_provider state_root_with_updates_triedb"); let triedb_provider = get_triedb_provider() .ok_or_else(|| ProviderError::UnsupportedProvider)?; From 17c19da538293dec97d9f7e1c57af3ff1eb87701 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Fri, 19 Dec 2025 17:54:45 +0800 Subject: [PATCH 20/36] local miner only send new_payload after payload inserted --- Cargo.lock | 1 + crates/engine/local/Cargo.toml | 1 + crates/engine/local/src/miner.rs | 63 +++++++++++-- 
crates/engine/primitives/src/config.rs | 2 +- crates/engine/tree/src/tree/mod.rs | 17 ++-- .../src/providers/database/provider.rs | 92 ++++++++++++++++++- triedb.md | 4 +- 7 files changed, 162 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 696135eafa7..13bc3de85c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8414,6 +8414,7 @@ dependencies = [ "reth-optimism-chainspec", "reth-optimism-forks", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-storage-api", "reth-transaction-pool", diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index 64839ec4363..25ee9403fcd 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -17,6 +17,7 @@ reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-storage-api.workspace = true reth-transaction-pool.workspace = true +reth-payload-builder-primitives.workspace = true # alloy alloy-consensus.workspace = true diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index b71a033a72f..f8396646ca6 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -7,6 +7,7 @@ use eyre::OptionExt; use futures_util::{stream::Fuse, StreamExt}; use reth_engine_primitives::ConsensusEngineHandle; use reth_payload_builder::PayloadBuilderHandle; +use reth_payload_builder_primitives::PayloadEvents; use reth_payload_primitives::{ BuiltPayload, EngineApiMessageVersion, PayloadAttributesBuilder, PayloadKind, PayloadTypes, }; @@ -21,7 +22,7 @@ use std::{ }; use tokio::time::Interval; use tokio_stream::wrappers::ReceiverStream; -use tracing::error; +use tracing::{error, debug}; /// A mining mode for the local dev engine. #[derive(Debug)] @@ -190,8 +191,8 @@ where Ok(()) } - /// Generates payload attributes for a new block, passes them to FCU and inserts built payload - /// through newPayload. 
+ /// Generates payload attributes for a new block, waits for InsertExecutedBlock to be processed, + /// then calls newPayload. async fn advance(&mut self) -> eyre::Result<()> { let timestamp = std::cmp::max( self.last_timestamp.saturating_add(1), @@ -201,6 +202,11 @@ where .as_secs(), ); + // Subscribe to payload events BEFORE building the payload to ensure we don't miss it + let payload_events = self.payload_builder.subscribe().await + .map_err(|e| eyre::eyre!("Failed to subscribe to payload events: {:?}", e))?; + let mut built_stream = payload_events.into_built_payload_stream(); + let res = self .to_engine .fork_choice_updated( @@ -223,17 +229,62 @@ where }; let block = payload.block(); + let block_hash = block.hash(); + + // Wait for the built_payloads stream to process this payload + // The payload builder emits payloads to the stream, which sends InsertExecutedBlock + // We wait for our specific payload to appear in the stream + debug!(target: "engine::local", block_hash=?block_hash, "Waiting for InsertExecutedBlock to be processed"); + + let mut found = false; + let timeout = tokio::time::Duration::from_millis(1000); + let start = tokio::time::Instant::now(); + + while !found && start.elapsed() < timeout { + tokio::select! 
{ + result = built_stream.next() => { + match result { + Some(p) => { + if let Some(executed_block) = p.executed_block() { + if executed_block.recovered_block().hash() == block_hash { + debug!(target: "engine::local", block_hash=?block_hash, "Found payload in built_payloads stream, InsertExecutedBlock should be processed"); + found = true; + // Give a small additional delay to ensure InsertExecutedBlock is fully processed + tokio::time::sleep(tokio::time::Duration::from_millis(10)).await; + break; + } + } + } + None => { + debug!(target: "engine::local", "Payload event stream ended"); + break; + } + } + } + _ = tokio::time::sleep(tokio::time::Duration::from_millis(10)) => { + // Continue waiting, but check timeout + if start.elapsed() >= timeout { + debug!(target: "engine::local", block_hash=?block_hash, "Timeout waiting for payload in built_payloads stream"); + break; + } + } + } + } + + if !found { + debug!(target: "engine::local", block_hash=?block_hash, "Did not find payload in built_payloads stream, proceeding anyway"); + } let payload = T::block_to_payload(payload.block().clone()); - tracing::debug!("start new_payload"); + debug!(target: "engine::local", block_hash=?block_hash, "start new_payload"); let res = self.to_engine.new_payload(payload).await?; - tracing::debug!("end new_payload"); + debug!(target: "engine::local", block_hash=?block_hash, "end new_payload"); if !res.is_valid() { eyre::bail!("Invalid payload") } self.last_timestamp = timestamp; - self.last_block_hashes.push_back(block.hash()); + self.last_block_hashes.push_back(block_hash); // ensure we keep at most 64 blocks if self.last_block_hashes.len() > 64 { self.last_block_hashes.pop_front(); diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index b5a520728e5..0b9b7d9f821 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -1,7 +1,7 @@ //! Engine tree configuration. 
/// Triggers persistence when the number of canonical blocks in memory exceeds this threshold. -pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 0; +pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; /// How close to the canonical head we persist blocks. pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0; diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 575976e80fe..1f7bc8ff909 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1376,11 +1376,11 @@ where if let Some(new_tip_num) = self.find_disk_reorg()? { self.remove_blocks(new_tip_num) } else if self.should_persist() { - debug!("start persist blocks"); + debug!("should persist blocks"); let blocks_to_persist = self.get_canonical_blocks_to_persist()?; self.persist_blocks(blocks_to_persist); } else { - debug!("start not persist blocks"); + debug!("should not persist blocks"); } } @@ -1736,10 +1736,9 @@ where return false } - // let min_block = self.persistence_state.last_persisted_block.number; - // self.state.tree_state.canonical_block_number().saturating_sub(min_block) > - // self.config.persistence_threshold() - return true + let min_block = self.persistence_state.last_persisted_block.number; + self.state.tree_state.canonical_block_number().saturating_sub(min_block) > + self.config.persistence_threshold() } /// Returns a batch of consecutive canonical blocks to persist in the range @@ -1854,8 +1853,10 @@ where let header = self.state.tree_state.sealed_header_by_hash(&hash); if header.is_some() { + debug!(target: "engine::tree", "found in memory Sealed block with hash {hash:?}"); Ok(header) } else { + debug!(target: "engine::tree", "found in disk Sealed block with hash {hash:?}"); self.provider.sealed_header_by_hash(hash) } } @@ -2504,7 +2505,7 @@ where // We now assume that we already have this block in the tree. However, we need to // run the conversion to ensure that the block hash is valid. 
convert_to_block(self, input)?; - + debug!(target: "engine::tree", block=?block_num_hash, "block already seen"); // X Layer: Even if block is already seen, update timing metrics if it was built locally // Block was built locally but already exists in tree // Set insert timing to 0 for now, will be updated in event handler if elapsed > 0 @@ -2520,6 +2521,8 @@ where _ => {} }; + debug!(target: "engine::tree", block=?block_num_hash, "block not already seen"); + // Ensure that the parent state is available. match self.state_provider_builder(block_id.parent) { Err(err) => { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 0af95a93140..9d3d2a080d7 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -28,7 +28,7 @@ use alloy_eips::BlockHashOrNumber; use alloy_primitives::{ keccak256, map::{hash_map, B256Map, HashMap, HashSet}, - Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, + Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256,StorageKey,StorageValue }; use itertools::Itertools; use rayon::slice::ParallelSliceMut; @@ -57,7 +57,7 @@ use reth_prune_types::{ use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ - BlockBodyIndicesProvider, BlockBodyReader, NodePrimitivesProvider, StateProvider, + BlockBodyIndicesProvider, BlockBodyReader, NodePrimitivesProvider, PlainPostState, StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::ProviderResult; @@ -84,6 +84,12 @@ use std::{ sync::Arc, }; use tracing::{debug, trace}; +use alloy_consensus::constants::KECCAK_EMPTY; +use alloy_trie::EMPTY_ROOT_HASH; +use triedb::{ + account::Account as TrieDBAccount, + path::{AddressPath, StoragePath}, +}; /// A [`DatabaseProvider`] that holds a read-only database transaction. 
pub type DatabaseProviderRO = DatabaseProvider<::TX, N>; @@ -298,10 +304,32 @@ impl DatabaseProvider()); + storage_map.insert(slot_b256, storage_slot.present_value); + } + } + } + let block_number = recovered_block.number(); self.insert_block(Arc::unwrap_or_clone(recovered_block))?; @@ -324,6 +352,64 @@ impl DatabaseProvider().as_slice() + ); + tx.set_storage_slot(storage_path, Some(storage_value_triedb)) + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to set triedb storage slot: {e:?}")))?; + } + } + } + + // Then, write accounts (storage roots will be computed automatically by triedb) + for (address, account_opt) in &merged_plain_state.accounts { + let address_path = AddressPath::for_address(*address); + + if let Some(account) = account_opt { + // Account exists or is being updated + // Storage root will be computed from the storage we just wrote + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, // Will be computed from storage + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + tx.set_account(address_path, Some(trie_account)) + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to set triedb account: {e:?}")))?; + } else { + // Account is being destroyed + tx.set_account(address_path, None) + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to delete triedb account: {e:?}")))?; + } + } + + // Commit the triedb transaction + tx.commit() + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to commit triedb transaction: {e:?}")))?; + } + debug!(target: "providers::db", range = ?first_number..=last_block_number, "Appended block data"); Ok(()) diff --git a/triedb.md b/triedb.md index 2c90adbeb0a..3eea2b425ca 100644 --- a/triedb.md +++ b/triedb.md @@ -3,4 +3,6 @@ rm -rf ~/Library/Application\ Support/reth/dev && rm -rf logs \ -vvvv \ --log.file.filter debug \ --log.file.directory /Users/cliffyang/dev/okx/reth/logs \ - --log.file.name op-reth.log \ No newline at end of file 
+ --log.file.name op-reth.log + +cast send 0x33f34d8b20696780ba07b1ea89f209b4dc51723a --value 1ether --private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 --rpc-url http://localhost:8545 --gas-price 1000gwei \ No newline at end of file From 636e641c14eb85840096568a08a10310a3ce8034 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 22 Dec 2025 10:16:32 +0800 Subject: [PATCH 21/36] only calculate using triedb --- crates/evm/evm/src/execute.rs | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 489da87fb3a..3dbd461f327 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -518,10 +518,10 @@ where // calculate the state root let hashed_state = state.hashed_post_state(&db.bundle_state); - // Calculate state root using the previous method (mdbx) - let (mdbx_state_root, mdbx_trie_updates) = state - .state_root_with_updates(hashed_state.clone()) - .map_err(BlockExecutionError::other)?; + // // Calculate state root using the previous method (mdbx) + // let (mdbx_state_root, mdbx_trie_updates) = state + // .state_root_with_updates(hashed_state.clone()) + // .map_err(BlockExecutionError::other)?; // Convert BundleState to PlainPostState for triedb computation let mut plain_state = PlainPostState::default(); @@ -551,19 +551,19 @@ where let (triedb_state_root, triedb_trie_updates) = pr.map_err(BlockExecutionError::other)?; - // Compare the two state roots - if mdbx_state_root != triedb_state_root { - tracing::debug!( - "reth::evm - State root mismatch! MDBX: {:?}, TrieDB: {:?}", - mdbx_state_root, - triedb_state_root - ); - } else { - tracing::debug!( - "reth::evm - State root match: {:?}", - triedb_state_root - ); - } + // // Compare the two state roots + // if mdbx_state_root != triedb_state_root { + // tracing::debug!( + // "reth::evm - State root mismatch! 
MDBX: {:?}, TrieDB: {:?}", + // mdbx_state_root, + // triedb_state_root + // ); + // } else { + // tracing::debug!( + // "reth::evm - State root match: {:?}", + // triedb_state_root + // ); + // } // Use triedb state root (or you can choose to use mdbx_state_root) let state_root = triedb_state_root; From cf50c8f1968679e664cb332e066fa2c7d1b12469 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 22 Dec 2025 10:33:31 +0800 Subject: [PATCH 22/36] merge with dev --- Cargo.lock | 71 ++--- crates/storage/db-common/src/init.rs | 8 +- crates/storage/db-common/src/lib.rs | 2 - crates/trie/trie/src/lib.rs | 4 +- crates/trie/trie/src/trie_ext.rs | 458 +++++++++++++-------------- 5 files changed, 256 insertions(+), 287 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 13bc3de85c4..5fd42ca6fe5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -119,7 +119,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-serde", - "alloy-trie 0.9.1", + "alloy-trie", "alloy-tx-macros", "arbitrary", "auto_impl", @@ -291,7 +291,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-serde", - "alloy-trie 0.9.1", + "alloy-trie", "borsh", "serde", "serde_with", @@ -885,26 +885,6 @@ dependencies = [ "ws_stream_wasm", ] -[[package]] -name = "alloy-trie" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "983d99aa81f586cef9dae38443245e585840fcf0fc58b09aee0b1f27aed1d500" -dependencies = [ - "alloy-primitives", - "alloy-rlp", - "arbitrary", - "arrayvec", - "derive_arbitrary", - "derive_more", - "nybbles 0.3.4", - "proptest", - "proptest-derive 0.5.1", - "serde", - "smallvec", - "tracing", -] - [[package]] name = "alloy-trie" version = "0.9.1" @@ -6273,7 +6253,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" dependencies = [ "alloy-rlp", - "arbitrary", "const-hex", "proptest", "serde", @@ -6334,9 +6313,9 @@ checksum = 
"d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "op-alloy-consensus" -version = "0.22.0" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e42e9de945efe3c2fbd207e69720c9c1af2b8caa6872aee0e216450c25a3ca70" +checksum = "726da827358a547be9f1e37c2a756b9e3729cb0350f43408164794b370cad8ae" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6360,9 +6339,9 @@ checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc" [[package]] name = "op-alloy-network" -version = "0.22.0" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9da49a2812a0189dd05e81e4418c3ae13fd607a92654107f02ebad8e91ed9e" +checksum = "f63f27e65be273ec8fcb0b6af0fd850b550979465ab93423705ceb3dfddbd2ab" dependencies = [ "alloy-consensus", "alloy-network", @@ -6376,9 +6355,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.22.0" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ceb771ab9323647093ea2e58dc7f25289a1b95cbef2faa2620f6ca2dee4d9" +checksum = "8ef9114426b16172254555aad34a8ea96c01895e40da92f5d12ea680a1baeaa7" dependencies = [ "alloy-primitives", "jsonrpsee", @@ -6386,9 +6365,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.22.0" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd1eb7bddd2232856ba9d259320a094f9edf2b9061acfe5966e7960208393e6" +checksum = "562dd4462562c41f9fdc4d860858c40e14a25df7f983ae82047f15f08fce4d19" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6406,9 +6385,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.22.0" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5429622150d18d8e6847a701135082622413e2451b64d03f979415d764566bef" +checksum = 
"d8f24b8cb66e4b33e6c9e508bf46b8ecafc92eadd0b93fedd306c0accb477657" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7841,7 +7820,7 @@ dependencies = [ "alloy-genesis", "alloy-primitives", "alloy-rlp", - "alloy-trie 0.9.1", + "alloy-trie", "auto_impl", "derive_more", "reth-ethereum-forks", @@ -7983,7 +7962,7 @@ dependencies = [ "alloy-eips", "alloy-genesis", "alloy-primitives", - "alloy-trie 0.9.1", + "alloy-trie", "arbitrary", "bytes", "modular-bitfield", @@ -8148,7 +8127,7 @@ dependencies = [ "alloy-consensus", "alloy-genesis", "alloy-primitives", - "alloy-trie 0.9.1", + "alloy-trie", "boyer-moore-magiclen", "codspeed-criterion-compat", "eyre", @@ -9758,7 +9737,7 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", - "alloy-trie 0.9.1", + "alloy-trie", "op-alloy-consensus", "reth-chainspec", "reth-consensus", @@ -10207,7 +10186,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-rpc-types-eth", - "alloy-trie 0.9.1", + "alloy-trie", "arbitrary", "auto_impl", "bincode 1.3.3", @@ -10242,7 +10221,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", - "alloy-trie 0.9.1", + "alloy-trie", "assert_matches", "dashmap 6.1.0", "eyre", @@ -10914,7 +10893,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", - "alloy-trie 0.9.1", + "alloy-trie", "itertools 0.14.0", "k256", "reth-chainspec", @@ -11165,7 +11144,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", - "alloy-trie 0.9.1", + "alloy-trie", "assert_matches", "auto_impl", "codspeed-criterion-compat", @@ -11206,7 +11185,7 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types-eth", "alloy-serde", - "alloy-trie 0.9.1", + "alloy-trie", "arbitrary", "arrayvec", "bincode 1.3.3", @@ -11294,7 +11273,7 @@ version = "1.9.2" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-trie 0.9.1", + "alloy-trie", "arbitrary", "assert_matches", "auto_impl", @@ -11327,7 +11306,7 @@ version = "1.9.2" dependencies = [ 
"alloy-primitives", "alloy-rlp", - "alloy-trie 0.9.1", + "alloy-trie", "arbitrary", "assert_matches", "itertools 0.14.0", @@ -13547,11 +13526,11 @@ dependencies = [ [[package]] name = "triedb" version = "0.1.0" -source = "git+https://github.com/base/triedb.git#ee4e382f1a1aa0c773d5707156d85e170bfab488" +source = "git+https://github.com/base/triedb.git#cedd1a33084ddb2724240193c39df3fbdec1dba0" dependencies = [ "alloy-primitives", "alloy-rlp", - "alloy-trie 0.8.1", + "alloy-trie", "arrayvec", "fxhash", "memmap2", diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index dc98a1aeb42..6c6f48fb67e 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -163,13 +163,7 @@ where insert_genesis_state(&provider_rw, alloc.iter())?; - // compute state root to populate trie tables - // #[cfg(feature = "trie-db-ext")] - // { - - // } - // #[cfg(not(feature = "trie-db-ext"))] - let ret = compute_state_root(&provider_rw, None)?; + // let ret = compute_state_root(&provider_rw, None)?; // Calculate state root using triedb match compute_state_root_triedb(alloc.iter()) { diff --git a/crates/storage/db-common/src/lib.rs b/crates/storage/db-common/src/lib.rs index 307476a518f..5080687ff98 100644 --- a/crates/storage/db-common/src/lib.rs +++ b/crates/storage/db-common/src/lib.rs @@ -11,7 +11,5 @@ pub mod init; mod db_tool; -// #[cfg(feature = "trie-db-ext")] -pub mod init_triedb; pub use db_tool::*; diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index 2f7c681e674..62d3ff1a5e7 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -65,6 +65,4 @@ pub mod test_utils; pub mod mock; /// Verification of existing stored trie nodes against state data. 
-pub mod verify; -mod trie_ext; -pub use trie_ext::{StateRootTrieDb, TrieExtDatabase}; \ No newline at end of file +pub mod verify; \ No newline at end of file diff --git a/crates/trie/trie/src/trie_ext.rs b/crates/trie/trie/src/trie_ext.rs index 67ca71b057a..529f00ab673 100644 --- a/crates/trie/trie/src/trie_ext.rs +++ b/crates/trie/trie/src/trie_ext.rs @@ -1,229 +1,229 @@ -use std::path::Path; -use std::time::Instant; -use alloy_primitives::B256; -use alloy_trie::{HashBuilder, EMPTY_ROOT_HASH}; -use tracing::{debug, trace}; -use reth_execution_errors::StateRootError; -use reth_trie_common::{prefix_set::TriePrefixSets}; -use crate::{IntermediateStateRootState, StateRoot, StateRootProgress, StorageRoot}; -use crate::hashed_cursor::{HashedCursor, HashedCursorFactory}; -use crate::node_iter::{TrieElement, TrieNodeIter}; -use crate::stats::TrieTracker; -use crate::trie::StateRootContext; -use crate::trie_cursor::TrieCursorFactory; -use crate::walker::TrieWalker; -use triedb::{Database as TrieDbDatabase, path::{AddressPath, StoragePath}, }; -use nybbles::Nibbles; -use triedb::account::Account as TrieDbAccount; -use alloy_consensus::constants::KECCAK_EMPTY; -#[derive(Debug)] -pub struct TrieExtDatabase { - pub inner: TrieDbDatabase, -} - -impl TrieExtDatabase { - pub fn new(db_path: impl AsRef) -> Self { - let db_path = db_path.as_ref(); - let db = TrieDbDatabase::create_new(db_path).unwrap(); - Self { - inner: db, - } - } -} - -/// `StateRoot` is used to compute the root node of a state trie. -#[derive(Debug)] -pub struct StateRootTrieDb { - /// The factory for hashed cursors. 
- pub hashed_cursor_factory: H, - pub db: TrieExtDatabase -} - -impl StateRootTrieDb { - /// Creates [`StateRootTrieDb`] with - pub fn new(hashed_cursor_factory: H, db: TrieExtDatabase) -> Self { - Self { - hashed_cursor_factory, - db - } - } -} -impl StateRootTrieDb -where - H: HashedCursorFactory + Clone, -{ - pub fn calculate_commit(self) -> Result { - trace!(target: "trie::state_root", "calculating state root"); - - let mut acct_cursor = self.hashed_cursor_factory.hashed_account_cursor()?; - - let mut tx = self.db.inner.begin_rw().unwrap(); - - // Start from the beginning by seeking to the first account (B256::ZERO) - let mut account_entry = acct_cursor.next().unwrap(); - while let Some((hashed_address, account)) = account_entry { - - let nibbles = Nibbles::unpack(hashed_address); - let address_path = AddressPath::new(nibbles); - - // Get storage cursor for this account first - let mut storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor(hashed_address)?; - - // Iterate through all storage entries for this account to compute storage root - // For now, we'll use EMPTY_ROOT_HASH if no storage entries exist - // TODO: Compute actual storage root from storage entries - let mut storage_entry = storage_cursor.seek(B256::ZERO)?; - let storage_root = if storage_entry.is_some() { - // If there are storage entries, we need to compute the storage root - // For now, use EMPTY_ROOT_HASH as placeholder - // In a full implementation, you'd build the storage trie and get its root - EMPTY_ROOT_HASH - } else { - EMPTY_ROOT_HASH - }; - - // Convert reth_primitives_traits::Account to triedb::account::Account - let triedb_account = TrieDbAccount { - nonce: account.nonce, - balance: account.balance, - code_hash: account.bytecode_hash.unwrap_or(KECCAK_EMPTY), - storage_root, - }; - - tx.set_account(address_path.clone(), Some(triedb_account)).unwrap(); - - // Now set storage slots in TrieDB - while let Some((hashed_storage_key, storage_value)) = storage_entry { - let 
storage_path = StoragePath::for_address_path_and_slot_hash(address_path.clone(), Nibbles::unpack(hashed_storage_key)); - tx.set_storage_slot(storage_path, Some(storage_value)).unwrap(); - - storage_entry = storage_cursor.next()?; - } - - account_entry = acct_cursor.next()?; - } -let start_commit = Instant::now(); - tx.commit().unwrap(); - println!("commit elapsed: {:?}", start_commit.elapsed()); - Ok(self.db.inner.state_root()) - } -} - -#[cfg(test)] -mod tests { - use tempdir::TempDir; - use super::{TrieExtDatabase}; - use crate::hashed_cursor::{HashedCursor, HashedCursorFactory}; - use reth_provider::{ - test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, - ProviderFactory, HashingWriter, DBProvider - }; - use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA}; - use reth_provider::DatabaseProviderFactory; - use reth_trie_db::DatabaseHashedCursorFactory; - use alloy_primitives::{Address, U256, keccak256, B256}; - use reth_primitives_traits::Account; - - #[test] - pub fn test_triedb() { - let tmp_dir = TempDir::new("test_triedb").unwrap(); - let file_path = tmp_dir.path().join("test.db"); - let trie_db = TrieExtDatabase::new(file_path); - - let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); - - let mut provider_rw = provider_factory.database_provider_rw().unwrap(); - - // Generate dummy accounts - let dummy_accounts: Vec<(Address, Account)> = vec![ - ( - Address::with_last_byte(1), - Account { - nonce: 10, - balance: U256::from(1000), - bytecode_hash: None, - }, - ), - ( - Address::with_last_byte(2), - Account { - nonce: 20, - balance: U256::from(2000), - bytecode_hash: None, - }, - ), - ( - Address::with_last_byte(3), - Account { - nonce: 30, - balance: U256::from(3000), - bytecode_hash: None, - }, - ), - ]; - - // Insert accounts into the database - let accounts_for_hashing = dummy_accounts - .iter() - .map(|(address, account)| (*address, Some(*account))); - - 
provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); - - // Commit the transaction (this consumes provider_rw) - provider_rw.commit().unwrap(); - - // Get a new provider to read the committed data - let provider_rw = provider_factory.database_provider_rw().unwrap(); - let tx = provider_rw.tx_ref(); - let hashed_cursor_factory = DatabaseHashedCursorFactory::new(tx); - println!("hashed cursor factory: {:?}", hashed_cursor_factory); - let mut account_cursor = hashed_cursor_factory.hashed_account_cursor().unwrap(); - // - // // Start from the beginning (seek to B256::ZERO to get the first account) - // let mut account_entry = account_cursor.seek(B256::ZERO).unwrap(); - // - // let mut iterated_accounts = Vec::new(); - // - // // Iterate through all accounts - // while let Some((hashed_address, account)) = account_entry { - // iterated_accounts.push((hashed_address, account)); - // - // // Move to next account - // account_entry = account_cursor.next().unwrap(); - // } - // - // // Verify we got all the accounts we inserted - // assert_eq!(iterated_accounts.len(), dummy_accounts.len()); - // - // // Verify the accounts match (by checking hashed addresses) - // let inserted_hashed_addresses: Vec = dummy_accounts - // .iter() - // .map(|(address, _)| keccak256(address)) - // .collect(); - // - // let iterated_hashed_addresses: Vec = iterated_accounts - // .iter() - // .map(|(hashed_address, _)| *hashed_address) - // .collect(); - // - // // Sort both for comparison - // let mut inserted_sorted = inserted_hashed_addresses.clone(); - // inserted_sorted.sort(); - // let mut iterated_sorted = iterated_hashed_addresses.clone(); - // iterated_sorted.sort(); - // - // assert_eq!(inserted_sorted, iterated_sorted); - // - // // Verify account data matches - // for (hashed_address, account) in &iterated_accounts { - // let original_account = dummy_accounts - // .iter() - // .find(|(addr, _)| keccak256(addr) == *hashed_address) - // .unwrap(); - // - // 
assert_eq!(account.nonce, original_account.1.nonce); - // assert_eq!(account.balance, original_account.1.balance); - // assert_eq!(account.bytecode_hash, original_account.1.bytecode_hash); - // } - } -} \ No newline at end of file +// use std::path::Path; +// use std::time::Instant; +// use alloy_primitives::B256; +// use alloy_trie::{HashBuilder, EMPTY_ROOT_HASH}; +// use tracing::{debug, trace}; +// use reth_execution_errors::StateRootError; +// use reth_trie_common::{prefix_set::TriePrefixSets}; +// use crate::{IntermediateStateRootState, StateRoot, StateRootProgress, StorageRoot}; +// use crate::hashed_cursor::{HashedCursor, HashedCursorFactory}; +// use crate::node_iter::{TrieElement, TrieNodeIter}; +// use crate::stats::TrieTracker; +// use crate::trie::StateRootContext; +// use crate::trie_cursor::TrieCursorFactory; +// use crate::walker::TrieWalker; +// use triedb::{Database as TrieDbDatabase, path::{AddressPath, StoragePath}, }; +// use nybbles::Nibbles; +// use triedb::account::Account as TrieDbAccount; +// use alloy_consensus::constants::KECCAK_EMPTY; +// #[derive(Debug)] +// pub struct TrieExtDatabase { +// pub inner: TrieDbDatabase, +// } +// +// impl TrieExtDatabase { +// pub fn new(db_path: impl AsRef) -> Self { +// let db_path = db_path.as_ref(); +// let db = TrieDbDatabase::create_new(db_path).unwrap(); +// Self { +// inner: db, +// } +// } +// } +// +// /// `StateRoot` is used to compute the root node of a state trie. +// #[derive(Debug)] +// pub struct StateRootTrieDb { +// /// The factory for hashed cursors. 
+// pub hashed_cursor_factory: H, +// pub db: TrieExtDatabase +// } +// +// impl StateRootTrieDb { +// /// Creates [`StateRootTrieDb`] with +// pub fn new(hashed_cursor_factory: H, db: TrieExtDatabase) -> Self { +// Self { +// hashed_cursor_factory, +// db +// } +// } +// } +// impl StateRootTrieDb +// where +// H: HashedCursorFactory + Clone, +// { +// pub fn calculate_commit(self) -> Result { +// trace!(target: "trie::state_root", "calculating state root"); +// +// let mut acct_cursor = self.hashed_cursor_factory.hashed_account_cursor()?; +// +// let mut tx = self.db.inner.begin_rw().unwrap(); +// +// // Start from the beginning by seeking to the first account (B256::ZERO) +// let mut account_entry = acct_cursor.next().unwrap(); +// while let Some((hashed_address, account)) = account_entry { +// +// let nibbles = Nibbles::unpack(hashed_address); +// let address_path = AddressPath::new(nibbles); +// +// // Get storage cursor for this account first +// let mut storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor(hashed_address)?; +// +// // Iterate through all storage entries for this account to compute storage root +// // For now, we'll use EMPTY_ROOT_HASH if no storage entries exist +// // TODO: Compute actual storage root from storage entries +// let mut storage_entry = storage_cursor.seek(B256::ZERO)?; +// let storage_root = if storage_entry.is_some() { +// // If there are storage entries, we need to compute the storage root +// // For now, use EMPTY_ROOT_HASH as placeholder +// // In a full implementation, you'd build the storage trie and get its root +// EMPTY_ROOT_HASH +// } else { +// EMPTY_ROOT_HASH +// }; +// +// // Convert reth_primitives_traits::Account to triedb::account::Account +// let triedb_account = TrieDbAccount { +// nonce: account.nonce, +// balance: account.balance, +// code_hash: account.bytecode_hash.unwrap_or(KECCAK_EMPTY), +// storage_root, +// }; +// +// tx.set_account(address_path.clone(), Some(triedb_account)).unwrap(); +// 
+// // Now set storage slots in TrieDB +// while let Some((hashed_storage_key, storage_value)) = storage_entry { +// let storage_path = StoragePath::for_address_path_and_slot_hash(address_path.clone(), Nibbles::unpack(hashed_storage_key)); +// tx.set_storage_slot(storage_path, Some(storage_value)).unwrap(); +// +// storage_entry = storage_cursor.next()?; +// } +// +// account_entry = acct_cursor.next()?; +// } +// let start_commit = Instant::now(); +// tx.commit().unwrap(); +// println!("commit elapsed: {:?}", start_commit.elapsed()); +// Ok(self.db.inner.state_root()) +// } +// } +// +// #[cfg(test)] +// mod tests { +// use tempdir::TempDir; +// use super::{TrieExtDatabase}; +// use crate::hashed_cursor::{HashedCursor, HashedCursorFactory}; +// use reth_provider::{ +// test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, +// ProviderFactory, HashingWriter, DBProvider +// }; +// use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA}; +// use reth_provider::DatabaseProviderFactory; +// use reth_trie_db::DatabaseHashedCursorFactory; +// use alloy_primitives::{Address, U256, keccak256, B256}; +// use reth_primitives_traits::Account; +// +// #[test] +// pub fn test_triedb() { +// let tmp_dir = TempDir::new("test_triedb").unwrap(); +// let file_path = tmp_dir.path().join("test.db"); +// let trie_db = TrieExtDatabase::new(file_path); +// +// let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); +// +// let mut provider_rw = provider_factory.database_provider_rw().unwrap(); +// +// // Generate dummy accounts +// let dummy_accounts: Vec<(Address, Account)> = vec![ +// ( +// Address::with_last_byte(1), +// Account { +// nonce: 10, +// balance: U256::from(1000), +// bytecode_hash: None, +// }, +// ), +// ( +// Address::with_last_byte(2), +// Account { +// nonce: 20, +// balance: U256::from(2000), +// bytecode_hash: None, +// }, +// ), +// ( +// Address::with_last_byte(3), +// Account { +// nonce: 30, +// 
balance: U256::from(3000), +// bytecode_hash: None, +// }, +// ), +// ]; +// +// // Insert accounts into the database +// let accounts_for_hashing = dummy_accounts +// .iter() +// .map(|(address, account)| (*address, Some(*account))); +// +// provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); +// +// // Commit the transaction (this consumes provider_rw) +// provider_rw.commit().unwrap(); +// +// // Get a new provider to read the committed data +// let provider_rw = provider_factory.database_provider_rw().unwrap(); +// let tx = provider_rw.tx_ref(); +// let hashed_cursor_factory = DatabaseHashedCursorFactory::new(tx); +// println!("hashed cursor factory: {:?}", hashed_cursor_factory); +// let mut account_cursor = hashed_cursor_factory.hashed_account_cursor().unwrap(); +// // +// // // Start from the beginning (seek to B256::ZERO to get the first account) +// // let mut account_entry = account_cursor.seek(B256::ZERO).unwrap(); +// // +// // let mut iterated_accounts = Vec::new(); +// // +// // // Iterate through all accounts +// // while let Some((hashed_address, account)) = account_entry { +// // iterated_accounts.push((hashed_address, account)); +// // +// // // Move to next account +// // account_entry = account_cursor.next().unwrap(); +// // } +// // +// // // Verify we got all the accounts we inserted +// // assert_eq!(iterated_accounts.len(), dummy_accounts.len()); +// // +// // // Verify the accounts match (by checking hashed addresses) +// // let inserted_hashed_addresses: Vec = dummy_accounts +// // .iter() +// // .map(|(address, _)| keccak256(address)) +// // .collect(); +// // +// // let iterated_hashed_addresses: Vec = iterated_accounts +// // .iter() +// // .map(|(hashed_address, _)| *hashed_address) +// // .collect(); +// // +// // // Sort both for comparison +// // let mut inserted_sorted = inserted_hashed_addresses.clone(); +// // inserted_sorted.sort(); +// // let mut iterated_sorted = iterated_hashed_addresses.clone(); +// // 
iterated_sorted.sort(); +// // +// // assert_eq!(inserted_sorted, iterated_sorted); +// // +// // // Verify account data matches +// // for (hashed_address, account) in &iterated_accounts { +// // let original_account = dummy_accounts +// // .iter() +// // .find(|(addr, _)| keccak256(addr) == *hashed_address) +// // .unwrap(); +// // +// // assert_eq!(account.nonce, original_account.1.nonce); +// // assert_eq!(account.balance, original_account.1.balance); +// // assert_eq!(account.bytecode_hash, original_account.1.bytecode_hash); +// // } +// } +// } \ No newline at end of file From c8a428f0c1e5734f2cfaba74bdbf55ef5c5a476c Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 22 Dec 2025 10:44:40 +0800 Subject: [PATCH 23/36] refactoring --- Cargo.toml | 4 ++-- crates/payload/basic/src/lib.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 66e418d115f..4543aeebcf5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -311,8 +311,8 @@ lto = "thin" [profile.release] opt-level = 3 lto = "thin" -debug = 1 -strip = "none" +debug = "none" +strip = "symbols" panic = "unwind" codegen-units = 16 diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 44c5f0f1a13..aa2b1f66802 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -520,7 +520,7 @@ where empty_payload: empty_payload.filter(|_| kind != PayloadKind::WaitForPending), }; - (fut, KeepPayloadJobAlive::Yes) + (fut, KeepPayloadJobAlive::No) } } From b12426ea4d260a41fc922b6064b59b20d6289bad Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 22 Dec 2025 14:41:03 +0800 Subject: [PATCH 24/36] update triedb log --- crates/optimism/bin/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index 61a47ebee9b..2501c0630be 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -27,7 +27,7 @@ fn main() { if 
let Err(err) = Cli::::parse().run(async move |builder, rollup_args| { - info!(target: "reth::cli", "Launching node"); + info!(target: "reth::cli", "Launching node triedb"); // For X Layer if rollup_args.xlayer_args.apollo.enabled { From b5a6274754c5b2673617684427d8e2f7eaf1a26e Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 22 Dec 2025 15:40:43 +0800 Subject: [PATCH 25/36] add min fee log --- crates/optimism/payload/src/payload.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 41b825a2b72..2f2c7cc72fa 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -28,6 +28,7 @@ use reth_primitives_traits::{ /// Re-export for use in downstream arguments. pub use op_alloy_rpc_types_engine::OpPayloadAttributes; +use tracing::info; use reth_optimism_primitives::OpPrimitives; /// Optimism Payload Builder Attributes @@ -412,6 +413,16 @@ where chain_spec: &ChainSpec, ) -> Result { let extra_data = if chain_spec.is_jovian_active_at_timestamp(attributes.timestamp()) { + // Log min_base_fee from attributes + if let Some(min_base_fee) = attributes.min_base_fee { + info!( + target: "payload_builder", + parent_number = parent.number(), + timestamp = attributes.timestamp(), + min_base_fee = min_base_fee, + "Using min_base_fee from payload attributes for Jovian block" + ); + } attributes .get_jovian_extra_data( chain_spec.base_fee_params_at_timestamp(attributes.timestamp()), From 16376a8ef963a80cb79c60c794ad02db6396af33 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 22 Dec 2025 15:52:13 +0800 Subject: [PATCH 26/36] add block base fee log --- Cargo.lock | 1 + crates/optimism/evm/Cargo.toml | 2 +- crates/optimism/evm/src/lib.rs | 11 +++++++++++ crates/optimism/payload/src/payload.rs | 10 ---------- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82bd9499b4e..bdaeadeef46 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -9739,6 +9739,7 @@ dependencies = [ "reth-storage-errors", "revm", "thiserror 2.0.17", + "tracing", ] [[package]] diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index d7bbe29330f..c94de8db4f7 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -20,7 +20,7 @@ reth-execution-types.workspace = true reth-storage-errors.workspace = true reth-rpc-eth-api = { workspace = true, optional = true } - +tracing.workspace = true # ethereum alloy-eips.workspace = true alloy-evm.workspace = true diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index e5df16ee2e7..f974fc2f684 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -162,6 +162,17 @@ where parent: &Header, attributes: &Self::NextBlockEnvCtx, ) -> Result, Self::Error> { + let base_fee = self.chain_spec().next_block_base_fee(parent, attributes.timestamp).unwrap_or_default(); + + tracing::info!( + target: "evm::op", + parent_number = parent.number(), + parent_base_fee = parent.base_fee_per_gas().unwrap_or_default(), + next_block_base_fee = base_fee, + timestamp = attributes.timestamp, + gas_limit = attributes.gas_limit, + "Setting base fee for next block EVM environment" + ); Ok(EvmEnv::for_op_next_block( parent, NextEvmEnvAttributes { diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 2f2c7cc72fa..490e3606661 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -413,16 +413,6 @@ where chain_spec: &ChainSpec, ) -> Result { let extra_data = if chain_spec.is_jovian_active_at_timestamp(attributes.timestamp()) { - // Log min_base_fee from attributes - if let Some(min_base_fee) = attributes.min_base_fee { - info!( - target: "payload_builder", - parent_number = parent.number(), - timestamp = attributes.timestamp(), - min_base_fee = min_base_fee, - "Using min_base_fee from payload 
attributes for Jovian block" - ); - } attributes .get_jovian_extra_data( chain_spec.base_fee_params_at_timestamp(attributes.timestamp()), From 0aa95b4cfea9c1c7041db67a13dd6c19fe8b02d8 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 22 Dec 2025 19:23:52 +0800 Subject: [PATCH 27/36] merge with latest dev --- Cargo.lock | 115 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 112 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bdaeadeef46..756ec5b4aff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -897,7 +897,7 @@ dependencies = [ "arrayvec", "derive_arbitrary", "derive_more", - "nybbles", + "nybbles 0.4.6", "proptest", "proptest-derive 0.5.1", "serde", @@ -4233,6 +4233,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "gcc" version = "0.3.55" @@ -6238,6 +6247,19 @@ dependencies = [ "libc", ] +[[package]] +name = "nybbles" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +dependencies = [ + "alloy-rlp", + "const-hex", + "proptest", + "serde", + "smallvec", +] + [[package]] name = "nybbles" version = "0.4.6" @@ -7501,6 +7523,15 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + [[package]] name = 
"reqwest" version = "0.11.27" @@ -7774,6 +7805,7 @@ dependencies = [ "tokio", "tokio-stream", "tracing", + "triedb", ] [[package]] @@ -8094,8 +8126,11 @@ dependencies = [ "alloy-consensus", "alloy-genesis", "alloy-primitives", + "alloy-trie", "boyer-moore-magiclen", + "codspeed-criterion-compat", "eyre", + "rand 0.8.5", "reth-chainspec", "reth-codecs", "reth-config", @@ -8109,12 +8144,16 @@ dependencies = [ "reth-provider", "reth-stages-types", "reth-static-file-types", + "reth-storage-api", "reth-trie", + "reth-trie-common", "reth-trie-db", "serde", "serde_json", + "tempdir", "thiserror 2.0.17", "tracing", + "triedb", ] [[package]] @@ -8351,7 +8390,9 @@ dependencies = [ "reth-engine-primitives", "reth-ethereum-engine-primitives", "reth-optimism-chainspec", + "reth-optimism-forks", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-storage-api", "reth-transaction-pool", @@ -8423,6 +8464,8 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-rpc-types-engine", + "alloy-signer", + "alloy-signer-local", "assert_matches", "codspeed-criterion-compat", "crossbeam-channel", @@ -8455,6 +8498,7 @@ dependencies = [ "reth-exex-types", "reth-metrics", "reth-network-p2p", + "reth-node-core", "reth-node-ethereum", "reth-node-metrics", "reth-payload-builder", @@ -8467,10 +8511,12 @@ dependencies = [ "reth-stages", "reth-stages-api", "reth-static-file", + "reth-storage-api", "reth-tasks", "reth-testing-utils", "reth-tracing", "reth-trie", + "reth-trie-common", "reth-trie-parallel", "reth-trie-sparse", "reth-trie-sparse-parallel", @@ -8483,6 +8529,7 @@ dependencies = [ "thiserror 2.0.17", "tokio", "tracing", + "triedb", "xlayer-db", ] @@ -8850,6 +8897,7 @@ dependencies = [ "reth-storage-errors", "reth-trie-common", "revm", + "tracing", ] [[package]] @@ -8884,7 +8932,7 @@ dependencies = [ "alloy-evm", "alloy-primitives", "alloy-rlp", - "nybbles", + "nybbles 0.4.6", "reth-storage-errors", "thiserror 2.0.17", ] @@ -9796,11 +9844,13 @@ 
name = "reth-optimism-node" version = "1.9.3" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-genesis", "alloy-network", "alloy-primitives", "alloy-rpc-types-engine", "alloy-rpc-types-eth", + "alloy-sol-types", "clap", "eyre", "futures", @@ -9830,6 +9880,7 @@ dependencies = [ "reth-optimism-storage", "reth-optimism-txpool", "reth-payload-builder", + "reth-payload-primitives", "reth-payload-util", "reth-primitives-traits", "reth-provider", @@ -10172,6 +10223,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", + "alloy-trie", "assert_matches", "dashmap 6.1.0", "eyre", @@ -10207,9 +10259,11 @@ dependencies = [ "revm-database-interface", "revm-state", "strum 0.27.2", + "tempdir", "tempfile", "tokio", "tracing", + "triedb", ] [[package]] @@ -10696,6 +10750,7 @@ dependencies = [ "tokio", "tokio-stream", "tracing", + "triedb", ] [[package]] @@ -10915,6 +10970,7 @@ dependencies = [ "reth-storage-errors", "reth-trie-common", "revm-database", + "triedb", ] [[package]] @@ -11097,22 +11153,28 @@ dependencies = [ "codspeed-criterion-compat", "itertools 0.14.0", "metrics", + "nybbles 0.3.4", + "nybbles 0.4.6", "parking_lot", "pretty_assertions", "proptest", "proptest-arbitrary-interop", + "reth-chainspec", "reth-ethereum-primitives", "reth-execution-errors", "reth-metrics", "reth-primitives-traits", + "reth-provider", "reth-stages-types", "reth-storage-errors", "reth-tracing", "reth-trie-common", + "reth-trie-db", "reth-trie-sparse", "revm-database", "revm-state", "tracing", + "triedb", "triehash", ] @@ -11135,7 +11197,7 @@ dependencies = [ "derive_more", "hash-db", "itertools 0.14.0", - "nybbles", + "nybbles 0.4.6", "plain_hasher", "proptest", "proptest-arbitrary-interop", @@ -11164,13 +11226,17 @@ dependencies = [ "reth-execution-errors", "reth-primitives-traits", "reth-provider", + "reth-storage-api", "reth-trie", "reth-trie-common", + "reth-trie-db", "revm", "revm-database", "serde_json", "similar-asserts", + "tempdir", "tracing", + 
"triedb", "triehash", ] @@ -11937,6 +12003,17 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sealed" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22f968c5ea23d555e670b449c1c5e7b2fc399fdaec1d304a17cd48e288abc107" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "sec1" version = "0.7.3" @@ -12698,6 +12775,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "tempdir" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" +dependencies = [ + "rand 0.4.6", + "remove_dir_all", +] + [[package]] name = "tempfile" version = "3.23.0" @@ -13439,6 +13526,28 @@ dependencies = [ "syn 2.0.111", ] +[[package]] +name = "triedb" +version = "0.1.0" +source = "git+https://github.com/base/triedb.git#cedd1a33084ddb2724240193c39df3fbdec1dba0" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-trie", + "arrayvec", + "fxhash", + "memmap2", + "metrics", + "metrics-derive", + "parking_lot", + "proptest", + "proptest-derive 0.6.0", + "rayon", + "sealed", + "static_assertions", + "zerocopy", +] + [[package]] name = "triehash" version = "0.8.4" From 45ae55d907bfe60c610530c8d253a968f9f513eb Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 22 Dec 2025 19:33:07 +0800 Subject: [PATCH 28/36] add triedb state root calc time --- crates/evm/evm/src/execute.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 3dbd461f327..3c92367059d 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -22,11 +22,13 @@ use reth_storage_api::{PlainPostState, StateProvider}; pub use reth_storage_errors::provider::ProviderError; use 
reth_trie_common::{updates::TrieUpdates, HashedPostState}; use std::collections::HashMap; +use std::time::Instant; use alloy_primitives::U256; use revm::{ context::result::ExecutionResult, database::{states::bundle_state::BundleRetention, BundleState, State}, }; +use tracing::info; /// A type that knows how to execute a block. It is assumed to operate on a /// [`crate::Evm`] internally and use [`State`] as database. @@ -547,9 +549,11 @@ where } // Calculate state root using triedb method + let start = Instant::now(); let pr = state.state_root_with_updates_triedb(plain_state); let (triedb_state_root, triedb_trie_updates) = pr.map_err(BlockExecutionError::other)?; + info!("state_root_with_updates_triedb, elapsed: {:?}", start.elapsed().as_millis()); // // Compare the two state roots // if mdbx_state_root != triedb_state_root { From 3410066e770c65bcae94951176feb205b74578e7 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 22 Dec 2025 19:34:28 +0800 Subject: [PATCH 29/36] add triedb state root calc time --- crates/evm/evm/src/execute.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 3c92367059d..d679fc7d1d7 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -518,7 +518,9 @@ where db.merge_transitions(BundleRetention::Reverts); // calculate the state root + let start = Instant::now(); let hashed_state = state.hashed_post_state(&db.bundle_state); + info!("hashed_post_state, elapsed: {:?}", start.elapsed().as_millis()); // // Calculate state root using the previous method (mdbx) // let (mdbx_state_root, mdbx_trie_updates) = state From 89cd45e9504c7652d6c106a1b569bdf77dd03766 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Tue, 23 Dec 2025 09:52:01 +0800 Subject: [PATCH 30/36] add log --- .../storage/provider/src/providers/state/latest.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git 
a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 1c24f1cdbbd..5981c452f38 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -8,6 +8,7 @@ use reth_primitives_traits::{Account, Bytecode}; use reth_storage_api::{BytecodeReader, DBProvider, PlainPostState, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::sync::{Arc, OnceLock}; +use std::time::Instant; use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, @@ -110,10 +111,10 @@ impl StateRootProvider for LatestStateProviderRef<' &self, plain_state: PlainPostState, ) -> ProviderResult<(B256, TrieUpdates)> { - tracing::debug!("latest_state_provider state_root_with_updates_triedb"); + tracing::info!("latest_state_provider state_root_with_updates_triedb"); let triedb_provider = get_triedb_provider() .ok_or_else(|| ProviderError::UnsupportedProvider)?; - + let start = Instant::now(); let mut overlay_mut = OverlayStateMut::new(); for (address, account_opt) in &plain_state.accounts { @@ -157,16 +158,23 @@ impl StateRootProvider for LatestStateProviderRef<' } let overlay = overlay_mut.freeze(); + let elapsed = start.elapsed().as_millis(); + tracing::info!("latest_state_provider overlay prepare elapsed: {elapsed:?}"); + let start = Instant::now(); let mut tx = triedb_provider.inner.begin_ro() .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to begin triedb transaction: {e:?}")))?; let result = tx.compute_root_with_overlay(overlay) .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to compute triedb root: {e:?}")))?; + let elapsed = start.elapsed().as_millis(); + tracing::info!("latest_state_provider compute_root_with_overlay elapsed: {elapsed:?}"); + let start = Instant::now(); tx.commit() .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to commit triedb transaction: 
{e:?}")))?; - + let elapsed = start.elapsed().as_millis(); + tracing::info!("latest_state_provider commit elapsed: {elapsed:?}"); Ok((result.root, TrieUpdates::default())) } } From ec7d8d73d21cc516051354c9c8bf09900a817aa5 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Tue, 23 Dec 2025 12:35:12 +0800 Subject: [PATCH 31/36] add fix cache --- Cargo.lock | 20 +++++++ Cargo.toml | 3 +- crates/evm/evm/src/execute.rs | 18 ++++--- crates/storage/provider/Cargo.toml | 3 +- .../provider/src/providers/database/mod.rs | 9 ++-- .../provider/src/providers/state/latest.rs | 54 +++++++++++++++---- 6 files changed, 85 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 756ec5b4aff..01f6e187e4c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3971,6 +3971,15 @@ dependencies = [ "windows-sys 0.60.2", ] +[[package]] +name = "fixed-cache" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba59b6c98ba422a13f17ee1305c995cb5742bba7997f5b4d9af61b2ff0ffb213" +dependencies = [ + "equivalent", +] + [[package]] name = "fixed-hash" version = "0.8.0" @@ -7362,6 +7371,15 @@ dependencies = [ "rand_core 0.9.3", ] +[[package]] +name = "rapidhash" +version = "4.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2988730ee014541157f48ce4dcc603940e00915edc3c7f9a8d78092256bb2493" +dependencies = [ + "rustversion", +] + [[package]] name = "ratatui" version = "0.29.0" @@ -10227,11 +10245,13 @@ dependencies = [ "assert_matches", "dashmap 6.1.0", "eyre", + "fixed-cache", "itertools 0.14.0", "metrics", "notify", "parking_lot", "rand 0.9.2", + "rapidhash", "rayon", "reth-chain-state", "reth-chainspec", diff --git a/Cargo.toml b/Cargo.toml index 869e8478cad..c6a725f4bf0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -591,7 +591,8 @@ mini-moka = "0.10" tar-no-std = { version = "0.3.2", default-features = false } miniz_oxide = { version = "0.8.4", default-features = false } chrono = "0.4.41" - +fixed-cache = "0.1" 
+rapidhash = "4.2.0" # metrics metrics = "0.24.0" metrics-derive = "0.1" diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index d679fc7d1d7..7b79df5c029 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -518,9 +518,9 @@ where db.merge_transitions(BundleRetention::Reverts); // calculate the state root - let start = Instant::now(); - let hashed_state = state.hashed_post_state(&db.bundle_state); - info!("hashed_post_state, elapsed: {:?}", start.elapsed().as_millis()); + // let start = Instant::now(); + // let hashed_state = state.hashed_post_state(&db.bundle_state); + // info!("hashed_post_state, elapsed: {:?}", start.elapsed().as_millis()); // // Calculate state root using the previous method (mdbx) // let (mdbx_state_root, mdbx_trie_updates) = state @@ -528,9 +528,12 @@ where // .map_err(BlockExecutionError::other)?; // Convert BundleState to PlainPostState for triedb computation + let start = Instant::now(); + tracing::info!("BasicBlockBuilder::finish, plain_state total_accts: {:?}", db.bundle_state.state().len()); + + let mut total_storage = 0; let mut plain_state = PlainPostState::default(); for (address, bundle_account) in db.bundle_state.state() { - // Convert account - None if destroyed, Some(Account) if exists/updated let account = if bundle_account.was_destroyed() || bundle_account.info.is_none() { None } else { @@ -538,7 +541,6 @@ where }; plain_state.accounts.insert(*address, account); - // Convert storage (BundleState uses U256 keys, PlainPostState uses B256 keys) let mut storage_map = HashMap::new(); for (slot, storage_slot) in &bundle_account.storage { // Convert U256 slot to B256 (32-byte representation) @@ -547,8 +549,12 @@ where } if !storage_map.is_empty() { plain_state.storages.insert(*address, storage_map); + total_storage += bundle_account.storage.len(); } + } + tracing::info!("BasicBlockBuilder::finish, plain_state total_storage: {:?}", total_storage); + info!("BasicBlockBuilder::finish, 
convert elapsed: {:?}", start.elapsed().as_millis()); // Calculate state root using triedb method let start = Instant::now(); @@ -591,7 +597,7 @@ where let block = RecoveredBlock::new_unhashed(block, senders); - Ok(BlockBuilderOutcome { execution_result: result, hashed_state, trie_updates, block }) + Ok(BlockBuilderOutcome { execution_result: result, hashed_state: HashedPostState::default(), trie_updates, block }) } fn executor_mut(&mut self) -> &mut Self::Executor { diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 8c158e4e07d..cba0917d922 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -56,7 +56,8 @@ parking_lot.workspace = true dashmap = { workspace = true, features = ["inline"] } strum.workspace = true eyre.workspace = true - +fixed-cache.workspace=true +rapidhash.workspace = true # test-utils reth-ethereum-engine-primitives = { workspace = true, optional = true } tokio = { workspace = true, features = ["sync"], optional = true } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 40f6b1c3e68..0a7af495fee 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -33,14 +33,15 @@ use std::{ path::Path, sync::Arc, }; - +use std::ops::Add; use tracing::trace; mod provider; pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW}; - +use fixed_cache::Cache; use super::ProviderNodeTypes; use reth_trie::KeccakKeyHasher; +use triedb::path::AddressPath; mod builder; pub use builder::{ProviderFactoryBuilder, ReadOnlyConfig}; @@ -86,7 +87,9 @@ impl ProviderFactory { ) -> Self { // Initialize the static triedb_provider let _ = crate::providers::state::latest::set_triedb_provider(triedb_provider.clone()); - + let cache: Cache = Cache::new(65536, Default::default()); + let _ = 
crate::providers::state::latest::set_fixed_cache(cache); + Self { db, chain_spec, diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 5981c452f38..fb435842ae3 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -1,3 +1,4 @@ +use std::ops::Add; use crate::{ providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, HashedPostStateProvider, StateProvider, StateRootProvider, @@ -27,9 +28,11 @@ use triedb::{ overlay::{OverlayStateMut, OverlayValue}, path::{AddressPath, StoragePath}, }; +use fixed_cache::{static_cache,Cache}; /// Static storage for the triedb provider instance static TRIEDB_PROVIDER: OnceLock> = OnceLock::new(); +static FIXED_CACHE: OnceLock> = OnceLock::new(); /// Initialize the static triedb provider pub fn set_triedb_provider(provider: Arc) -> Result<(), Arc> { @@ -40,6 +43,10 @@ pub fn set_triedb_provider(provider: Arc Option<&'static Arc> { TRIEDB_PROVIDER.get() } +pub fn set_fixed_cache(cache: Cache) -> Result<(), Cache> { + tracing::info!("set_fixed_cache"); + FIXED_CACHE.set(cache) +} /// State provider over latest state that takes tx reference. 
/// @@ -115,33 +122,53 @@ impl StateRootProvider for LatestStateProviderRef<' let triedb_provider = get_triedb_provider() .ok_or_else(|| ProviderError::UnsupportedProvider)?; let start = Instant::now(); + let address_cache = FIXED_CACHE.get().unwrap(); let mut overlay_mut = OverlayStateMut::new(); - + for (address, account_opt) in &plain_state.accounts { - let address_path = AddressPath::for_address(*address); - + let address_path = address_cache.get_or_insert_with(*address, |address| { + AddressPath::for_address(*address) + }); if let Some(account) = account_opt { let trie_account = TrieDBAccount::new( account.nonce, account.balance, - EMPTY_ROOT_HASH, // Storage root will be computed from storage overlay + EMPTY_ROOT_HASH, account.bytecode_hash.unwrap_or(KECCAK_EMPTY), ); overlay_mut.insert(address_path.clone().into(), Some(OverlayValue::Account(trie_account))); } else { - // Account is being destroyed overlay_mut.insert(address_path.clone().into(), None); } } - + let total_accts = plain_state.accounts.len(); + tracing::info!("latest_state_provider total acct: {total_accts:?}"); + + let mut total_storage = 0; + for (address, storage) in &plain_state.storages { - let address_path = AddressPath::for_address(*address); - + let address_path = address_cache.get_or_insert_with(*address, |address| { + AddressPath::for_address(*address) + }); + + total_storage += storage.len(); for (storage_key, storage_value) in storage { let raw_slot = U256::from_be_slice(storage_key.as_slice()); + let storage_key_typed = StorageKey::from(raw_slot); + + // let storage_path = storage_path_cache.get_or_insert_with( + // (*address, storage_key_typed), + // |(address, key)| { + // StoragePath::for_address_path_and_slot( + // address_path.clone(), + // *key, + // ) + // } + // ); + let storage_path = StoragePath::for_address_path_and_slot( address_path.clone(), - StorageKey::from(raw_slot), + storage_key_typed, ); if storage_value.is_zero() { @@ -156,11 +183,16 @@ impl StateRootProvider for 
LatestStateProviderRef<' } } } - - let overlay = overlay_mut.freeze(); + tracing::info!("latest_state_provider total storage: {total_storage:?}"); let elapsed = start.elapsed().as_millis(); tracing::info!("latest_state_provider overlay prepare elapsed: {elapsed:?}"); + let start = Instant::now(); + let overlay = overlay_mut.freeze(); + let elapsed = start.elapsed().as_millis(); + tracing::info!("latest_state_provider overlay freeze elapsed: {elapsed:?}"); + + let start = Instant::now(); let mut tx = triedb_provider.inner.begin_ro() .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to begin triedb transaction: {e:?}")))?; From 4b150f6535352e8d8d0f069777e5994b2173d5b4 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Tue, 23 Dec 2025 15:55:34 +0800 Subject: [PATCH 32/36] fix --- crates/engine/primitives/src/config.rs | 2 +- crates/engine/tree/src/tree/mod.rs | 2 +- .../db-common/src/bin/state_root_overlay.rs | 7 ----- crates/storage/db-common/src/init.rs | 2 -- .../src/providers/database/provider.rs | 7 +++-- .../provider/src/providers/triedb/mod.rs | 29 +++++++++++++++++-- 6 files changed, 34 insertions(+), 15 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 0b9b7d9f821..abb79445f41 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -1,7 +1,7 @@ //! Engine tree configuration. /// Triggers persistence when the number of canonical blocks in memory exceeds this threshold. -pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; +pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 20; /// How close to the canonical head we persist blocks. 
pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0; diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index a3046262ca0..d541f86adf9 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1298,7 +1298,7 @@ where debug!(target: "engine::tree", count=blocks_to_persist.len(), blocks = ?blocks_to_persist.iter().map(|block| block.recovered_block().num_hash()).collect::>(), "Persisting blocks"); let (tx, rx) = oneshot::channel(); let _ = self.persistence.save_blocks(blocks_to_persist, tx); - + info!("start save blocks"); self.persistence_state.start_save(highest_num_hash, rx); } diff --git a/crates/storage/db-common/src/bin/state_root_overlay.rs b/crates/storage/db-common/src/bin/state_root_overlay.rs index 129a1994258..5523090054c 100644 --- a/crates/storage/db-common/src/bin/state_root_overlay.rs +++ b/crates/storage/db-common/src/bin/state_root_overlay.rs @@ -44,8 +44,6 @@ fn main() -> eyre::Result<()> { let main_file_name_path = dir.path().join("triedb"); let triedb = Database::create_new(&main_file_name_path).unwrap(); - // let tdb_pre_root = triedb.state_root(); - setup_tdb_database(&triedb, &base_addresses, &base_accounts_map, &base_storage_map).unwrap(); let mut account_overlay_mut = OverlayStateMut::new(); @@ -112,11 +110,6 @@ fn main() -> eyre::Result<()> { ); let provider_factory = create_test_provider_factory_with_chain_spec(empty_chain_spec); - // let db_provider_ro_pre = provider_factory.database_provider_ro()?; - // let latest_ro_pre = LatestStateProvider::new(db_provider_ro_pre); - // let empty_state = HashedPostState::default(); - // let (mdbx_pre_root, _) = latest_ro_pre.state_root_with_updates(empty_state)?; - // Insert base data { let mut provider_rw = provider_factory.provider_rw()?; diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 6c6f48fb67e..7c163e56f0b 100644 --- a/crates/storage/db-common/src/init.rs +++ 
b/crates/storage/db-common/src/init.rs @@ -30,8 +30,6 @@ use serde::{Deserialize, Serialize}; use std::io::BufRead; use tracing::{debug, error, info, trace}; use reth_trie::{trie_cursor::{TrieCursor, TrieCursorFactory}}; -#[cfg(feature = "trie-db-ext")] -use crate::init_triedb::calculate_state_root_with_triedb; use reth_provider::providers::state::latest::get_triedb_provider; use triedb::{ account::Account as TrieDBAccount, diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 9d3d2a080d7..53cd933d3ad 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -83,9 +83,10 @@ use std::{ ops::{Deref, DerefMut, Not, Range, RangeBounds, RangeFrom, RangeInclusive}, sync::Arc, }; -use tracing::{debug, trace}; +use tracing::{debug, info, trace}; use alloy_consensus::constants::KECCAK_EMPTY; use alloy_trie::EMPTY_ROOT_HASH; +use tokio::time::Instant; use triedb::{ account::Account as TrieDBAccount, path::{AddressPath, StoragePath}, @@ -353,6 +354,7 @@ impl DatabaseProvider DatabaseProvider) -> Self { let db_path = db_path.as_ref(); let db = if db_path.exists() { - TrieDbDatabase::open(db_path).unwrap() + println!("Opening triedb database at {}", db_path.display()); + // Try to open existing database + match TrieDbDatabase::open(db_path) { + Ok(db) => db, + Err(e) => { + println!("Failed to open existing triedb database: {e:?}. 
Removing and creating new database."); + // Remove the existing directory and create fresh + if db_path.is_dir() { + std::fs::remove_dir_all(db_path).unwrap_or_else(|e| { + panic!("Failed to remove existing triedb directory at {:?}: {e:?}", db_path); + }); + } else { + std::fs::remove_file(db_path).unwrap_or_else(|e| { + panic!("Failed to remove existing triedb file at {:?}: {e:?}", db_path); + }); + } + // Ensure parent directory exists + if let Some(parent) = db_path.parent() { + std::fs::create_dir_all(parent).unwrap(); + } + TrieDbDatabase::create_new(db_path).unwrap() + } + } } else { + // Ensure parent directory exists + if let Some(parent) = db_path.parent() { + std::fs::create_dir_all(parent).unwrap(); + } TrieDbDatabase::create_new(db_path).unwrap() }; Self { inner: Arc::new(db), } } - pub fn set_account( &self, address: Address, From 515aafe72f9f88f51d57b30643a62da25afd912d Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Wed, 24 Dec 2025 11:43:53 +0800 Subject: [PATCH 33/36] complete state_root_overlay benchmark --- Cargo.lock | 19 + crates/node/core/src/args/database.rs | 3 +- crates/optimism/bin/Cargo.toml | 25 +- crates/optimism/bin/src/main.rs | 15 + crates/optimism/bin/src/state_root_overlay.rs | 207 +++++++++++ crates/optimism/bin/src/util.rs | 348 ++++++++++++++++++ crates/storage/db-common/Cargo.toml | 1 + .../db-common/src/bin/state_root_overlay.rs | 37 +- 8 files changed, 649 insertions(+), 6 deletions(-) create mode 100644 crates/optimism/bin/src/state_root_overlay.rs create mode 100644 crates/optimism/bin/src/util.rs diff --git a/Cargo.lock b/Cargo.lock index 01f6e187e4c..75114bc20fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6419,9 +6419,19 @@ dependencies = [ name = "op-reth" version = "1.9.3" dependencies = [ + "alloy-genesis", + "alloy-primitives", + "alloy-trie", "clap", + "eyre", + "rand 0.9.2", "reth-apollo", + "reth-chainspec", "reth-cli-util", + "reth-db", + "reth-db-common", + "reth-fs-util", + "reth-node-types", 
"reth-optimism-chainspec", "reth-optimism-cli", "reth-optimism-consensus", @@ -6431,7 +6441,15 @@ dependencies = [ "reth-optimism-payload-builder", "reth-optimism-primitives", "reth-optimism-rpc", + "reth-primitives-traits", + "reth-provider", + "reth-storage-api", + "reth-trie-common", + "tempdir", "tracing", + "tracing-subscriber 0.3.22", + "triedb", + "uuid", "xlayer-db", "xlayer-rpc", ] @@ -13776,6 +13794,7 @@ checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" dependencies = [ "getrandom 0.3.4", "js-sys", + "rand 0.9.2", "wasm-bindgen", ] diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index 6f1d3bfc711..a6d4a34087f 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -62,7 +62,8 @@ impl DatabaseArgs { Some(0) => Some(MaxReadTransactionDuration::Unbounded), // if 0, disable timeout Some(secs) => Some(MaxReadTransactionDuration::Set(Duration::from_secs(secs))), }; - + tracing::info!("mdbx config, exclusive {:?}, max_read_transaction_duration {:?}, geometry_max_size {:?}, growth_step {:?}, max_readers {:?}, sync_mode {:?}", + self.exclusive, max_read_transaction_duration, self.max_size, self.growth_step, self.max_readers, self.sync_mode); reth_db::mdbx::DatabaseArguments::new(client_version) .with_log_level(self.log_level) .with_exclusive(self.exclusive) diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 0708cc57d51..38f9f2695e9 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -20,14 +20,29 @@ reth-optimism-payload-builder.workspace = true reth-optimism-primitives.workspace = true reth-optimism-forks.workspace = true reth-apollo.workspace = true - +reth-chainspec.workspace = true +alloy-genesis.workspace = true +reth-provider.workspace = true +reth-storage-api.workspace = true +reth-trie-common.workspace = true +reth-db.workspace = true +reth-db-common.workspace = true 
+reth-node-types.workspace = true +reth-fs-util.workspace = true +reth-primitives-traits.workspace = true +alloy-primitives.workspace = true +alloy-trie.workspace = true +tracing-subscriber.workspace = true clap = { workspace = true, features = ["derive", "env"] } tracing.workspace = true - +uuid = { version = "1", features = ["v4", "fast-rng"] } # xlayer xlayer-rpc.workspace = true xlayer-db.workspace = true - +tempdir.workspace = true +triedb.workspace = true +eyre.workspace = true +rand.workspace = true [lints] workspace = true @@ -60,3 +75,7 @@ min-trace-logs = ["tracing/release_max_level_trace"] [[bin]] name = "op-reth" path = "src/main.rs" + +[[bin]] +name = "state_root_overlay" +path = "src/state_root_overlay.rs" diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index a969df525f3..9d8f0279bbb 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -10,6 +10,9 @@ use std::{path::Path, sync::Arc}; use tracing::error; use xlayer_db::utils::{initialize, set_enable_inner_tx}; use xlayer_rpc::utils::{XlayerExt, XlayerExtApiServer}; +use tracing_subscriber::fmt::format::FmtSpan; +use tracing_subscriber::{fmt, prelude::*, Registry}; +use uuid::Uuid; #[global_allocator] static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); @@ -24,6 +27,18 @@ fn main() { } } + let fmt_layer = fmt::layer() + .with_current_span(true) // include span fields on each log + .with_span_list(false) // or true to show the full stack + .with_span_events(FmtSpan::NONE); + + Registry::default() + .with(fmt_layer) + .init(); + + let root = tracing::info_span!("app", trace_id = %Uuid::now_v4()); + let _enter = root.enter(); + if let Err(err) = Cli::::parse().run(async move |builder, rollup_args| { info!(target: "reth::cli", "Launching node triedb"); diff --git a/crates/optimism/bin/src/state_root_overlay.rs b/crates/optimism/bin/src/state_root_overlay.rs new file mode 100644 index 
00000000000..50bcea14a5a --- /dev/null +++ b/crates/optimism/bin/src/state_root_overlay.rs @@ -0,0 +1,207 @@ +use alloy_primitives::{keccak256, Address, B256, U256, StorageKey, StorageValue}; +use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; +use reth_optimism_chainspec::OpChainSpecBuilder; +use reth_provider::{ + test_utils::create_test_provider_factory_with_chain_spec, + DatabaseProviderFactory, HashingWriter, LatestStateProvider, TrieWriter, +}; +use reth_primitives_traits::Account; +use reth_storage_api::{StateRootProvider}; +use reth_trie_common::{HashedPostState, HashedStorage}; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Instant; +use alloy_genesis::Genesis; +use alloy_primitives::map::B256Map; +use tempdir::TempDir; +use triedb::{ + account::Account as TrieDBAccount, + overlay::{OverlayStateMut, OverlayValue}, + path::{AddressPath, StoragePath}, + Database, +}; +use reth_db::{init_db, ClientVersion, DatabaseEnv}; +use reth_db::mdbx::DatabaseArguments; +use reth_db_common::init::compute_state_root; +use reth_node_types::NodeTypesWithDBAdapter; +use reth_optimism_node::OpNode; +use reth_optimism_primitives::OpPrimitives; +use crate::util::{setup_tdb_database}; + +mod util; + +fn main() -> eyre::Result<()> { + println!("Testing overlay state root calculation methods..."); + + // Generate shared test data + let (base_addresses, base_accounts_map, base_storage_map, overlay_acct, overlay_storage) = + util::generate_shared_test_data( + util::DEFAULT_SETUP_DB_EOA_SIZE, + util::DEFAULT_SETUP_DB_CONTRACT_SIZE, + util::DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, + util::BATCH_SIZE, + ); + + println!("Generated {} base addresses, {} overlay accounts", + base_addresses.len(), overlay_acct.len()); + + let dir = TempDir::new("triedb_overlay_base").unwrap(); + let main_file_name_path = dir.path().join("triedb"); + let triedb = Database::create_new(&main_file_name_path).unwrap(); + + setup_tdb_database(&triedb, &base_addresses, &base_accounts_map, 
&base_storage_map).unwrap(); + + let mut account_overlay_mut = OverlayStateMut::new(); + + for (address, account) in &overlay_acct { + let address_path = AddressPath::for_address(*address); + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + account_overlay_mut.insert(address_path.clone().into(), Some(OverlayValue::Account(trie_account))); + } + + // Add overlay storage + for (address, storage) in &overlay_storage { + let address_path = AddressPath::for_address(*address); + for (storage_key, storage_value) in storage { + // Convert B256 back to U256 to get the raw storage slot + let raw_slot = U256::from_be_slice(storage_key.as_slice()); + let storage_path = StoragePath::for_address_path_and_slot( + address_path.clone(), + StorageKey::from(raw_slot), + ); + + if storage_value.is_zero() { + // Zero value means delete the storage slot + account_overlay_mut.insert( + storage_path.clone().into(), + None, // ✅ Delete slot for zero values + ); + } else { + // Non-zero value: insert the storage entry + account_overlay_mut.insert( + storage_path.clone().into(), + Some(OverlayValue::Storage(StorageValue::from_be_slice( + storage_value.to_be_bytes::<32>().as_slice() + ))), + ); + } + } + } + let account_overlay = account_overlay_mut.freeze(); + + let start = Instant::now(); + let tx = triedb.begin_ro()?; + let triedb_root = tx.compute_root_with_overlay(account_overlay.clone())?; + println!("triedb_root = {:?}, overlay state root elapsed = {:?} ms", triedb_root.root, start.elapsed().as_millis()); + + let start = Instant::now(); + tx.commit()?; + println!("triedb commit elapsed = {:?} ns", start.elapsed().as_nanos()); + + // ===== Setup MDBX ===== + println!("\nSetting up MDBX..."); + // Create a chain spec with empty genesis allocation but keep base mainnet hardforks + let empty_chain_spec = Arc::new( + OpChainSpecBuilder::base_mainnet() + .genesis(Genesis::default()) // Empty 
genesis with no alloc + .build(), + ); + + + let datadir = tempdir::TempDir::new("state_root_overlay")?; + let db_path = datadir.path().join("mdbx"); + let sf_path = datadir.path().join("static_files"); + let triedb_path = datadir.path().join("triedb"); + reth_fs_util::create_dir_all(&db_path)?; + reth_fs_util::create_dir_all(&sf_path)?; + reth_fs_util::create_dir_all(&triedb_path)?; + + let db = Arc::new(init_db( + &db_path, + DatabaseArguments::new(ClientVersion::default()), + )?); + + use reth_provider::providers::StaticFileProvider; + let sfp: StaticFileProvider = StaticFileProvider::read_write(sf_path)?; + + use reth_provider::providers::triedb::TriedbProvider; + let triedb_provider = Arc::new(TriedbProvider::new(&triedb_path)); + + use reth_provider::providers::ProviderFactory; + let provider_factory: ProviderFactory>> = + ProviderFactory::new( + db, + empty_chain_spec.clone(), + sfp, + triedb_provider, + ); + // Insert base data + { + let mut provider_rw = provider_factory.provider_rw()?; + let accounts: Vec<(Address, Account)> = base_accounts_map.iter().map(|(a, acc)| (*a, *acc)).collect(); + let storage_entries: Vec<(Address, Vec)> = base_storage_map + .iter() + .map(|(address, storage)| { + let entries: Vec = storage + .iter() + .map(|(key, value)| reth_primitives_traits::StorageEntry { + key: *key, + value: *value, + }) + .collect(); + (*address, entries) + }) + .collect(); + + let accounts_for_hashing = accounts.iter().map(|(address, account)| (*address, Some(*account))); + provider_rw.insert_account_for_hashing(accounts_for_hashing)?; + provider_rw.insert_storage_for_hashing(storage_entries)?; + + let ret = compute_state_root(provider_rw.as_ref(), None)?; + provider_rw.commit()?; + + } + + // Build HashedPostState from overlay + let mut hashed_accounts: Vec<(B256, Option)> = overlay_acct + .iter() + .map(|(address, account)| { + let hashed = keccak256(address); + (hashed, Some(*account)) + }) + .collect(); + + let mut hashed_storages: B256Map = 
HashMap::default(); + for (address, storage) in &overlay_storage { + let hashed_address = keccak256(address); + let hashed_storage = HashedStorage::from_iter( + false, + storage.iter().map(|(key, value)| { + let hashed_slot = keccak256(*key); + (hashed_slot, *value) + }), + ); + hashed_storages.insert(hashed_address, hashed_storage); + } + + let hashed_state = HashedPostState { + accounts: hashed_accounts.into_iter().collect(), + storages: hashed_storages, + }; + + let db_provider_ro = provider_factory.database_provider_ro()?; + let latest_ro = LatestStateProvider::new(db_provider_ro); + + let start = Instant::now(); + let (mdbx_root, _updates) = latest_ro.state_root_with_updates(hashed_state)?; + + println!("MDBX state root: {:?}, overlay state root elapsed {:?} ms", mdbx_root, start.elapsed().as_millis()); + assert_eq!(mdbx_root, triedb_root.root); + + Ok(()) +} \ No newline at end of file diff --git a/crates/optimism/bin/src/util.rs b/crates/optimism/bin/src/util.rs new file mode 100644 index 00000000000..61f4a6cfc6a --- /dev/null +++ b/crates/optimism/bin/src/util.rs @@ -0,0 +1,348 @@ +use std::path::{Path, PathBuf}; +use tempdir::TempDir; +use rand::prelude::*; +use rand::RngCore; +use alloy_primitives::{Address, StorageKey, StorageValue, U256, B256}; +use reth_primitives_traits::{Account, StorageEntry}; +use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; +use triedb::{ + account::Account as TrieDBAccount, + path::{AddressPath, StoragePath}, + transaction::TransactionError, + Database, +}; +use std::{ + fs, io, + sync::{Arc, Barrier}, + thread, + time::Duration, +}; +use std::collections::HashMap; + +pub const BATCH_SIZE: usize = 10_000; + +pub fn generate_random_address(rng: &mut StdRng) -> AddressPath { + let mut bytes = [0u8; 20]; + rng.fill_bytes(&mut bytes); + let addr = Address::from_slice(&bytes); + AddressPath::for_address(addr) +} + +pub const DEFAULT_SETUP_DB_EOA_SIZE: usize = 1_000_000; +pub const DEFAULT_SETUP_DB_CONTRACT_SIZE: usize = 100_000; +pub 
const DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT: usize = 10; +pub const SEED_EOA: u64 = 42; // EOA seeding value +pub const SEED_CONTRACT: u64 = 43; // contract account seeding value + + +#[derive(Debug)] +#[allow(dead_code)] +pub struct FlatTrieDatabase { + _base_dir: Option, + pub main_file_name: String, + pub file_name_path: PathBuf, + pub meta_file_name: String, + pub meta_file_name_path: PathBuf, +} +pub fn get_flat_trie_database( + fallback_eoa_size: usize, + fallback_contract_size: usize, + fallback_storage_per_contract: usize, + overlay_size: usize, +) -> (FlatTrieDatabase,(HashMap, HashMap>) ){ + + let dir = TempDir::new("triedb_bench_base").unwrap(); + + let main_file_name_path = dir.path().join("triedb"); + let meta_file_name_path = dir.path().join("triedb.meta"); + let db = Database::create_new(&main_file_name_path).unwrap(); + + let (addresses, accounts_map, storage_map, overlay_acct, overlay_storage) = + generate_shared_test_data(fallback_eoa_size, fallback_contract_size, fallback_storage_per_contract, overlay_size); + + let ret = setup_tdb_database(&db, &addresses, &accounts_map, &storage_map) + .unwrap(); + + (FlatTrieDatabase { + _base_dir: Some(dir), + main_file_name: "triedb".to_string(), + file_name_path: main_file_name_path, + meta_file_name: "triedb.meta".to_string(), + meta_file_name_path, + }, (overlay_acct, overlay_storage )) +} +pub fn setup_tdb_database( + db: &Database, + addresses: &[Address], + accounts_map: &HashMap, + storage_map: &HashMap>, +) -> Result<(), TransactionError> { + { + let mut tx = db.begin_rw()?; + + // Set accounts from the provided data + for address in addresses { + if let Some(account) = accounts_map.get(address) { + let address_path = AddressPath::for_address(*address); + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + tx.set_account(address_path, Some(trie_account))?; + } + } + + // Set storage from the provided 
data (only for contracts) + for (address, storage) in storage_map { + let address_path = AddressPath::for_address(*address); + for (storage_key, storage_value) in storage { + let storage_path = StoragePath::for_address_path_and_slot( + address_path.clone(), + StorageKey::from(*storage_key), + ); + // Fix: Use the actual storage value, not the slot + let storage_value_triedb = StorageValue::from_be_slice( + storage_value.to_be_bytes::<32>().as_slice() + ); + tx.set_storage_slot(storage_path, Some(storage_value_triedb))?; + } + } + + tx.commit()?; + } + + Ok(()) +} + +// Helper function to generate shared test data using alloy primitives +pub fn generate_shared_test_data( + eoa_count: usize, + contract_count: usize, + storage_per_contract: usize, + overlay_count: usize, // total number of overlay addresses (can include duplicates and new ones) +) -> ( + Vec
, // all base addresses (EOA + contracts) + HashMap, // base accounts map + HashMap>, // base storage map: address -> storage_key -> value + HashMap, // overlay accounts map (can have duplicates with base + new addresses) + HashMap>, // overlay storage map +) { + let mut rng = StdRng::seed_from_u64(SEED_CONTRACT); + + // Generate EOA addresses + let eoa_addresses: Vec
= (0..eoa_count).map(|_| { + let mut addr_bytes = [0u8; 20]; + rng.fill(&mut addr_bytes); + Address::from_slice(&addr_bytes) + }).collect(); + + // Generate contract addresses + let contract_addresses: Vec
= (0..contract_count).map(|_| { + let mut addr_bytes = [0u8; 20]; + rng.fill(&mut addr_bytes); + Address::from_slice(&addr_bytes) + }).collect(); + + // Combine all base addresses + let mut addresses = eoa_addresses.clone(); + addresses.extend(contract_addresses.clone()); + + // Generate base accounts map + let mut accounts_map = HashMap::new(); + for (i, address) in addresses.iter().enumerate() { + let account = Account { + nonce: i as u64, + balance: U256::from(i as u64), + bytecode_hash: if contract_addresses.contains(address) { + // Contracts have bytecode hash + Some(EMPTY_ROOT_HASH) + } else { + // EOAs have no bytecode + None + }, + }; + accounts_map.insert(*address, account); + } + + // Generate base storage map (only for contracts) + let mut storage_map: HashMap> = HashMap::new(); + for address in &contract_addresses { + let mut contract_storage = HashMap::new(); + for key in 1..=storage_per_contract { + let storage_key = B256::from(U256::from(key)); + let storage_value = U256::from(key); + contract_storage.insert(storage_key, storage_value); + } + storage_map.insert(*address, contract_storage); + } + + // Generate overlay states + // Some addresses can be duplicates (updates to existing), some can be new + let mut overlay_accounts_map = HashMap::new(); + let mut overlay_storage_map: HashMap> = HashMap::new(); + + for i in 0..overlay_count { + // Randomly decide: duplicate existing address or new address + let is_existing = rng.gen_bool(0.5) && !addresses.is_empty(); + let address = if is_existing { + // Update existing account (only storage, no account update) + addresses[rng.gen_range(0..addresses.len())] + } else { + // Create new account + let mut addr_bytes = [0u8; 20]; + rng.fill(&mut addr_bytes); + Address::from_slice(&addr_bytes) + }; + + // Only generate overlay account for newly created accounts + if !is_existing { + // Generate overlay account (with different values) + let overlay_account = Account { + nonce: (i + 1000) as u64, // different 
nonce + balance: U256::from((i + 2000) as u64), // different balance + bytecode_hash: if rng.gen_bool(0.3) { + // 30% chance to be a contract + Some(EMPTY_ROOT_HASH) + } else { + None + }, + }; + overlay_accounts_map.insert(address, overlay_account); + } + + // Generate overlay storage (only for contracts) + // For existing addresses, check if they're contracts in base data + // For new addresses, check if the overlay account is a contract + let is_contract = if is_existing { + // Check if existing address is a contract in base data + accounts_map.get(&address) + .map(|acc| acc.bytecode_hash.is_some()) + .unwrap_or(false) + } else { + // Check if new overlay account is a contract + overlay_accounts_map.get(&address) + .map(|acc| acc.bytecode_hash.is_some()) + .unwrap_or(false) + }; + + if is_contract { + let mut contract_storage = HashMap::new(); + + // Random number of storage changes (max half of storage_per_contract) + let max_changes = (storage_per_contract / 2).max(1); + let num_changes = rng.gen_range(1..=max_changes); + + // Get existing storage if this address exists in base storage_map + let existing_storage = storage_map.get(&address); + + for _ in 0..num_changes { + let change_type = rng.gen_range(0..3); // 0: new, 1: delete, 2: update + + match change_type { + 0 => { + // New storage slot + let storage_key = B256::from(U256::from(rng.gen_range(1000..2000))); + let storage_value = U256::from(rng.gen_range(5000..10000)); + contract_storage.insert(storage_key, storage_value); + } + 1 => { + // Delete existing storage (value = 0) + if let Some(existing) = existing_storage { + if !existing.is_empty() { + let keys: Vec = existing.keys().copied().collect(); + if !keys.is_empty() { + let key_to_delete = keys[rng.gen_range(0..keys.len())]; + contract_storage.insert(key_to_delete, U256::ZERO); + } + } + } + } + 2 => { + // Update existing storage + if let Some(existing) = existing_storage { + if !existing.is_empty() { + let keys: Vec = 
existing.keys().copied().collect(); + if !keys.is_empty() { + let key_to_update = keys[rng.gen_range(0..keys.len())]; + let new_value = U256::from(rng.gen_range(10000..20000)); + contract_storage.insert(key_to_update, new_value); + } + } + } + } + _ => unreachable!(), + } + } + + if !contract_storage.is_empty() { + overlay_storage_map.insert(address, contract_storage); + } + } + } + + ( + addresses, + accounts_map, + storage_map, + overlay_accounts_map, + overlay_storage_map, + ) +} + +pub fn copy_files(from: &FlatTrieDatabase, to: &Path) -> Result<(), io::Error> { + for (file, from_path) in [ + (&from.main_file_name, &from.file_name_path), + (&from.meta_file_name, &from.meta_file_name_path), + ] { + let to_path = to.join(file); + fs::copy(from_path, &to_path)?; + } + Ok(()) +} + + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_generate_shared_test_data_single_eoa() { + let (addresses, accounts_map, storage_map, overlay_accounts_map, overlay_storage_map) = + generate_shared_test_data(1, 0, 0, 0); + + // Should have exactly 1 base address (EOA) + assert_eq!(addresses.len(), 1, "Should have exactly 1 base address"); + + // Should have exactly 1 account in base accounts map + assert_eq!(accounts_map.len(), 1, "Should have exactly 1 account in base accounts map"); + + // Verify the account properties + let address = &addresses[0]; + let account = accounts_map.get(address).expect("Address should exist in accounts_map"); + assert_eq!(account.nonce, 0, "EOA should have nonce 0"); + assert_eq!(account.balance, U256::from(0), "EOA should have balance 0"); + assert_eq!(account.bytecode_hash, None, "EOA should have no bytecode hash"); + + // Storage map should be empty (no contracts) + assert!(storage_map.is_empty(), "Storage map should be empty when contract_count is 0"); + + // Overlay maps should be empty (overlay_count is 0) + assert!(overlay_accounts_map.is_empty(), "Overlay accounts map should be empty when overlay_count is 0"); + 
assert!(overlay_storage_map.is_empty(), "Overlay storage map should be empty when overlay_count is 0"); + } + #[test] + fn test_generate_shared_test_data_single_eoa_single_contract() { + let (addresses, accounts_map, storage_map, overlay_accounts_map, overlay_storage_map) = + generate_shared_test_data(1, 1, 0, 0); + + // Should have exactly 1 base address (EOA) + assert_eq!(addresses.len(), 2, "Should have exactly 1 base address"); + + // Should have exactly 1 account in base accounts map + assert_eq!(accounts_map.len(), 2, "Should have exactly 1 account in base accounts map"); + + + } +} \ No newline at end of file diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 2a994701e55..45e3c3263ea 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -11,6 +11,7 @@ repository.workspace = true # reth reth-chainspec.workspace = true reth-db-api.workspace = true +reth-db.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-primitives-traits.workspace = true reth-config.workspace = true diff --git a/crates/storage/db-common/src/bin/state_root_overlay.rs b/crates/storage/db-common/src/bin/state_root_overlay.rs index 5523090054c..8395d5913ae 100644 --- a/crates/storage/db-common/src/bin/state_root_overlay.rs +++ b/crates/storage/db-common/src/bin/state_root_overlay.rs @@ -6,7 +6,7 @@ use reth_provider::{ test_utils::create_test_provider_factory_with_chain_spec, DatabaseProviderFactory, HashingWriter, LatestStateProvider, TrieWriter, }; -use reth_storage_api::{StateRootProvider, TrieWriter as _}; +use reth_storage_api::{StateRootProvider}; use reth_trie_common::{HashedPostState, HashedStorage}; use std::collections::HashMap; use std::sync::Arc; @@ -20,6 +20,10 @@ use triedb::{ path::{AddressPath, StoragePath}, Database, }; +use reth_db::{init_db, ClientVersion, DatabaseEnv}; +use reth_db::mdbx::DatabaseArguments; +use reth_db_common::init::compute_state_root; +use 
reth_node_types::NodeTypesWithDBAdapter; use crate::util::{setup_tdb_database}; #[path = "../../benches/util.rs"] @@ -108,8 +112,34 @@ fn main() -> eyre::Result<()> { .with_forks(MAINNET.hardforks.clone()) // Keep MAINNET hardforks .build(), ); - let provider_factory = create_test_provider_factory_with_chain_spec(empty_chain_spec); + + let datadir = tempdir::TempDir::new("state_root_overlay")?; + let db_path = datadir.path().join("mdbx"); + let sf_path = datadir.path().join("static_files"); + let triedb_path = datadir.path().join("triedb"); + reth_fs_util::create_dir_all(&db_path)?; + reth_fs_util::create_dir_all(&sf_path)?; + reth_fs_util::create_dir_all(&triedb_path)?; + + let db = Arc::new(init_db( + &db_path, + DatabaseArguments::new(ClientVersion::default()), + )?); + + use reth_provider::providers::StaticFileProvider; + let sfp: StaticFileProvider = StaticFileProvider::read_write(sf_path)?; + + use reth_provider::providers::triedb::TriedbProvider; + let triedb_provider = Arc::new(TriedbProvider::new(&triedb_path)); + + use reth_provider::providers::ProviderFactory; + let provider_factory = ProviderFactory::new( + db, + empty_chain_spec.clone(), + sfp, + triedb_provider, + ); // Insert base data { let mut provider_rw = provider_factory.provider_rw()?; @@ -131,7 +161,10 @@ fn main() -> eyre::Result<()> { let accounts_for_hashing = accounts.iter().map(|(address, account)| (*address, Some(*account))); provider_rw.insert_account_for_hashing(accounts_for_hashing)?; provider_rw.insert_storage_for_hashing(storage_entries)?; + + let ret = compute_state_root(provider_rw.as_ref(), None)?; provider_rw.commit()?; + } // Build HashedPostState from overlay From 407ac7c8d57c433663e6cc7997ae41515b52fdfe Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Mon, 29 Dec 2025 13:42:09 +0800 Subject: [PATCH 34/36] add merge bin --- Cargo.lock | 1 + crates/optimism/bin/Cargo.toml | 5 ++ crates/optimism/bin/src/main.rs | 12 --- crates/optimism/bin/src/merge_genesis.rs | 74 
+++++++++++++++++++ crates/optimism/bin/src/state_root_overlay.rs | 48 ++++++++++-- crates/optimism/bin/src/util.rs | 6 +- crates/storage/db-common/src/init.rs | 4 +- 7 files changed, 129 insertions(+), 21 deletions(-) create mode 100644 crates/optimism/bin/src/merge_genesis.rs diff --git a/Cargo.lock b/Cargo.lock index 75114bc20fc..c5dc4d66dcd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6445,6 +6445,7 @@ dependencies = [ "reth-provider", "reth-storage-api", "reth-trie-common", + "serde_json", "tempdir", "tracing", "tracing-subscriber 0.3.22", diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 38f9f2695e9..e6b498e7bd7 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -43,6 +43,7 @@ tempdir.workspace = true triedb.workspace = true eyre.workspace = true rand.workspace = true +serde_json.workspace = true [lints] workspace = true @@ -79,3 +80,7 @@ path = "src/main.rs" [[bin]] name = "state_root_overlay" path = "src/state_root_overlay.rs" + +[[bin]] +name = "merge_genesis" +path = "src/merge_genesis.rs" diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index 9d8f0279bbb..9d79bf125eb 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -27,18 +27,6 @@ fn main() { } } - let fmt_layer = fmt::layer() - .with_current_span(true) // include span fields on each log - .with_span_list(false) // or true to show the full stack - .with_span_events(FmtSpan::NONE); - - Registry::default() - .with(fmt_layer) - .init(); - - let root = tracing::info_span!("app", trace_id = %Uuid::now_v4()); - let _enter = root.enter(); - if let Err(err) = Cli::::parse().run(async move |builder, rollup_args| { info!(target: "reth::cli", "Launching node triedb"); diff --git a/crates/optimism/bin/src/merge_genesis.rs b/crates/optimism/bin/src/merge_genesis.rs new file mode 100644 index 00000000000..3ce4ac6f39d --- /dev/null +++ b/crates/optimism/bin/src/merge_genesis.rs @@ -0,0 
+1,74 @@ +use alloy_genesis::Genesis; +use alloy_primitives::Address; +use std::collections::BTreeMap; +use std::env; +use std::path::PathBuf; +use eyre::Result; + +fn main() -> Result<()> { + // Get the genesis.json file path from command line arguments + let genesis_json_path = env::args() + .nth(1) + .map(PathBuf::from) + .ok_or_else(|| eyre::eyre!("Usage: merge_genesis "))?; + + // Get the genesis_random.json file path from command line arguments + let genesis_random_json_path = env::args() + .nth(2) + .map(PathBuf::from) + .ok_or_else(|| eyre::eyre!("Usage: merge_genesis "))?; + + // Get the output file path from command line arguments + let merged_genesis_json_path = env::args() + .nth(3) + .map(PathBuf::from) + .ok_or_else(|| eyre::eyre!("Usage: merge_genesis "))?; + + // Read the base genesis.json file + let genesis_json_content = std::fs::read_to_string(&genesis_json_path)?; + let mut base_genesis: Genesis = serde_json::from_str(&genesis_json_content)?; + + println!("Loaded base genesis from {}", genesis_json_path.display()); + + // Read the genesis_random.json file + let genesis_random_json_content = std::fs::read_to_string(&genesis_random_json_path)?; + let random_genesis: Genesis = serde_json::from_str(&genesis_random_json_content)?; + + println!("Loaded random genesis from {}", genesis_random_json_path.display()); + + // Get the alloc from random_genesis + let random_alloc = random_genesis.alloc; + + if random_alloc.is_empty() { + println!("Warning: genesis_random.json has no accounts in alloc"); + } + + // Merge alloc: use random_genesis alloc to replace or insert into base_genesis alloc + let mut merged_count = 0; + let mut replaced_count = 0; + + { + let base_alloc = &mut base_genesis.alloc; + + for (address, account) in random_alloc { + if base_alloc.contains_key(&address) { + replaced_count += 1; + } else { + merged_count += 1; + } + base_alloc.insert(address, account); + } + } + + println!("Merged {} new accounts, replaced {} existing accounts", 
merged_count, replaced_count); + + // Write the merged genesis to output file + let json_string = serde_json::to_string_pretty(&base_genesis)?; + std::fs::write(&merged_genesis_json_path, json_string)?; + + println!("Written merged genesis to {}", merged_genesis_json_path.display()); + println!("Total accounts in merged genesis: {}", base_genesis.alloc.len()); + + Ok(()) +} + diff --git a/crates/optimism/bin/src/state_root_overlay.rs b/crates/optimism/bin/src/state_root_overlay.rs index 50bcea14a5a..1d72bf80896 100644 --- a/crates/optimism/bin/src/state_root_overlay.rs +++ b/crates/optimism/bin/src/state_root_overlay.rs @@ -2,16 +2,16 @@ use alloy_primitives::{keccak256, Address, B256, U256, StorageKey, StorageValue} use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_provider::{ - test_utils::create_test_provider_factory_with_chain_spec, DatabaseProviderFactory, HashingWriter, LatestStateProvider, TrieWriter, }; use reth_primitives_traits::Account; use reth_storage_api::{StateRootProvider}; use reth_trie_common::{HashedPostState, HashedStorage}; -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; +use std::path::PathBuf; use std::sync::Arc; use std::time::Instant; -use alloy_genesis::Genesis; +use alloy_genesis::{Genesis, GenesisAccount}; use alloy_primitives::map::B256Map; use tempdir::TempDir; use triedb::{ @@ -42,8 +42,46 @@ fn main() -> eyre::Result<()> { util::BATCH_SIZE, ); - println!("Generated {} base addresses, {} overlay accounts", - base_addresses.len(), overlay_acct.len()); + println!("Generated {} base addresses, {} overlay accounts, overlay storage {}", base_addresses.len(), overlay_acct.len(), overlay_storage.len()); + + // Convert base_accounts_map and base_storage_map to genesis alloc format + let mut genesis_alloc: BTreeMap = BTreeMap::new(); + + for (address, account) in &base_accounts_map { + // Convert storage from HashMap to BTreeMap + let storage = 
base_storage_map.get(address).map(|storage_map| { + storage_map + .iter() + .filter(|(_, v)| !v.is_zero()) // Only include non-zero storage values + .map(|(k, v)| { + // Convert U256 to B256 for storage value + (*k, B256::from_slice(&v.to_be_bytes::<32>())) + }) + .collect::>() + }); + + let genesis_account = GenesisAccount { + nonce: Some(account.nonce), + balance: account.balance, + code: None, // We only have bytecode_hash, not the actual code + storage: storage.filter(|s| !s.is_empty()), + private_key: None, + }; + + genesis_alloc.insert(*address, genesis_account); + } + + // Create Genesis struct with the alloc + let genesis = Genesis { + alloc: genesis_alloc, + ..Genesis::default() + }; + + // Write to genesis.json file + let genesis_json_path = PathBuf::from("genesis_random.json"); + let json_string = serde_json::to_string_pretty(&genesis)?; + std::fs::write(&genesis_json_path, json_string)?; + println!("Written genesis alloc to {}", genesis_json_path.display()); let dir = TempDir::new("triedb_overlay_base").unwrap(); let main_file_name_path = dir.path().join("triedb"); diff --git a/crates/optimism/bin/src/util.rs b/crates/optimism/bin/src/util.rs index 61f4a6cfc6a..4c2663f36dd 100644 --- a/crates/optimism/bin/src/util.rs +++ b/crates/optimism/bin/src/util.rs @@ -19,7 +19,7 @@ use std::{ }; use std::collections::HashMap; -pub const BATCH_SIZE: usize = 10_000; +pub const BATCH_SIZE: usize = 20_000; pub fn generate_random_address(rng: &mut StdRng) -> AddressPath { let mut bytes = [0u8; 20]; @@ -28,8 +28,8 @@ pub fn generate_random_address(rng: &mut StdRng) -> AddressPath { AddressPath::for_address(addr) } -pub const DEFAULT_SETUP_DB_EOA_SIZE: usize = 1_000_000; -pub const DEFAULT_SETUP_DB_CONTRACT_SIZE: usize = 100_000; +pub const DEFAULT_SETUP_DB_EOA_SIZE: usize = 2_000_000; +pub const DEFAULT_SETUP_DB_CONTRACT_SIZE: usize = 500_000; pub const DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT: usize = 10; pub const SEED_EOA: u64 = 42; // EOA seeding value pub const 
SEED_CONTRACT: u64 = 43; // contract account seeding value diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 7c163e56f0b..3e51e96a1cd 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -28,6 +28,7 @@ use reth_trie::{ use reth_trie_db::{DatabaseStateRoot, DatabaseTrieCursorFactory}; use serde::{Deserialize, Serialize}; use std::io::BufRead; +use std::time::Instant; use tracing::{debug, error, info, trace}; use reth_trie::{trie_cursor::{TrieCursor, TrieCursorFactory}}; use reth_provider::providers::state::latest::get_triedb_provider; @@ -111,6 +112,7 @@ where + AsRef, PF::ChainSpec: EthChainSpec
::BlockHeader>, { + let start = Instant::now(); let chain = factory.chain_spec(); let genesis = chain.genesis(); @@ -200,7 +202,7 @@ where // `commit_unwind`` will first commit the DB and then the static file provider, which is // necessary on `init_genesis`. provider_rw.commit()?; - + info!("time elapsed in init_genesis: {:?}", start.elapsed().as_millis()); Ok(hash) } From 6b7ba6f438d10e71221b7c192305033df144d551 Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Tue, 30 Dec 2025 13:58:25 +0800 Subject: [PATCH 35/36] update --- crates/engine/primitives/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index abb79445f41..cbf977ddf74 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -1,7 +1,7 @@ //! Engine tree configuration. /// Triggers persistence when the number of canonical blocks in memory exceeds this threshold. -pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 20; +pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 1024; /// How close to the canonical head we persist blocks. pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0; From 8e0f6f1e516c095d7d8c49a994413d030d8fa99b Mon Sep 17 00:00:00 2001 From: "cliff.yang" Date: Wed, 31 Dec 2025 17:09:52 +0800 Subject: [PATCH 36/36] update --- crates/engine/primitives/src/config.rs | 2 +- crates/optimism/bin/src/state_root_overlay.rs | 429 +++++++++++------- crates/optimism/bin/src/util.rs | 2 +- 3 files changed, 261 insertions(+), 172 deletions(-) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index cbf977ddf74..0b9b7d9f821 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -1,7 +1,7 @@ //! Engine tree configuration. /// Triggers persistence when the number of canonical blocks in memory exceeds this threshold. 
-pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 1024; +pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; /// How close to the canonical head we persist blocks. pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0; diff --git a/crates/optimism/bin/src/state_root_overlay.rs b/crates/optimism/bin/src/state_root_overlay.rs index 1d72bf80896..4d381fcb25f 100644 --- a/crates/optimism/bin/src/state_root_overlay.rs +++ b/crates/optimism/bin/src/state_root_overlay.rs @@ -7,9 +7,10 @@ use reth_provider::{ use reth_primitives_traits::Account; use reth_storage_api::{StateRootProvider}; use reth_trie_common::{HashedPostState, HashedStorage}; -use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::path::PathBuf; -use std::sync::Arc; +use std::sync::{Arc, mpsc}; +use std::thread; use std::time::Instant; use alloy_genesis::{Genesis, GenesisAccount}; use alloy_primitives::map::B256Map; @@ -33,16 +34,104 @@ mod util; fn main() -> eyre::Result<()> { println!("Testing overlay state root calculation methods..."); - // Generate shared test data - let (base_addresses, base_accounts_map, base_storage_map, overlay_acct, overlay_storage) = - util::generate_shared_test_data( - util::DEFAULT_SETUP_DB_EOA_SIZE, - util::DEFAULT_SETUP_DB_CONTRACT_SIZE, - util::DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, - util::BATCH_SIZE, - ); - - println!("Generated {} base addresses, {} overlay accounts, overlay storage {}", base_addresses.len(), overlay_acct.len(), overlay_storage.len()); + // Generate 4 sets of data in parallel using threads + let num_threads = 4; + let (tx, rx) = mpsc::channel(); + + let start_gen = Instant::now(); + + // Spawn 4 threads to generate data in parallel + for thread_id in 0..num_threads { + let tx_clone = tx.clone(); + thread::spawn(move || { + let (base_addresses, base_accounts_map, base_storage_map, _overlay_acct, _overlay_storage) = + util::generate_shared_test_data( + util::DEFAULT_SETUP_DB_EOA_SIZE, + 
util::DEFAULT_SETUP_DB_CONTRACT_SIZE, + util::DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, + util::BATCH_SIZE, + ); + + tx_clone.send((thread_id, base_addresses, base_accounts_map, base_storage_map)) + .expect("Failed to send data from thread"); + }); + } + + // Drop the original sender so the receiver knows when all threads are done + drop(tx); + + // Collect results from all threads + let mut all_results: Vec<(usize, Vec
, HashMap, HashMap>)> = Vec::new(); + for received in rx { + all_results.push(received); + } + + // Sort by thread_id for consistent ordering + all_results.sort_by_key(|(thread_id, _, _, _)| *thread_id); + + println!("Generated {} sets of data in parallel, elapsed: {:?} ms", num_threads, start_gen.elapsed().as_millis()); + + // Merge all base_accounts_map into one + let mut merged_base_accounts_map: HashMap = HashMap::new(); + for (_, _, accounts_map, _) in &all_results { + for (address, account) in accounts_map { + // If address already exists, we keep the first one (or you can decide on merge strategy) + merged_base_accounts_map.entry(*address).or_insert(*account); + } + } + + // Merge all base_storage_map into one + let mut merged_base_storage_map: HashMap> = HashMap::new(); + for (_, _, _, storage_map) in &all_results { + for (address, storage) in storage_map { + let merged_storage = merged_base_storage_map + .entry(*address) + .or_insert_with(HashMap::new); + // Merge storage entries - if key exists, keep the first value (or you can decide on merge strategy) + for (key, value) in storage { + merged_storage.entry(*key).or_insert(*value); + } + } + } + + // Collect all base addresses (deduplicated) + let mut merged_base_addresses: Vec
= Vec::new(); + let mut seen_addresses = HashSet::new(); + for (_, addresses, _, _) in &all_results { + for address in addresses { + if seen_addresses.insert(*address) { + merged_base_addresses.push(*address); + } + } + } + + println!("Merged {} base addresses, {} accounts, {} addresses with storage", + merged_base_addresses.len(), + merged_base_accounts_map.len(), + merged_base_storage_map.len() + ); + + // Use the merged data for the rest of the code + let base_addresses = merged_base_addresses; + let base_accounts_map = merged_base_accounts_map; + let base_storage_map = merged_base_storage_map; + + // // For overlay, we'll use the first thread's overlay data (or generate new if needed) + // // For now, we'll generate overlay from the first result + // let (_, _, _, overlay_acct, overlay_storage) = if !all_results.is_empty() { + // // Generate overlay data separately since we only need it once + // let (_, _, _, overlay_acct, overlay_storage) = util::generate_shared_test_data( + // util::DEFAULT_SETUP_DB_EOA_SIZE, + // util::DEFAULT_SETUP_DB_CONTRACT_SIZE, + // util::DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, + // util::BATCH_SIZE, + // ); + // (Vec::new(), HashMap::new(), HashMap::new(), overlay_acct, overlay_storage) + // } else { + // (Vec::new(), HashMap::new(), HashMap::new(), HashMap::new(), HashMap::new()) + // }; + + // println!("Generated {} base addresses, {} overlay accounts, overlay storage {}", base_addresses.len(), overlay_acct.len(), overlay_storage.len()); // Convert base_accounts_map and base_storage_map to genesis alloc format let mut genesis_alloc: BTreeMap = BTreeMap::new(); @@ -78,168 +167,168 @@ fn main() -> eyre::Result<()> { }; // Write to genesis.json file - let genesis_json_path = PathBuf::from("genesis_random.json"); + let genesis_json_path = PathBuf::from("genesis_random_merged.json"); let json_string = serde_json::to_string_pretty(&genesis)?; std::fs::write(&genesis_json_path, json_string)?; println!("Written genesis alloc to {}", 
genesis_json_path.display()); - let dir = TempDir::new("triedb_overlay_base").unwrap(); - let main_file_name_path = dir.path().join("triedb"); - let triedb = Database::create_new(&main_file_name_path).unwrap(); - - setup_tdb_database(&triedb, &base_addresses, &base_accounts_map, &base_storage_map).unwrap(); - - let mut account_overlay_mut = OverlayStateMut::new(); - - for (address, account) in &overlay_acct { - let address_path = AddressPath::for_address(*address); - let trie_account = TrieDBAccount::new( - account.nonce, - account.balance, - EMPTY_ROOT_HASH, - account.bytecode_hash.unwrap_or(KECCAK_EMPTY), - ); - account_overlay_mut.insert(address_path.clone().into(), Some(OverlayValue::Account(trie_account))); - } - - // Add overlay storage - for (address, storage) in &overlay_storage { - let address_path = AddressPath::for_address(*address); - for (storage_key, storage_value) in storage { - // Convert B256 back to U256 to get the raw storage slot - let raw_slot = U256::from_be_slice(storage_key.as_slice()); - let storage_path = StoragePath::for_address_path_and_slot( - address_path.clone(), - StorageKey::from(raw_slot), - ); - - if storage_value.is_zero() { - // Zero value means delete the storage slot - account_overlay_mut.insert( - storage_path.clone().into(), - None, // ✅ Delete slot for zero values - ); - } else { - // Non-zero value: insert the storage entry - account_overlay_mut.insert( - storage_path.clone().into(), - Some(OverlayValue::Storage(StorageValue::from_be_slice( - storage_value.to_be_bytes::<32>().as_slice() - ))), - ); - } - } - } - let account_overlay = account_overlay_mut.freeze(); - - let start = Instant::now(); - let tx = triedb.begin_ro()?; - let triedb_root = tx.compute_root_with_overlay(account_overlay.clone())?; - println!("triedb_root = {:?}, overlay state root elapsed = {:?} ms", triedb_root.root, start.elapsed().as_millis()); - - let start = Instant::now(); - tx.commit()?; - println!("triedb commit elapsed = {:?} ns", 
start.elapsed().as_nanos()); - - // ===== Setup MDBX ===== - println!("\nSetting up MDBX..."); - // Create a chain spec with empty genesis allocation but keep base mainnet hardforks - let empty_chain_spec = Arc::new( - OpChainSpecBuilder::base_mainnet() - .genesis(Genesis::default()) // Empty genesis with no alloc - .build(), - ); - - - let datadir = tempdir::TempDir::new("state_root_overlay")?; - let db_path = datadir.path().join("mdbx"); - let sf_path = datadir.path().join("static_files"); - let triedb_path = datadir.path().join("triedb"); - reth_fs_util::create_dir_all(&db_path)?; - reth_fs_util::create_dir_all(&sf_path)?; - reth_fs_util::create_dir_all(&triedb_path)?; - - let db = Arc::new(init_db( - &db_path, - DatabaseArguments::new(ClientVersion::default()), - )?); - - use reth_provider::providers::StaticFileProvider; - let sfp: StaticFileProvider = StaticFileProvider::read_write(sf_path)?; - - use reth_provider::providers::triedb::TriedbProvider; - let triedb_provider = Arc::new(TriedbProvider::new(&triedb_path)); - - use reth_provider::providers::ProviderFactory; - let provider_factory: ProviderFactory>> = - ProviderFactory::new( - db, - empty_chain_spec.clone(), - sfp, - triedb_provider, - ); - // Insert base data - { - let mut provider_rw = provider_factory.provider_rw()?; - let accounts: Vec<(Address, Account)> = base_accounts_map.iter().map(|(a, acc)| (*a, *acc)).collect(); - let storage_entries: Vec<(Address, Vec)> = base_storage_map - .iter() - .map(|(address, storage)| { - let entries: Vec = storage - .iter() - .map(|(key, value)| reth_primitives_traits::StorageEntry { - key: *key, - value: *value, - }) - .collect(); - (*address, entries) - }) - .collect(); - - let accounts_for_hashing = accounts.iter().map(|(address, account)| (*address, Some(*account))); - provider_rw.insert_account_for_hashing(accounts_for_hashing)?; - provider_rw.insert_storage_for_hashing(storage_entries)?; - - let ret = compute_state_root(provider_rw.as_ref(), None)?; - 
provider_rw.commit()?; - - } - - // Build HashedPostState from overlay - let mut hashed_accounts: Vec<(B256, Option)> = overlay_acct - .iter() - .map(|(address, account)| { - let hashed = keccak256(address); - (hashed, Some(*account)) - }) - .collect(); - - let mut hashed_storages: B256Map = HashMap::default(); - for (address, storage) in &overlay_storage { - let hashed_address = keccak256(address); - let hashed_storage = HashedStorage::from_iter( - false, - storage.iter().map(|(key, value)| { - let hashed_slot = keccak256(*key); - (hashed_slot, *value) - }), - ); - hashed_storages.insert(hashed_address, hashed_storage); - } - - let hashed_state = HashedPostState { - accounts: hashed_accounts.into_iter().collect(), - storages: hashed_storages, - }; - - let db_provider_ro = provider_factory.database_provider_ro()?; - let latest_ro = LatestStateProvider::new(db_provider_ro); - - let start = Instant::now(); - let (mdbx_root, _updates) = latest_ro.state_root_with_updates(hashed_state)?; - - println!("MDBX state root: {:?}, overlay state root elapsed {:?} ms", mdbx_root, start.elapsed().as_millis()); - assert_eq!(mdbx_root, triedb_root.root); + // let dir = TempDir::new("triedb_overlay_base").unwrap(); + // let main_file_name_path = dir.path().join("triedb"); + // let triedb = Database::create_new(&main_file_name_path).unwrap(); + + // setup_tdb_database(&triedb, &base_addresses, &base_accounts_map, &base_storage_map).unwrap(); + + // let mut account_overlay_mut = OverlayStateMut::new(); + + // for (address, account) in &overlay_acct { + // let address_path = AddressPath::for_address(*address); + // let trie_account = TrieDBAccount::new( + // account.nonce, + // account.balance, + // EMPTY_ROOT_HASH, + // account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + // ); + // account_overlay_mut.insert(address_path.clone().into(), Some(OverlayValue::Account(trie_account))); + // } + + // // Add overlay storage + // for (address, storage) in &overlay_storage { + // let address_path 
= AddressPath::for_address(*address); + // for (storage_key, storage_value) in storage { + // // Convert B256 back to U256 to get the raw storage slot + // let raw_slot = U256::from_be_slice(storage_key.as_slice()); + // let storage_path = StoragePath::for_address_path_and_slot( + // address_path.clone(), + // StorageKey::from(raw_slot), + // ); + + // if storage_value.is_zero() { + // // Zero value means delete the storage slot + // account_overlay_mut.insert( + // storage_path.clone().into(), + // None, // ✅ Delete slot for zero values + // ); + // } else { + // // Non-zero value: insert the storage entry + // account_overlay_mut.insert( + // storage_path.clone().into(), + // Some(OverlayValue::Storage(StorageValue::from_be_slice( + // storage_value.to_be_bytes::<32>().as_slice() + // ))), + // ); + // } + // } + // } + // let account_overlay = account_overlay_mut.freeze(); + + // let start = Instant::now(); + // let tx = triedb.begin_ro()?; + // let triedb_root = tx.compute_root_with_overlay(account_overlay.clone())?; + // println!("triedb_root = {:?}, overlay state root elapsed = {:?} ms", triedb_root.root, start.elapsed().as_millis()); + + // let start = Instant::now(); + // tx.commit()?; + // println!("triedb commit elapsed = {:?} ns", start.elapsed().as_nanos()); + + // // ===== Setup MDBX ===== + // println!("\nSetting up MDBX..."); + // // Create a chain spec with empty genesis allocation but keep base mainnet hardforks + // let empty_chain_spec = Arc::new( + // OpChainSpecBuilder::base_mainnet() + // .genesis(Genesis::default()) // Empty genesis with no alloc + // .build(), + // ); + + + // let datadir = tempdir::TempDir::new("state_root_overlay")?; + // let db_path = datadir.path().join("mdbx"); + // let sf_path = datadir.path().join("static_files"); + // let triedb_path = datadir.path().join("triedb"); + // reth_fs_util::create_dir_all(&db_path)?; + // reth_fs_util::create_dir_all(&sf_path)?; + // reth_fs_util::create_dir_all(&triedb_path)?; + + // let 
db = Arc::new(init_db( + // &db_path, + // DatabaseArguments::new(ClientVersion::default()), + // )?); + + // use reth_provider::providers::StaticFileProvider; + // let sfp: StaticFileProvider = StaticFileProvider::read_write(sf_path)?; + + // use reth_provider::providers::triedb::TriedbProvider; + // let triedb_provider = Arc::new(TriedbProvider::new(&triedb_path)); + + // use reth_provider::providers::ProviderFactory; + // let provider_factory: ProviderFactory>> = + // ProviderFactory::new( + // db, + // empty_chain_spec.clone(), + // sfp, + // triedb_provider, + // ); + // // Insert base data + // { + // let mut provider_rw = provider_factory.provider_rw()?; + // let accounts: Vec<(Address, Account)> = base_accounts_map.iter().map(|(a, acc)| (*a, *acc)).collect(); + // let storage_entries: Vec<(Address, Vec)> = base_storage_map + // .iter() + // .map(|(address, storage)| { + // let entries: Vec = storage + // .iter() + // .map(|(key, value)| reth_primitives_traits::StorageEntry { + // key: *key, + // value: *value, + // }) + // .collect(); + // (*address, entries) + // }) + // .collect(); + + // let accounts_for_hashing = accounts.iter().map(|(address, account)| (*address, Some(*account))); + // provider_rw.insert_account_for_hashing(accounts_for_hashing)?; + // provider_rw.insert_storage_for_hashing(storage_entries)?; + + // let ret = compute_state_root(provider_rw.as_ref(), None)?; + // provider_rw.commit()?; + + // } + + // // Build HashedPostState from overlay + // let mut hashed_accounts: Vec<(B256, Option)> = overlay_acct + // .iter() + // .map(|(address, account)| { + // let hashed = keccak256(address); + // (hashed, Some(*account)) + // }) + // .collect(); + + // let mut hashed_storages: B256Map = HashMap::default(); + // for (address, storage) in &overlay_storage { + // let hashed_address = keccak256(address); + // let hashed_storage = HashedStorage::from_iter( + // false, + // storage.iter().map(|(key, value)| { + // let hashed_slot = keccak256(*key); 
+ // (hashed_slot, *value) + // }), + // ); + // hashed_storages.insert(hashed_address, hashed_storage); + // } + + // let hashed_state = HashedPostState { + // accounts: hashed_accounts.into_iter().collect(), + // storages: hashed_storages, + // }; + + // let db_provider_ro = provider_factory.database_provider_ro()?; + // let latest_ro = LatestStateProvider::new(db_provider_ro); + + // let start = Instant::now(); + // let (mdbx_root, _updates) = latest_ro.state_root_with_updates(hashed_state)?; + + // println!("MDBX state root: {:?}, overlay state root elapsed {:?} ms", mdbx_root, start.elapsed().as_millis()); + // assert_eq!(mdbx_root, triedb_root.root); Ok(()) } \ No newline at end of file diff --git a/crates/optimism/bin/src/util.rs b/crates/optimism/bin/src/util.rs index 4c2663f36dd..4feed6254e5 100644 --- a/crates/optimism/bin/src/util.rs +++ b/crates/optimism/bin/src/util.rs @@ -30,7 +30,7 @@ pub fn generate_random_address(rng: &mut StdRng) -> AddressPath { pub const DEFAULT_SETUP_DB_EOA_SIZE: usize = 2_000_000; pub const DEFAULT_SETUP_DB_CONTRACT_SIZE: usize = 500_000; -pub const DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT: usize = 10; +pub const DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT: usize = 40; pub const SEED_EOA: u64 = 42; // EOA seeding value pub const SEED_CONTRACT: u64 = 43; // contract account seeding value