diff --git a/Cargo.lock b/Cargo.lock index 82bd9499b4e..c5dc4d66dcd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -897,7 +897,7 @@ dependencies = [ "arrayvec", "derive_arbitrary", "derive_more", - "nybbles", + "nybbles 0.4.6", "proptest", "proptest-derive 0.5.1", "serde", @@ -3971,6 +3971,15 @@ dependencies = [ "windows-sys 0.60.2", ] +[[package]] +name = "fixed-cache" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba59b6c98ba422a13f17ee1305c995cb5742bba7997f5b4d9af61b2ff0ffb213" +dependencies = [ + "equivalent", +] + [[package]] name = "fixed-hash" version = "0.8.0" @@ -4233,6 +4242,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "gcc" version = "0.3.55" @@ -6238,6 +6256,19 @@ dependencies = [ "libc", ] +[[package]] +name = "nybbles" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +dependencies = [ + "alloy-rlp", + "const-hex", + "proptest", + "serde", + "smallvec", +] + [[package]] name = "nybbles" version = "0.4.6" @@ -6388,9 +6419,19 @@ dependencies = [ name = "op-reth" version = "1.9.3" dependencies = [ + "alloy-genesis", + "alloy-primitives", + "alloy-trie", "clap", + "eyre", + "rand 0.9.2", "reth-apollo", + "reth-chainspec", "reth-cli-util", + "reth-db", + "reth-db-common", + "reth-fs-util", + "reth-node-types", "reth-optimism-chainspec", "reth-optimism-cli", "reth-optimism-consensus", @@ -6400,7 +6441,16 @@ dependencies = [ "reth-optimism-payload-builder", "reth-optimism-primitives", "reth-optimism-rpc", + "reth-primitives-traits", 
+ "reth-provider", + "reth-storage-api", + "reth-trie-common", + "serde_json", + "tempdir", "tracing", + "tracing-subscriber 0.3.22", + "triedb", + "uuid", "xlayer-db", "xlayer-rpc", ] @@ -7340,6 +7390,15 @@ dependencies = [ "rand_core 0.9.3", ] +[[package]] +name = "rapidhash" +version = "4.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2988730ee014541157f48ce4dcc603940e00915edc3c7f9a8d78092256bb2493" +dependencies = [ + "rustversion", +] + [[package]] name = "ratatui" version = "0.29.0" @@ -7501,6 +7560,15 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + [[package]] name = "reqwest" version = "0.11.27" @@ -7774,6 +7842,7 @@ dependencies = [ "tokio", "tokio-stream", "tracing", + "triedb", ] [[package]] @@ -8094,8 +8163,11 @@ dependencies = [ "alloy-consensus", "alloy-genesis", "alloy-primitives", + "alloy-trie", "boyer-moore-magiclen", + "codspeed-criterion-compat", "eyre", + "rand 0.8.5", "reth-chainspec", "reth-codecs", "reth-config", @@ -8109,12 +8181,16 @@ dependencies = [ "reth-provider", "reth-stages-types", "reth-static-file-types", + "reth-storage-api", "reth-trie", + "reth-trie-common", "reth-trie-db", "serde", "serde_json", + "tempdir", "thiserror 2.0.17", "tracing", + "triedb", ] [[package]] @@ -8351,7 +8427,9 @@ dependencies = [ "reth-engine-primitives", "reth-ethereum-engine-primitives", "reth-optimism-chainspec", + "reth-optimism-forks", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-storage-api", "reth-transaction-pool", @@ -8423,6 +8501,8 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-rpc-types-engine", + 
"alloy-signer", + "alloy-signer-local", "assert_matches", "codspeed-criterion-compat", "crossbeam-channel", @@ -8455,6 +8535,7 @@ dependencies = [ "reth-exex-types", "reth-metrics", "reth-network-p2p", + "reth-node-core", "reth-node-ethereum", "reth-node-metrics", "reth-payload-builder", @@ -8467,10 +8548,12 @@ dependencies = [ "reth-stages", "reth-stages-api", "reth-static-file", + "reth-storage-api", "reth-tasks", "reth-testing-utils", "reth-tracing", "reth-trie", + "reth-trie-common", "reth-trie-parallel", "reth-trie-sparse", "reth-trie-sparse-parallel", @@ -8483,6 +8566,7 @@ dependencies = [ "thiserror 2.0.17", "tokio", "tracing", + "triedb", "xlayer-db", ] @@ -8850,6 +8934,7 @@ dependencies = [ "reth-storage-errors", "reth-trie-common", "revm", + "tracing", ] [[package]] @@ -8884,7 +8969,7 @@ dependencies = [ "alloy-evm", "alloy-primitives", "alloy-rlp", - "nybbles", + "nybbles 0.4.6", "reth-storage-errors", "thiserror 2.0.17", ] @@ -9739,6 +9824,7 @@ dependencies = [ "reth-storage-errors", "revm", "thiserror 2.0.17", + "tracing", ] [[package]] @@ -9795,11 +9881,13 @@ name = "reth-optimism-node" version = "1.9.3" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-genesis", "alloy-network", "alloy-primitives", "alloy-rpc-types-engine", "alloy-rpc-types-eth", + "alloy-sol-types", "clap", "eyre", "futures", @@ -9829,6 +9917,7 @@ dependencies = [ "reth-optimism-storage", "reth-optimism-txpool", "reth-payload-builder", + "reth-payload-primitives", "reth-payload-util", "reth-primitives-traits", "reth-provider", @@ -10171,14 +10260,17 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", + "alloy-trie", "assert_matches", "dashmap 6.1.0", "eyre", + "fixed-cache", "itertools 0.14.0", "metrics", "notify", "parking_lot", "rand 0.9.2", + "rapidhash", "rayon", "reth-chain-state", "reth-chainspec", @@ -10206,9 +10298,11 @@ dependencies = [ "revm-database-interface", "revm-state", "strum 0.27.2", + "tempdir", "tempfile", "tokio", "tracing", 
+ "triedb", ] [[package]] @@ -10695,6 +10789,7 @@ dependencies = [ "tokio", "tokio-stream", "tracing", + "triedb", ] [[package]] @@ -10914,6 +11009,7 @@ dependencies = [ "reth-storage-errors", "reth-trie-common", "revm-database", + "triedb", ] [[package]] @@ -11096,22 +11192,28 @@ dependencies = [ "codspeed-criterion-compat", "itertools 0.14.0", "metrics", + "nybbles 0.3.4", + "nybbles 0.4.6", "parking_lot", "pretty_assertions", "proptest", "proptest-arbitrary-interop", + "reth-chainspec", "reth-ethereum-primitives", "reth-execution-errors", "reth-metrics", "reth-primitives-traits", + "reth-provider", "reth-stages-types", "reth-storage-errors", "reth-tracing", "reth-trie-common", + "reth-trie-db", "reth-trie-sparse", "revm-database", "revm-state", "tracing", + "triedb", "triehash", ] @@ -11134,7 +11236,7 @@ dependencies = [ "derive_more", "hash-db", "itertools 0.14.0", - "nybbles", + "nybbles 0.4.6", "plain_hasher", "proptest", "proptest-arbitrary-interop", @@ -11163,13 +11265,17 @@ dependencies = [ "reth-execution-errors", "reth-primitives-traits", "reth-provider", + "reth-storage-api", "reth-trie", "reth-trie-common", + "reth-trie-db", "revm", "revm-database", "serde_json", "similar-asserts", + "tempdir", "tracing", + "triedb", "triehash", ] @@ -11936,6 +12042,17 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sealed" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22f968c5ea23d555e670b449c1c5e7b2fc399fdaec1d304a17cd48e288abc107" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "sec1" version = "0.7.3" @@ -12697,6 +12814,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "tempdir" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" +dependencies = [ + "rand 0.4.6", + "remove_dir_all", +] + [[package]] name = "tempfile" version = "3.23.0" @@ -13438,6 +13565,28 @@ dependencies = [ "syn 2.0.111", ] +[[package]] +name = "triedb" +version = "0.1.0" +source = "git+https://github.com/base/triedb.git#cedd1a33084ddb2724240193c39df3fbdec1dba0" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-trie", + "arrayvec", + "fxhash", + "memmap2", + "metrics", + "metrics-derive", + "parking_lot", + "proptest", + "proptest-derive 0.6.0", + "rayon", + "sealed", + "static_assertions", + "zerocopy", +] + [[package]] name = "triehash" version = "0.8.4" @@ -13646,6 +13795,7 @@ checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" dependencies = [ "getrandom 0.3.4", "js-sys", + "rand 0.9.2", "wasm-bindgen", ] diff --git a/Cargo.toml b/Cargo.toml index c9ae7164d50..c6a725f4bf0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -591,7 +591,8 @@ mini-moka = "0.10" tar-no-std = { version = "0.3.2", default-features = false } miniz_oxide = { version = "0.8.4", default-features = false } chrono = "0.4.41" - +fixed-cache = "0.1" +rapidhash = "4.2.0" # metrics metrics = "0.24.0" metrics-derive = "0.1" @@ -670,6 +671,7 @@ proptest = "1.7" proptest-derive = "0.5" similar-asserts = { version = "1.5.0", features = ["serde"] } tempfile = "3.20" +tempdir = "0.3.7" test-fuzz = "7" rstest = "0.24.0" test-case = "3" @@ -738,6 +740,8 @@ vergen-git2 = "1.0.5" xlayer-db = { path = "crates/xlayer/db" } xlayer-rpc = { path = "crates/xlayer/rpc" } +triedb ={git="https://github.com/base/triedb.git"} + # [patch.crates-io] # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } # alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index 
d21c83ae7c4..d1942791f05 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -28,6 +28,7 @@ alloy-primitives = { workspace = true, features = ["std"] } alloy-consensus.workspace = true revm-database.workspace = true revm-state = { workspace = true, optional = true } +triedb.workspace=true # async tokio = { workspace = true, default-features = false, features = ["sync", "macros"] } diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index 254edb248b4..8edfee00f31 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -4,8 +4,8 @@ use alloy_primitives::{keccak256, Address, BlockNumber, Bytes, StorageKey, Stora use reth_errors::ProviderResult; use reth_primitives_traits::{Account, Bytecode, NodePrimitives}; use reth_storage_api::{ - AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider, - StateProvider, StateRootProvider, StorageRootProvider, + AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, PlainPostState, + StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, @@ -147,6 +147,45 @@ impl StateRootProvider for MemoryOverlayStateProviderRef<'_, input.prepend_self(self.trie_input().clone()); self.historical.state_root_from_nodes_with_updates(input) } + + fn state_root_with_updates_triedb( + &self, + plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + use std::collections::HashMap; + let mut cached_plain_state = PlainPostState::default(); + + for block in &self.in_memory { + let bundle_state = &block.execution_output.bundle; + for (address, bundle_account) in bundle_state.state() { + let account = if bundle_account.was_destroyed() || bundle_account.info.is_none() { + None + } else { + bundle_account.info.as_ref().map(|info| 
reth_primitives_traits::Account::from(info)) + }; + cached_plain_state.accounts.insert(*address, account); + + let storage_map = cached_plain_state.storages.entry(*address).or_insert_with(HashMap::new); + for (slot, storage_slot) in &bundle_account.storage { + let slot_b256 = B256::from_slice(&slot.to_be_bytes::<32>()); + storage_map.insert(slot_b256, storage_slot.present_value); + } + } + } + + let mut merged_state = cached_plain_state; + + for (address, account) in plain_state.accounts { + merged_state.accounts.insert(address, account); + } + + for (address, storage) in plain_state.storages { + let merged_storage = merged_state.storages.entry(address).or_insert_with(HashMap::new); + merged_storage.extend(storage); + } + + self.historical.state_root_with_updates_triedb(merged_state) + } } impl StorageRootProvider for MemoryOverlayStateProviderRef<'_, N> { diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index a8b5b3c8efd..7029c5fa113 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -21,7 +21,7 @@ use reth_node_core::{ dirs::{ChainPath, DataDirPath}, }; use reth_provider::{ - providers::{BlockchainProvider, NodeTypesForProvider, StaticFileProvider}, + providers::{BlockchainProvider, NodeTypesForProvider, StaticFileProvider, triedb::TriedbProvider}, ProviderFactory, StaticFileProviderFactory, }; use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; @@ -103,7 +103,8 @@ impl EnvironmentArgs { ), }; - let provider_factory = self.create_provider_factory(&config, db, sfp)?; + let triedb_provider = TriedbProvider::new(data_dir.triedb()); + let provider_factory = self.create_provider_factory(&config, db, sfp, triedb_provider)?; if access.is_read_write() { debug!(target: "reth::cli", chain=%self.chain.chain(), genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(&provider_factory)?; @@ -122,16 +123,19 @@ impl EnvironmentArgs { config: &Config, db: Arc, 
static_file_provider: StaticFileProvider, + triedb_provider: TriedbProvider ) -> eyre::Result>>> where C: ChainSpecParser, { let has_receipt_pruning = config.prune.has_receipts_pruning(); let prune_modes = config.prune.segments.clone(); + let factory = ProviderFactory::>>::new( db, self.chain.clone(), static_file_provider, + Arc::new(triedb_provider) ) .with_prune_modes(prune_modes.clone()) .with_genesis_block_number(self.chain.genesis().number.unwrap()); diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 9e8e68e9800..98255db3a91 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -15,6 +15,7 @@ use reth_provider::{ use reth_stages::{stages::ExecutionStage, Stage, StageCheckpoint, UnwindInput}; use std::sync::Arc; use tracing::info; +use reth_provider::providers::triedb::TriedbProvider; pub(crate) async fn dump_execution_stage( db_tool: &DbTool, @@ -42,6 +43,7 @@ where Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, + Arc::new(TriedbProvider::new(output_datadir.triedb())), ), to, from, diff --git a/crates/cli/commands/src/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs index 8b9ba5e937e..e532b01f227 100644 --- a/crates/cli/commands/src/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -12,6 +12,7 @@ use reth_provider::{ use reth_stages::{stages::AccountHashingStage, Stage, StageCheckpoint, UnwindInput}; use std::sync::Arc; use tracing::info; +use reth_provider::providers::triedb::TriedbProvider; pub(crate) async fn dump_hashing_account_stage>>( db_tool: &DbTool, @@ -39,6 +40,7 @@ pub(crate) async fn dump_hashing_account_stage>>( db_tool: &DbTool, @@ -29,6 +30,7 @@ pub(crate) async fn dump_hashing_storage_stage( db_tool: &DbTool, @@ -62,6 +63,7 @@ where Arc::new(output_db), db_tool.chain(), 
StaticFileProvider::read_write(output_datadir.static_files())?, + Arc::new(TriedbProvider::new(output_datadir.triedb())), ), to, from, diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 57d03f70fa5..ae45a962951 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -58,7 +58,10 @@ where PayloadAttributesBuilder<<::Payload as PayloadTypes>::PayloadAttributes>, { E2ETestSetupBuilder::new(num_nodes, chain_spec, attributes_generator) - .with_node_config_modifier(move |config| config.set_dev(is_dev)) + .with_node_config_modifier(move |mut config| { + config.set_dev(is_dev) + + }) .build() .await } diff --git a/crates/e2e-test-utils/src/setup_import.rs b/crates/e2e-test-utils/src/setup_import.rs index 81e5a386aac..565a4e49df7 100644 --- a/crates/e2e-test-utils/src/setup_import.rs +++ b/crates/e2e-test-utils/src/setup_import.rs @@ -10,8 +10,8 @@ use reth_node_builder::{EngineNodeLauncher, Node, NodeBuilder, NodeConfig, NodeH use reth_node_core::args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}; use reth_node_ethereum::EthereumNode; use reth_provider::{ - providers::BlockchainProvider, DatabaseProviderFactory, ProviderFactory, StageCheckpointReader, - StaticFileProviderFactory, + providers::{BlockchainProvider, ProviderFactory, triedb::TriedbProvider}, + DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory, }; use reth_rpc_server_types::RpcModuleSelection; use reth_stages_types::StageId; @@ -110,6 +110,7 @@ pub async fn setup_engine_with_chain_import( // Create database path and static files path let db_path = datadir.join("db"); let static_files_path = datadir.join("static_files"); + let triedb_dir = datadir.join("triedb"); // Initialize the database using init_db (same as CLI import command) // Use the same database arguments as the node will use @@ -125,6 +126,7 @@ pub async fn setup_engine_with_chain_import( db.clone(), chain_spec.clone(), 
reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone())?, + Arc::new(TriedbProvider::new(triedb_dir)), ); // Initialize genesis if needed @@ -311,6 +313,7 @@ mod tests { std::fs::create_dir_all(&datadir).unwrap(); let db_path = datadir.join("db"); let static_files_path = datadir.join("static_files"); + let triedb_dir = datadir.join("triedb"); // Import the chain { @@ -324,6 +327,7 @@ mod tests { chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone()) .unwrap(), + Arc::new(TriedbProvider::new(triedb_dir.clone())), ); // Initialize genesis @@ -384,6 +388,7 @@ mod tests { chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_only(static_files_path, false) .unwrap(), + Arc::new(TriedbProvider::new(triedb_dir.clone())), ); let provider = provider_factory.database_provider_ro().unwrap(); @@ -469,12 +474,14 @@ mod tests { // Create static files path let static_files_path = datadir.join("static_files"); + let triedb_dir = datadir.join("triedb"); // Create a provider factory let provider_factory: ProviderFactory = ProviderFactory::new( db.clone(), chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_write(static_files_path).unwrap(), + Arc::new(TriedbProvider::new(triedb_dir)), ); // Initialize genesis diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index dd708dee905..25ee9403fcd 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -17,6 +17,7 @@ reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-storage-api.workspace = true reth-transaction-pool.workspace = true +reth-payload-builder-primitives.workspace = true # alloy alloy-consensus.workspace = true @@ -34,6 +35,7 @@ tracing.workspace = true op-alloy-rpc-types-engine = { workspace = true, optional = true } reth-optimism-chainspec = { workspace = true, optional = true } +reth-optimism-forks = { workspace = true, optional 
= true } [lints] workspace = true @@ -42,5 +44,6 @@ workspace = true op = [ "dep:op-alloy-rpc-types-engine", "dep:reth-optimism-chainspec", + "dep:reth-optimism-forks", "reth-payload-primitives/op", ] diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index d6298502fb5..f8396646ca6 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -7,6 +7,7 @@ use eyre::OptionExt; use futures_util::{stream::Fuse, StreamExt}; use reth_engine_primitives::ConsensusEngineHandle; use reth_payload_builder::PayloadBuilderHandle; +use reth_payload_builder_primitives::PayloadEvents; use reth_payload_primitives::{ BuiltPayload, EngineApiMessageVersion, PayloadAttributesBuilder, PayloadKind, PayloadTypes, }; @@ -21,7 +22,7 @@ use std::{ }; use tokio::time::Interval; use tokio_stream::wrappers::ReceiverStream; -use tracing::error; +use tracing::{error, debug}; /// A mining mode for the local dev engine. #[derive(Debug)] @@ -190,8 +191,8 @@ where Ok(()) } - /// Generates payload attributes for a new block, passes them to FCU and inserts built payload - /// through newPayload. + /// Generates payload attributes for a new block, waits for InsertExecutedBlock to be processed, + /// then calls newPayload. 
async fn advance(&mut self) -> eyre::Result<()> { let timestamp = std::cmp::max( self.last_timestamp.saturating_add(1), @@ -201,6 +202,11 @@ where .as_secs(), ); + // Subscribe to payload events BEFORE building the payload to ensure we don't miss it + let payload_events = self.payload_builder.subscribe().await + .map_err(|e| eyre::eyre!("Failed to subscribe to payload events: {:?}", e))?; + let mut built_stream = payload_events.into_built_payload_stream(); + let res = self .to_engine .fork_choice_updated( @@ -223,16 +229,62 @@ where }; let block = payload.block(); + let block_hash = block.hash(); + + // Wait for the built_payloads stream to process this payload + // The payload builder emits payloads to the stream, which sends InsertExecutedBlock + // We wait for our specific payload to appear in the stream + debug!(target: "engine::local", block_hash=?block_hash, "Waiting for InsertExecutedBlock to be processed"); + + let mut found = false; + let timeout = tokio::time::Duration::from_millis(1000); + let start = tokio::time::Instant::now(); + + while !found && start.elapsed() < timeout { + tokio::select! 
{ + result = built_stream.next() => { + match result { + Some(p) => { + if let Some(executed_block) = p.executed_block() { + if executed_block.recovered_block().hash() == block_hash { + debug!(target: "engine::local", block_hash=?block_hash, "Found payload in built_payloads stream, InsertExecutedBlock should be processed"); + found = true; + // Give a small additional delay to ensure InsertExecutedBlock is fully processed + tokio::time::sleep(tokio::time::Duration::from_millis(10)).await; + break; + } + } + } + None => { + debug!(target: "engine::local", "Payload event stream ended"); + break; + } + } + } + _ = tokio::time::sleep(tokio::time::Duration::from_millis(10)) => { + // Continue waiting, but check timeout + if start.elapsed() >= timeout { + debug!(target: "engine::local", block_hash=?block_hash, "Timeout waiting for payload in built_payloads stream"); + break; + } + } + } + } + + if !found { + debug!(target: "engine::local", block_hash=?block_hash, "Did not find payload in built_payloads stream, proceeding anyway"); + } let payload = T::block_to_payload(payload.block().clone()); + debug!(target: "engine::local", block_hash=?block_hash, "start new_payload"); let res = self.to_engine.new_payload(payload).await?; - + debug!(target: "engine::local", block_hash=?block_hash, "end new_payload"); if !res.is_valid() { eyre::bail!("Invalid payload") } self.last_timestamp = timestamp; - self.last_block_hashes.push_back(block.hash()); + self.last_block_hashes.push_back(block_hash); // ensure we keep at most 64 blocks if self.last_block_hashes.len() > 64 { self.last_block_hashes.pop_front(); diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index 34deaf3e10c..b2dd3563210 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -7,6 +7,9 @@ use reth_ethereum_engine_primitives::EthPayloadAttributes; use reth_payload_primitives::PayloadAttributesBuilder; use std::sync::Arc; +#[cfg(feature = "op")] +use 
reth_optimism_forks::OpHardforks; + /// The attributes builder for local Ethereum payload. #[derive(Debug)] #[non_exhaustive] @@ -48,9 +51,26 @@ where impl PayloadAttributesBuilder for LocalPayloadAttributesBuilder where - ChainSpec: Send + Sync + EthereumHardforks + 'static, + ChainSpec: Send + Sync + EthereumHardforks + OpHardforks + 'static, { fn build(&self, timestamp: u64) -> op_alloy_rpc_types_engine::OpPayloadAttributes { + use alloy_primitives::B64; + + let eip_1559_params = if self.chain_spec.is_holocene_active_at_timestamp(timestamp) || + self.chain_spec.is_jovian_active_at_timestamp(timestamp) + { + Some(B64::ZERO) + } else { + None + }; + + let min_base_fee = if self.chain_spec.is_jovian_active_at_timestamp(timestamp) { + + Some(1_000_000_000u64) + } else { + None + }; + op_alloy_rpc_types_engine::OpPayloadAttributes { payload_attributes: self.build(timestamp), // Add dummy system transaction @@ -59,9 +79,9 @@ where .into(), ]), no_tx_pool: None, - gas_limit: None, - eip_1559_params: None, - min_base_fee: None, + gas_limit: Some(30_000_000), + eip_1559_params, + min_base_fee, } } } diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 34fa87b0f47..b8ca96e16f5 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -16,6 +16,7 @@ reth-chain-state.workspace = true reth-chainspec = { workspace = true, optional = true } reth-consensus.workspace = true reth-db.workspace = true +reth-node-core.workspace = true reth-engine-primitives.workspace = true reth-errors.workspace = true reth-execution-types.workspace = true @@ -66,6 +67,8 @@ tracing.workspace = true derive_more.workspace = true parking_lot.workspace = true crossbeam-channel.workspace = true +triedb.workspace=true +reth-storage-api.workspace = true # optional deps for test-utils reth-prune-types = { workspace = true, optional = true } @@ -94,9 +97,12 @@ reth-testing-utils.workspace = true reth-tracing.workspace = true reth-node-ethereum.workspace = 
true reth-e2e-test-utils.workspace = true - +alloy-signer-local.workspace = true +reth-storage-api.workspace = true +reth-trie-common.workspace = true # alloy revm-state.workspace = true +alloy-signer.workspace = true assert_matches.workspace = true criterion.workspace = true diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index fd9999b9eba..907f677bbc3 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -9,6 +9,7 @@ use reth_provider::{ AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; +use reth_storage_api::PlainPostState; use reth_revm::db::BundleState; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, @@ -201,6 +202,13 @@ impl StateRootProvider for CachedStateProvider { ) -> ProviderResult<(B256, TrieUpdates)> { self.state_provider.state_root_from_nodes_with_updates(input) } + + fn state_root_with_updates_triedb( + &self, + plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + self.state_provider.state_root_with_updates_triedb(plain_state) + } } impl StateProofProvider for CachedStateProvider { diff --git a/crates/engine/tree/src/tree/instrumented_state.rs b/crates/engine/tree/src/tree/instrumented_state.rs index 9d96aca3a2e..3cf80202ab0 100644 --- a/crates/engine/tree/src/tree/instrumented_state.rs +++ b/crates/engine/tree/src/tree/instrumented_state.rs @@ -8,6 +8,7 @@ use reth_provider::{ AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; +use reth_storage_api::PlainPostState; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, @@ -224,6 +225,13 @@ impl StateRootProvider for 
InstrumentedStateProvider { ) -> ProviderResult<(B256, TrieUpdates)> { self.state_provider.state_root_from_nodes_with_updates(input) } + + fn state_root_with_updates_triedb( + &self, + plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + self.state_provider.state_root_with_updates_triedb(plain_state) + } } impl StateProofProvider for InstrumentedStateProvider { diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index a3046262ca0..d541f86adf9 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1298,7 +1298,7 @@ where debug!(target: "engine::tree", count=blocks_to_persist.len(), blocks = ?blocks_to_persist.iter().map(|block| block.recovered_block().num_hash()).collect::>(), "Persisting blocks"); let (tx, rx) = oneshot::channel(); let _ = self.persistence.save_blocks(blocks_to_persist, tx); - + info!("start save blocks"); self.persistence_state.start_save(highest_num_hash, rx); } diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index 7fbae4cac5c..b853e26adc1 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -19,14 +19,14 @@ use alloy_rpc_types_engine::{ }; use assert_matches::assert_matches; use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; -use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; +use reth_chainspec::{ChainSpec, DEV, HOLESKY, MAINNET}; use reth_engine_primitives::{EngineApiValidator, ForkchoiceStatus, NoopInvalidBlockHook}; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_ethereum_primitives::{Block, EthPrimitives}; +use reth_ethereum_primitives::{Block, BlockBody, EthPrimitives}; use reth_evm_ethereum::MockEvmConfig; use reth_primitives_traits::Block as _; -use reth_provider::{test_utils::MockEthProvider, ExecutionOutcome}; +use 
reth_provider::{test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, ProviderFactory, providers::BlockchainProvider, LatestStateProviderRef}; use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::{ collections::BTreeMap, @@ -37,6 +37,16 @@ use std::{ }, }; use tokio::sync::oneshot; +use reth_chain_state::ExecutedBlock; +use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; +use revm::database::BundleState; +use alloy_eips::eip7685::Requests; +use alloy_eips::eip1559::INITIAL_BASE_FEE; // Import from alloy_eips instead +use alloy_signer::SignerSync; +use reth_execution_types::ExecutionOutcome; +// Add this import +use reth_db_common::init::init_genesis; +use reth_provider::test_utils::MockEthProvider; /// Mock engine validator for tests #[derive(Debug, Clone)] @@ -1991,3 +2001,360 @@ mod forkchoice_updated_tests { assert!(result.is_some(), "OpStack should handle canonical head"); } } + +#[test] +fn test_fcu_with_real_provider() { + use reth_chainspec::{EthereumHardfork}; + use reth_chainspec::EthChainSpec; + use reth_payload_primitives::EngineApiMessageVersion; + use reth_node_ethereum::EthEvmConfig; + use alloy_rpc_types_engine::PayloadAttributes; + use alloy_primitives::Address; + + reth_tracing::init_test_tracing(); + + let mut chain_spec = Arc::try_unwrap(DEV.clone()) + .unwrap_or_else(|arc| (*arc).clone()); + chain_spec.hardforks.remove(&EthereumHardfork::Cancun); + chain_spec.hardforks.remove(&EthereumHardfork::Shanghai); + chain_spec.hardforks.remove(&EthereumHardfork::Prague); + + let chain_spec = Arc::new(chain_spec); + let genesis_hash = chain_spec.genesis_hash(); + + let provider_factory = create_test_provider_factory_with_chain_spec(Arc::clone(&chain_spec)); + + init_genesis(&provider_factory).expect("Failed to initialize genesis"); + + let provider = BlockchainProvider::new(provider_factory.clone()) + .expect("Failed to create BlockchainProvider"); + + let consensus = 
Arc::new(EthBeaconConsensus::new(Arc::clone(&chain_spec))); + let payload_validator = MockEngineValidator; + + let (from_tree_tx, _from_tree_rx) = unbounded_channel(); + + let genesis_header = chain_spec.genesis_header().clone(); + let sealed_genesis_header = SealedHeader::seal_slow(genesis_header); + let engine_api_tree_state = + EngineApiTreeState::new(10, 10, sealed_genesis_header.num_hash(), EngineApiKind::Ethereum); + let canonical_in_memory_state = CanonicalInMemoryState::with_head( + sealed_genesis_header.clone(), + None, + None, + ); + + let (action_tx, _action_rx) = channel(); + let persistence_handle = PersistenceHandle::new(action_tx); + + let (to_payload_service, _payload_command_rx) = unbounded_channel(); + let payload_builder = PayloadBuilderHandle::new(to_payload_service); + payload_builder.spawn_payload_builder_service(); + let evm_config = EthEvmConfig::new(chain_spec.clone()); + + let engine_validator = BasicEngineValidator::new( + provider.clone(), + consensus.clone(), + evm_config.clone(), + payload_validator, + TreeConfig::default(), + Box::new(NoopInvalidBlockHook::default()), + ); + + let mut tree = EngineApiTreeHandler::new( + provider.clone(), + consensus, + engine_validator, + from_tree_tx, + engine_api_tree_state, + canonical_in_memory_state, + persistence_handle, + PersistenceState::default(), + payload_builder, + TreeConfig::default() + .with_legacy_state_root(false) + .with_has_enough_parallelism(true), + EngineApiKind::Ethereum, + evm_config, + ); + + let fcu_state = ForkchoiceState { + head_block_hash: genesis_hash, + safe_block_hash: genesis_hash, + finalized_block_hash: genesis_hash, + }; + + let genesis_timestamp = chain_spec.genesis_header().timestamp; + let payload_attrs = Some(PayloadAttributes { + timestamp: genesis_timestamp + 12, // 12 seconds after genesis + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: None, + parent_beacon_block_root: None, + }); + + let mut outcome = tree + 
.on_forkchoice_updated(fcu_state, payload_attrs, EngineApiMessageVersion::default()) + .expect("Failed to process forkchoice update"); + + println!("outcome: {outcome:?}"); + + // let rt = tokio::runtime::Builder::new_current_thread() + // .enable_all() + // .build() + // .unwrap(); + // rt.block_on(tokio::time::sleep(tokio::time::Duration::from_secs(86400))); + std::thread::sleep(std::time::Duration::from_secs(86400)); + + // let fcu_result = outcome.outcome.await.expect("Failed to await forkchoice result"); + // assert!( + // fcu_result.payload_status.is_valid() || fcu_result.payload_status.is_syncing(), + // "Forkchoice update should be valid or syncing, got: {:?}", + // fcu_result.payload_status + // ); +} + +#[test] +fn test_state_root_calculation_with_real_provider() { + reth_tracing::init_test_tracing(); + use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork}; + use reth_chainspec::EthChainSpec; + let mut chain_spec = Arc::try_unwrap(DEV.clone()) + .unwrap_or_else(|arc| (*arc).clone()); + chain_spec.hardforks.remove(&EthereumHardfork::Cancun); + chain_spec.hardforks.remove(&EthereumHardfork::Shanghai); + chain_spec.hardforks.remove(&EthereumHardfork::Prague); + + let chain_spec = Arc::new(chain_spec); + + // let chain_spec = Arc::new(chain_spec); + let genesis_hash = chain_spec.genesis_hash(); + + // Create a real provider factory with database + let provider_factory = create_test_provider_factory_with_chain_spec(Arc::clone(&chain_spec)); + + // Initialize genesis in the database + init_genesis(&provider_factory).expect("Failed to initialize genesis"); + + // Create BlockchainProvider from the factory + let provider = BlockchainProvider::new(provider_factory.clone()) + .expect("Failed to create BlockchainProvider"); + let genesis_block = provider.block(alloy_eips::HashOrNumber::Number(0)) + .expect("Failed to query genesis block"); + + assert!(genesis_block.is_some(), "Genesis block should exist"); + let genesis = genesis_block.unwrap(); + + // 
Seal the block to get its hash + let sealed_genesis = genesis.seal_slow(); + let block_hash = sealed_genesis.hash(); + + // Assert that the genesis block hash matches the expected genesis hash + assert_eq!( + block_hash, + genesis_hash, + "Genesis block hash should match chain spec genesis hash" + ); + + let consensus = Arc::new(EthBeaconConsensus::new(Arc::clone(&chain_spec))); + + let payload_validator = MockEngineValidator; + + let (from_tree_tx, from_tree_rx) = unbounded_channel(); + + let genesis_header = chain_spec.genesis_header().clone(); + let sealed_genesis_header = SealedHeader::seal_slow(genesis_header); + let engine_api_tree_state = + EngineApiTreeState::new(10, 10, sealed_genesis_header.num_hash(), EngineApiKind::Ethereum); + let canonical_in_memory_state = CanonicalInMemoryState::with_head( + sealed_genesis_header.clone(), + None, + None, + ); + + // Set up persistence + let (action_tx, _action_rx) = channel(); + let persistence_handle = PersistenceHandle::new(action_tx); + + // Set up payload builder + let (to_payload_service, _payload_command_rx) = unbounded_channel(); + let payload_builder = PayloadBuilderHandle::new(to_payload_service); + + // Use real EVM config (not mock) for actual execution + use reth_node_ethereum::EthEvmConfig; + let evm_config = EthEvmConfig::new(chain_spec.clone()); + + // Create engine validator + let engine_validator = BasicEngineValidator::new( + provider.clone(), + consensus.clone(), + evm_config.clone(), + payload_validator, + TreeConfig::default(), + Box::new(NoopInvalidBlockHook::default()), + ); + + // Create tree handler + let mut tree = EngineApiTreeHandler::new( + provider.clone(), + consensus, + engine_validator, + from_tree_tx, + engine_api_tree_state, + canonical_in_memory_state, + persistence_handle, + PersistenceState::default(), + payload_builder, + TreeConfig::default() + .with_legacy_state_root(false) + .with_has_enough_parallelism(true), + EngineApiKind::Ethereum, + evm_config, + ); + + use 
reth_node_core::args::DevArgs; + use alloy_signer_local::{coins_bip39::English, MnemonicBuilder, PrivateKeySigner}; + use reth_ethereum_primitives::{Transaction, TransactionSigned, Block, BlockBody}; + use alloy_consensus::{SignableTransaction, TxEip1559}; + use reth_chainspec::MIN_TRANSACTION_GAS; + use alloy_primitives::{Address, U256}; + use alloy_consensus::proofs::calculate_transaction_root; + + let dev_mnemonic = DevArgs::default().dev_mnemonic; + let sender_pk: PrivateKeySigner = MnemonicBuilder::::default() + .phrase(dev_mnemonic) + .index(0) + .expect("invalid derivation path") + .build() + .expect("failed to build signer from mnemonic"); + let sender_address = sender_pk.address(); + + let tx = Transaction::Eip1559(TxEip1559 { + chain_id: chain_spec.chain.id(), + nonce: 0, + gas_limit: MIN_TRANSACTION_GAS, + to: Address::random().into(), + max_fee_per_gas: INITIAL_BASE_FEE as u128, + max_priority_fee_per_gas: 1, + value: U256::from(10), + input: Default::default(), + access_list: Default::default(), + }); + + // Sign the transaction + let signature_hash = tx.signature_hash(); + let signature = sender_pk.sign_hash_sync(&signature_hash).unwrap(); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); + + // Create block 1 header + let mut block1_header = chain_spec.genesis_header().clone(); + block1_header.number = 1; + block1_header.parent_hash = genesis_hash; + block1_header.timestamp = block1_header.timestamp + 12; // 12 seconds later + block1_header.gas_limit = chain_spec.genesis_header().gas_limit; + + let genesis_header = chain_spec.genesis_header(); + let base_fee = chain_spec + .next_block_base_fee(genesis_header, block1_header.timestamp) + .expect("Failed to calculate base fee"); + block1_header.base_fee_per_gas = Some(base_fee); + + // Calculate transactions root + let transactions = vec![signed_tx]; + block1_header.transactions_root = calculate_transaction_root(&transactions); + + use 
reth_primitives_traits::proofs::calculate_receipt_root; + + // Create a temporary block to execute and get receipts + let temp_block = SealedBlock::::from_sealed_parts( + SealedHeader::seal_slow(block1_header.clone()), + BlockBody { + transactions: transactions.clone(), + ommers: Vec::new(), + withdrawals: None, + }, + ); + + // Recover senders for the block + let recovered_temp_block = temp_block.try_recover() + .expect("Failed to recover block"); + + + let db_provider = provider_factory.provider() + .expect("Failed to get database provider"); + let state_db = StateProviderDatabase::new(LatestStateProviderRef::new(&db_provider)); + let evm_config = EthEvmConfig::ethereum(chain_spec.clone()); + + use reth_evm::execute::Executor; + let execution_output = evm_config + .batch_executor(state_db) + .execute(&recovered_temp_block) + .expect("Failed to execute block"); + + // Calculate receipts root + use alloy_consensus::TxReceipt; + let receipts_with_bloom: Vec<_> = execution_output.result.receipts + .iter() + .map(|r| r.with_bloom_ref()) + .collect(); + let receipts_root = calculate_receipt_root(&receipts_with_bloom); + + block1_header.receipts_root =receipts_root; + block1_header.gas_used = execution_output.gas_used; + + + use reth_storage_api::StateRootProvider; + // Get hashed post state from bundle state + let hashed_state = HashedPostState::from_bundle_state::( + execution_output.state.state() + ); + + // Calculate state root using the same state provider + let state_provider_for_root = LatestStateProviderRef::new(&db_provider); + let (state_root, _trie_updates) = state_provider_for_root + .state_root_with_updates(hashed_state) + .expect("Failed to calculate state root"); + block1_header.state_root = state_root; + + // Seal the header + let sealed_block1_header = SealedHeader::seal_slow(block1_header); + let block1_hash = sealed_block1_header.hash(); + + // Create block 1 as a SealedBlock + use reth_primitives_traits::SealedBlock; + let sealed_block1 = 
SealedBlock::::from_sealed_parts( + sealed_block1_header, + BlockBody { + transactions, + ommers: Vec::new(), + withdrawals: None, + }, + ); + + // Create execution payload + let block1 = sealed_block1.into_block(); + let payload1 = ExecutionPayloadV1::from_block_unchecked(block1_hash, &block1); + + // Send newPayload for block 1 + let outcome = tree + .on_new_payload(ExecutionData { + payload: payload1.into(), + sidecar: ExecutionPayloadSidecar::none(), + }) + .expect("Failed to process new payload"); + + // Verify the outcome + assert!( + outcome.outcome.is_valid() || outcome.outcome.is_syncing(), + "Block 1 should be valid or syncing, got: {:?}", + outcome.outcome.status + ); + // + // // Verify state root was calculated by checking if the block was inserted + // // The state root calculation happens during block execution/validation + // // If the block is valid, it means state root was calculated correctly + // if outcome.outcome.is_valid() { + // // Block was successfully validated, which means state root calculation succeeded + // println!("Block 1 validated successfully - state root calculation completed"); + // } +} diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 575934007f9..f3b20bc13ae 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -69,6 +69,7 @@ alloy-sol-types.workspace = true alloy-contract.workspace = true alloy-rpc-types-beacon = { workspace = true, features = ["ssz"] } alloy-consensus.workspace = true +alloy-eips.workspace = true futures.workspace = true tokio.workspace = true diff --git a/crates/ethereum/node/tests/e2e/engine.rs b/crates/ethereum/node/tests/e2e/engine.rs new file mode 100644 index 00000000000..5182e52e14e --- /dev/null +++ b/crates/ethereum/node/tests/e2e/engine.rs @@ -0,0 +1,191 @@ +use crate::utils::eth_payload_attributes; +use alloy_genesis::Genesis; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_e2e_test_utils::{ + node::NodeTestContext, 
setup, transaction::TransactionTestContext, wallet::Wallet, +}; +use reth_node_builder::{NodeBuilder, NodeHandle}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; +use reth_node_ethereum::EthereumNode; +use reth_tasks::TaskManager; +use std::sync::Arc; +use reth_provider::BlockReaderIdExt; +use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; + +#[tokio::test] +async fn can_call_fcu_with_attributes_to_execute_next_block() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_sepc = ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(); + let (mut nodes, _tasks, _wallet) = setup::( + 1, + Arc::new(chain_sepc.clone()), + false, + eth_payload_attributes, + ) + .await?; + + let mut node = nodes.pop().unwrap(); + + let genesis_hash = node.block_hash(0); + + let provider = &node.inner.provider; + let current_head = provider + .sealed_header_by_number_or_tag(BlockNumberOrTag::Latest) + .unwrap() + .unwrap(); + let current_head_hash = current_head.hash(); + assert_eq!(current_head_hash,chain_sepc.genesis_hash()); + + // let current_head_number = current_head.number(); + let current_timestamp = current_head.timestamp; + // + // Create payload attributes for the next block + use alloy_rpc_types_engine::PayloadAttributes; + use alloy_primitives::{Address, B256}; + use reth_payload_primitives::EngineApiMessageVersion; + use reth_ethereum_engine_primitives::EthPayloadBuilderAttributes; + + let wallet = Wallet::default(); + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallet.inner).await; + let _tx_hash = node.rpc.inject_tx(raw_tx).await?; + + let payload_attrs = PayloadAttributes { + timestamp: current_timestamp + 12, // 12 seconds after current block + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: 
Some(B256::ZERO), + }; + // + // Call FCU with payload attributes + use alloy_rpc_types_engine::ForkchoiceState; + let fcu_state = ForkchoiceState { + head_block_hash: current_head_hash, + safe_block_hash: current_head_hash, + finalized_block_hash: current_head_hash, + }; + + let fcu_result = node + .inner + .add_ons_handle + .beacon_engine_handle + .fork_choice_updated( + fcu_state, + Some(payload_attrs.into()), + EngineApiMessageVersion::default(), + ) + .await?; + println!("fcu_result: {fcu_result:?}"); + + let payload_id = fcu_result + .payload_id + .expect("FCU with attributes should return a payload ID"); + + // Wait a bit for payload to be built + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + // Get the built payload + use reth_rpc_api::clients::EngineApiClient; + // use reth_ethereum_engine_primitives::EthEngineTypes; + + let engine_client = node.inner.add_ons_handle.beacon_engine_handle.clone(); + // engine_client. + // engine_client.new_payload().await; + let payload_builder_handle = node.inner.payload_builder_handle.clone(); + + // Wait a bit for payload to be built + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + // Get the best payload by payload_id + let built_payload = payload_builder_handle + .best_payload(payload_id) + .await + .transpose() + .ok() + .flatten() + .expect("Payload should be built"); + + // Convert the built payload to ExecutionData using the helper method + use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_node_api::PayloadTypes; + + let execution_data = EthEngineTypes::::block_to_payload( + built_payload.block().clone() + ); + + let new_payload_result = engine_client.new_payload(execution_data).await?; + println!("new_payload_result: {new_payload_result:?}"); + + + // // Verify FCU was successful and got a payload ID + // assert!( + // fcu_result.payload_status.is_valid(), + // "FCU should return valid status, got: {:?}", + // fcu_result.payload_status.status + // ); + // + 
// let payload_id = fcu_result + // .payload_id + // .expect("FCU with attributes should return a payload ID"); + // + // // Wait a bit for payload to be built + // tokio::time::sleep(std::time::Duration::from_millis(500)).await; + // + // // Get the built payload + // use reth_rpc_api::clients::EngineApiClient; + // use reth_ethereum_engine_primitives::EthEngineTypes; + // + // let engine_client = node.inner.add_ons_handle.beacon_engine_handle.clone(); + // let payload_envelope = EngineApiClient::::get_payload_v3( + // &engine_client, + // payload_id, + // ) + // .await?; + // + // // Verify the payload + // let built_block = payload_envelope.block(); + // assert_eq!( + // built_block.header.parent_hash, + // current_head_hash, + // "Built block should have correct parent hash" + // ); + // assert_eq!( + // built_block.header.number, + // current_head_number + 1, + // "Built block should be next block number" + // ); + // assert_eq!( + // built_block.header.timestamp, + // payload_attrs.timestamp, + // "Built block should have correct timestamp" + // ); + // + // // Submit the payload + // let new_block_hash = node.submit_payload(payload_envelope.payload().clone()).await?; + // + // // Update forkchoice to make the new block canonical + // node.update_forkchoice(current_head_hash, new_block_hash).await?; + // + // // Verify the new block is now the head + // let new_head = provider + // .sealed_header_by_number_or_tag(alloy_eips::eip2718::BlockNumberOrTag::Latest) + // .unwrap() + // .unwrap(); + // assert_eq!( + // new_head.hash(), + // new_block_hash, + // "New block should be the canonical head" + // ); + // assert_eq!( + // new_head.number(), + // current_head_number + 1, + // "New head should be next block number" + // ); + + Ok(()) +} \ No newline at end of file diff --git a/crates/ethereum/node/tests/e2e/main.rs b/crates/ethereum/node/tests/e2e/main.rs index 0ebee83cd55..238addd3bd5 100644 --- a/crates/ethereum/node/tests/e2e/main.rs +++ 
b/crates/ethereum/node/tests/e2e/main.rs @@ -7,5 +7,6 @@ mod p2p; mod pool; mod rpc; mod utils; +mod engine; const fn main() {} diff --git a/crates/evm/evm/Cargo.toml b/crates/evm/evm/Cargo.toml index 4bc8ef06dbb..32bd742c2a9 100644 --- a/crates/evm/evm/Cargo.toml +++ b/crates/evm/evm/Cargo.toml @@ -19,7 +19,6 @@ reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-storage-errors.workspace = true reth-trie-common.workspace = true - revm.workspace = true # alloy @@ -32,6 +31,7 @@ auto_impl.workspace = true derive_more.workspace = true futures-util.workspace = true metrics = { workspace = true, optional = true } +tracing.workspace = true [dev-dependencies] reth-ethereum-primitives.workspace = true diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index fca8f6241d5..7b79df5c029 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -18,13 +18,17 @@ pub use reth_execution_types::{BlockExecutionOutput, ExecutionOutcome}; use reth_primitives_traits::{ Block, HeaderTy, NodePrimitives, ReceiptTy, Recovered, RecoveredBlock, SealedHeader, TxTy, }; -use reth_storage_api::StateProvider; +use reth_storage_api::{PlainPostState, StateProvider}; pub use reth_storage_errors::provider::ProviderError; use reth_trie_common::{updates::TrieUpdates, HashedPostState}; +use std::collections::HashMap; +use std::time::Instant; +use alloy_primitives::U256; use revm::{ context::result::ExecutionResult, database::{states::bundle_state::BundleRetention, BundleState, State}, }; +use tracing::info; /// A type that knows how to execute a block. It is assumed to operate on a /// [`crate::Evm`] internally and use [`State`] as database. 
@@ -514,10 +518,68 @@ where db.merge_transitions(BundleRetention::Reverts); // calculate the state root - let hashed_state = state.hashed_post_state(&db.bundle_state); - let (state_root, trie_updates) = state - .state_root_with_updates(hashed_state.clone()) - .map_err(BlockExecutionError::other)?; + // let start = Instant::now(); + // let hashed_state = state.hashed_post_state(&db.bundle_state); + // info!("hashed_post_state, elapsed: {:?}", start.elapsed().as_millis()); + + // // Calculate state root using the previous method (mdbx) + // let (mdbx_state_root, mdbx_trie_updates) = state + // .state_root_with_updates(hashed_state.clone()) + // .map_err(BlockExecutionError::other)?; + + // Convert BundleState to PlainPostState for triedb computation + let start = Instant::now(); + tracing::info!("BasicBlockBuilder::finish, plain_state total_accts: {:?}", db.bundle_state.state().len()); + + let mut total_storage = 0; + let mut plain_state = PlainPostState::default(); + for (address, bundle_account) in db.bundle_state.state() { + let account = if bundle_account.was_destroyed() || bundle_account.info.is_none() { + None + } else { + bundle_account.info.as_ref().map(|info| reth_primitives_traits::Account::from(info)) + }; + plain_state.accounts.insert(*address, account); + + let mut storage_map = HashMap::new(); + for (slot, storage_slot) in &bundle_account.storage { + // Convert U256 slot to B256 (32-byte representation) + let slot_b256 = B256::from_slice(&slot.to_be_bytes::<32>()); + storage_map.insert(slot_b256, storage_slot.present_value); + } + if !storage_map.is_empty() { + plain_state.storages.insert(*address, storage_map); + total_storage += bundle_account.storage.len(); + } + + } + tracing::info!("BasicBlockBuilder::finish, plain_state total_storage: {:?}", total_storage); + info!("BasicBlockBuilder::finish, convert elapsed: {:?}", start.elapsed().as_millis()); + + // Calculate state root using triedb method + let start = Instant::now(); + let pr = 
state.state_root_with_updates_triedb(plain_state); + let (triedb_state_root, triedb_trie_updates) = + pr.map_err(BlockExecutionError::other)?; + info!("state_root_with_updates_triedb, elapsed: {:?}", start.elapsed().as_millis()); + + // // Compare the two state roots + // if mdbx_state_root != triedb_state_root { + // tracing::debug!( + // "reth::evm - State root mismatch! MDBX: {:?}, TrieDB: {:?}", + // mdbx_state_root, + // triedb_state_root + // ); + // } else { + // tracing::debug!( + // "reth::evm - State root match: {:?}", + // triedb_state_root + // ); + // } + + // Use triedb state root (or you can choose to use mdbx_state_root) + let state_root = triedb_state_root; + let trie_updates = triedb_trie_updates; let (transactions, senders) = self.transactions.into_iter().map(|tx| tx.into_parts()).unzip(); @@ -535,7 +597,7 @@ where let block = RecoveredBlock::new_unhashed(block, senders); - Ok(BlockBuilderOutcome { execution_result: result, hashed_state, trie_updates, block }) + Ok(BlockBuilderOutcome { execution_result: result, hashed_state: HashedPostState::default(), trie_updates, block }) } fn executor_mut(&mut self) -> &mut Self::Executor { diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index b865d742736..6200b3c9932 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -89,6 +89,7 @@ use tokio::sync::{ use futures::{future::Either, stream, Stream, StreamExt}; use reth_node_ethstats::EthStatsService; use reth_node_events::{cl::ConsensusLayerHealthEvents, node::NodeEvent}; +use reth_provider::providers::triedb::TriedbProvider; /// Reusable setup for launching a node. 
/// @@ -468,6 +469,7 @@ where self.right().clone(), self.chain_spec(), StaticFileProvider::read_write(self.data_dir().static_files())?, + Arc::new(TriedbProvider::new(self.data_dir().triedb())) ) .with_prune_modes(self.prune_modes()) .with_static_files_metrics() diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index 6f1d3bfc711..a6d4a34087f 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -62,7 +62,8 @@ impl DatabaseArgs { Some(0) => Some(MaxReadTransactionDuration::Unbounded), // if 0, disable timeout Some(secs) => Some(MaxReadTransactionDuration::Set(Duration::from_secs(secs))), }; - + tracing::info!("mdbx config, exclusive {:?}, max_read_transaction_duration {:?}, geometry_max_size {:?}, growth_step {:?}, max_readers {:?}, sync_mode {:?}", + self.exclusive, max_read_transaction_duration, self.max_size, self.growth_step, self.max_readers, self.sync_mode); reth_db::mdbx::DatabaseArguments::new(client_version) .with_log_level(self.log_level) .with_exclusive(self.exclusive) diff --git a/crates/node/core/src/args/datadir_args.rs b/crates/node/core/src/args/datadir_args.rs index cb0590f1779..d8e392281d1 100644 --- a/crates/node/core/src/args/datadir_args.rs +++ b/crates/node/core/src/args/datadir_args.rs @@ -27,6 +27,15 @@ pub struct DatadirArgs { verbatim_doc_comment )] pub static_files_path: Option, + + /// The absolute path to triedb path. 
+ #[arg( + long = "datadir.triedb", + alias = "datadir.triedb", + value_name = "PATH", + verbatim_doc_comment + )] + pub triedb_path: Option, } impl DatadirArgs { diff --git a/crates/node/core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs index ca7befc0f08..1ef07735a18 100644 --- a/crates/node/core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -41,7 +41,7 @@ impl Default for PayloadBuilderArgs { fn default() -> Self { Self { extra_data: default_extra_data(), - interval: Duration::from_secs(1), + interval: Duration::from_secs(1000), gas_limit: None, deadline: SLOT_DURATION, max_payload_tasks: 3, diff --git a/crates/node/core/src/dirs.rs b/crates/node/core/src/dirs.rs index 4f8507c4e68..0c30c227438 100644 --- a/crates/node/core/src/dirs.rs +++ b/crates/node/core/src/dirs.rs @@ -301,6 +301,18 @@ impl ChainPath { } } + /// Returns the path to the `TrieDB` database directory for this chain. + /// + /// `//triedb` + pub fn triedb(&self) -> PathBuf { + let datadir_args = &self.2; + if let Some(triedb_path) = &datadir_args.triedb_path { + triedb_path.clone() + } else { + self.data_dir().join("triedb") + } + } + /// Returns the path to the reth p2p secret key for this chain. 
/// /// `//discovery-secret` diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 0708cc57d51..e6b498e7bd7 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -20,14 +20,30 @@ reth-optimism-payload-builder.workspace = true reth-optimism-primitives.workspace = true reth-optimism-forks.workspace = true reth-apollo.workspace = true - +reth-chainspec.workspace = true +alloy-genesis.workspace = true +reth-provider.workspace = true +reth-storage-api.workspace = true +reth-trie-common.workspace = true +reth-db.workspace = true +reth-db-common.workspace = true +reth-node-types.workspace = true +reth-fs-util.workspace = true +reth-primitives-traits.workspace = true +alloy-primitives.workspace = true +alloy-trie.workspace = true +tracing-subscriber.workspace = true clap = { workspace = true, features = ["derive", "env"] } tracing.workspace = true - +uuid = { version = "1", features = ["v4", "fast-rng"] } # xlayer xlayer-rpc.workspace = true xlayer-db.workspace = true - +tempdir.workspace = true +triedb.workspace = true +eyre.workspace = true +rand.workspace = true +serde_json.workspace = true [lints] workspace = true @@ -60,3 +76,11 @@ min-trace-logs = ["tracing/release_max_level_trace"] [[bin]] name = "op-reth" path = "src/main.rs" + +[[bin]] +name = "state_root_overlay" +path = "src/state_root_overlay.rs" + +[[bin]] +name = "merge_genesis" +path = "src/merge_genesis.rs" diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index 2119b5894fc..9d79bf125eb 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -10,6 +10,9 @@ use std::{path::Path, sync::Arc}; use tracing::error; use xlayer_db::utils::{initialize, set_enable_inner_tx}; use xlayer_rpc::utils::{XlayerExt, XlayerExtApiServer}; +use tracing_subscriber::fmt::format::FmtSpan; +use tracing_subscriber::{fmt, prelude::*, Registry}; +use uuid::Uuid; #[global_allocator] static ALLOC: 
reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); @@ -26,7 +29,7 @@ fn main() { if let Err(err) = Cli::::parse().run(async move |builder, rollup_args| { - info!(target: "reth::cli", "Launching node"); + info!(target: "reth::cli", "Launching node triedb"); // For X Layer if rollup_args.xlayer_args.apollo.enabled { diff --git a/crates/optimism/bin/src/merge_genesis.rs b/crates/optimism/bin/src/merge_genesis.rs new file mode 100644 index 00000000000..3ce4ac6f39d --- /dev/null +++ b/crates/optimism/bin/src/merge_genesis.rs @@ -0,0 +1,74 @@ +use alloy_genesis::Genesis; +use alloy_primitives::Address; +use std::collections::BTreeMap; +use std::env; +use std::path::PathBuf; +use eyre::Result; + +fn main() -> Result<()> { + // Get the genesis.json file path from command line arguments + let genesis_json_path = env::args() + .nth(1) + .map(PathBuf::from) + .ok_or_else(|| eyre::eyre!("Usage: merge_genesis "))?; + + // Get the genesis_random.json file path from command line arguments + let genesis_random_json_path = env::args() + .nth(2) + .map(PathBuf::from) + .ok_or_else(|| eyre::eyre!("Usage: merge_genesis "))?; + + // Get the output file path from command line arguments + let merged_genesis_json_path = env::args() + .nth(3) + .map(PathBuf::from) + .ok_or_else(|| eyre::eyre!("Usage: merge_genesis "))?; + + // Read the base genesis.json file + let genesis_json_content = std::fs::read_to_string(&genesis_json_path)?; + let mut base_genesis: Genesis = serde_json::from_str(&genesis_json_content)?; + + println!("Loaded base genesis from {}", genesis_json_path.display()); + + // Read the genesis_random.json file + let genesis_random_json_content = std::fs::read_to_string(&genesis_random_json_path)?; + let random_genesis: Genesis = serde_json::from_str(&genesis_random_json_content)?; + + println!("Loaded random genesis from {}", genesis_random_json_path.display()); + + // Get the alloc from random_genesis + let random_alloc = random_genesis.alloc; + 
+ if random_alloc.is_empty() { + println!("Warning: genesis_random.json has no accounts in alloc"); + } + + // Merge alloc: use random_genesis alloc to replace or insert into base_genesis alloc + let mut merged_count = 0; + let mut replaced_count = 0; + + { + let base_alloc = &mut base_genesis.alloc; + + for (address, account) in random_alloc { + if base_alloc.contains_key(&address) { + replaced_count += 1; + } else { + merged_count += 1; + } + base_alloc.insert(address, account); + } + } + + println!("Merged {} new accounts, replaced {} existing accounts", merged_count, replaced_count); + + // Write the merged genesis to output file + let json_string = serde_json::to_string_pretty(&base_genesis)?; + std::fs::write(&merged_genesis_json_path, json_string)?; + + println!("Written merged genesis to {}", merged_genesis_json_path.display()); + println!("Total accounts in merged genesis: {}", base_genesis.alloc.len()); + + Ok(()) +} + diff --git a/crates/optimism/bin/src/state_root_overlay.rs b/crates/optimism/bin/src/state_root_overlay.rs new file mode 100644 index 00000000000..4d381fcb25f --- /dev/null +++ b/crates/optimism/bin/src/state_root_overlay.rs @@ -0,0 +1,334 @@ +use alloy_primitives::{keccak256, Address, B256, U256, StorageKey, StorageValue}; +use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; +use reth_optimism_chainspec::OpChainSpecBuilder; +use reth_provider::{ + DatabaseProviderFactory, HashingWriter, LatestStateProvider, TrieWriter, +}; +use reth_primitives_traits::Account; +use reth_storage_api::{StateRootProvider}; +use reth_trie_common::{HashedPostState, HashedStorage}; +use std::collections::{BTreeMap, HashMap, HashSet}; +use std::path::PathBuf; +use std::sync::{Arc, mpsc}; +use std::thread; +use std::time::Instant; +use alloy_genesis::{Genesis, GenesisAccount}; +use alloy_primitives::map::B256Map; +use tempdir::TempDir; +use triedb::{ + account::Account as TrieDBAccount, + overlay::{OverlayStateMut, OverlayValue}, + path::{AddressPath, StoragePath}, + 
Database, +}; +use reth_db::{init_db, ClientVersion, DatabaseEnv}; +use reth_db::mdbx::DatabaseArguments; +use reth_db_common::init::compute_state_root; +use reth_node_types::NodeTypesWithDBAdapter; +use reth_optimism_node::OpNode; +use reth_optimism_primitives::OpPrimitives; +use crate::util::{setup_tdb_database}; + +mod util; + +fn main() -> eyre::Result<()> { + println!("Testing overlay state root calculation methods..."); + + // Generate 4 sets of data in parallel using threads + let num_threads = 4; + let (tx, rx) = mpsc::channel(); + + let start_gen = Instant::now(); + + // Spawn 4 threads to generate data in parallel + for thread_id in 0..num_threads { + let tx_clone = tx.clone(); + thread::spawn(move || { + let (base_addresses, base_accounts_map, base_storage_map, _overlay_acct, _overlay_storage) = + util::generate_shared_test_data( + util::DEFAULT_SETUP_DB_EOA_SIZE, + util::DEFAULT_SETUP_DB_CONTRACT_SIZE, + util::DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, + util::BATCH_SIZE, + ); + + tx_clone.send((thread_id, base_addresses, base_accounts_map, base_storage_map)) + .expect("Failed to send data from thread"); + }); + } + + // Drop the original sender so the receiver knows when all threads are done + drop(tx); + + // Collect results from all threads + let mut all_results: Vec<(usize, Vec
, HashMap, HashMap>)> = Vec::new(); + for received in rx { + all_results.push(received); + } + + // Sort by thread_id for consistent ordering + all_results.sort_by_key(|(thread_id, _, _, _)| *thread_id); + + println!("Generated {} sets of data in parallel, elapsed: {:?} ms", num_threads, start_gen.elapsed().as_millis()); + + // Merge all base_accounts_map into one + let mut merged_base_accounts_map: HashMap = HashMap::new(); + for (_, _, accounts_map, _) in &all_results { + for (address, account) in accounts_map { + // If address already exists, we keep the first one (or you can decide on merge strategy) + merged_base_accounts_map.entry(*address).or_insert(*account); + } + } + + // Merge all base_storage_map into one + let mut merged_base_storage_map: HashMap> = HashMap::new(); + for (_, _, _, storage_map) in &all_results { + for (address, storage) in storage_map { + let merged_storage = merged_base_storage_map + .entry(*address) + .or_insert_with(HashMap::new); + // Merge storage entries - if key exists, keep the first value (or you can decide on merge strategy) + for (key, value) in storage { + merged_storage.entry(*key).or_insert(*value); + } + } + } + + // Collect all base addresses (deduplicated) + let mut merged_base_addresses: Vec
= Vec::new(); + let mut seen_addresses = HashSet::new(); + for (_, addresses, _, _) in &all_results { + for address in addresses { + if seen_addresses.insert(*address) { + merged_base_addresses.push(*address); + } + } + } + + println!("Merged {} base addresses, {} accounts, {} addresses with storage", + merged_base_addresses.len(), + merged_base_accounts_map.len(), + merged_base_storage_map.len() + ); + + // Use the merged data for the rest of the code + let base_addresses = merged_base_addresses; + let base_accounts_map = merged_base_accounts_map; + let base_storage_map = merged_base_storage_map; + + // // For overlay, we'll use the first thread's overlay data (or generate new if needed) + // // For now, we'll generate overlay from the first result + // let (_, _, _, overlay_acct, overlay_storage) = if !all_results.is_empty() { + // // Generate overlay data separately since we only need it once + // let (_, _, _, overlay_acct, overlay_storage) = util::generate_shared_test_data( + // util::DEFAULT_SETUP_DB_EOA_SIZE, + // util::DEFAULT_SETUP_DB_CONTRACT_SIZE, + // util::DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, + // util::BATCH_SIZE, + // ); + // (Vec::new(), HashMap::new(), HashMap::new(), overlay_acct, overlay_storage) + // } else { + // (Vec::new(), HashMap::new(), HashMap::new(), HashMap::new(), HashMap::new()) + // }; + + // println!("Generated {} base addresses, {} overlay accounts, overlay storage {}", base_addresses.len(), overlay_acct.len(), overlay_storage.len()); + + // Convert base_accounts_map and base_storage_map to genesis alloc format + let mut genesis_alloc: BTreeMap = BTreeMap::new(); + + for (address, account) in &base_accounts_map { + // Convert storage from HashMap to BTreeMap + let storage = base_storage_map.get(address).map(|storage_map| { + storage_map + .iter() + .filter(|(_, v)| !v.is_zero()) // Only include non-zero storage values + .map(|(k, v)| { + // Convert U256 to B256 for storage value + (*k, B256::from_slice(&v.to_be_bytes::<32>())) + 
}) + .collect::>() + }); + + let genesis_account = GenesisAccount { + nonce: Some(account.nonce), + balance: account.balance, + code: None, // We only have bytecode_hash, not the actual code + storage: storage.filter(|s| !s.is_empty()), + private_key: None, + }; + + genesis_alloc.insert(*address, genesis_account); + } + + // Create Genesis struct with the alloc + let genesis = Genesis { + alloc: genesis_alloc, + ..Genesis::default() + }; + + // Write to genesis.json file + let genesis_json_path = PathBuf::from("genesis_random_merged.json"); + let json_string = serde_json::to_string_pretty(&genesis)?; + std::fs::write(&genesis_json_path, json_string)?; + println!("Written genesis alloc to {}", genesis_json_path.display()); + + // let dir = TempDir::new("triedb_overlay_base").unwrap(); + // let main_file_name_path = dir.path().join("triedb"); + // let triedb = Database::create_new(&main_file_name_path).unwrap(); + + // setup_tdb_database(&triedb, &base_addresses, &base_accounts_map, &base_storage_map).unwrap(); + + // let mut account_overlay_mut = OverlayStateMut::new(); + + // for (address, account) in &overlay_acct { + // let address_path = AddressPath::for_address(*address); + // let trie_account = TrieDBAccount::new( + // account.nonce, + // account.balance, + // EMPTY_ROOT_HASH, + // account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + // ); + // account_overlay_mut.insert(address_path.clone().into(), Some(OverlayValue::Account(trie_account))); + // } + + // // Add overlay storage + // for (address, storage) in &overlay_storage { + // let address_path = AddressPath::for_address(*address); + // for (storage_key, storage_value) in storage { + // // Convert B256 back to U256 to get the raw storage slot + // let raw_slot = U256::from_be_slice(storage_key.as_slice()); + // let storage_path = StoragePath::for_address_path_and_slot( + // address_path.clone(), + // StorageKey::from(raw_slot), + // ); + + // if storage_value.is_zero() { + // // Zero value means delete the 
storage slot + // account_overlay_mut.insert( + // storage_path.clone().into(), + // None, // ✅ Delete slot for zero values + // ); + // } else { + // // Non-zero value: insert the storage entry + // account_overlay_mut.insert( + // storage_path.clone().into(), + // Some(OverlayValue::Storage(StorageValue::from_be_slice( + // storage_value.to_be_bytes::<32>().as_slice() + // ))), + // ); + // } + // } + // } + // let account_overlay = account_overlay_mut.freeze(); + + // let start = Instant::now(); + // let tx = triedb.begin_ro()?; + // let triedb_root = tx.compute_root_with_overlay(account_overlay.clone())?; + // println!("triedb_root = {:?}, overlay state root elapsed = {:?} ms", triedb_root.root, start.elapsed().as_millis()); + + // let start = Instant::now(); + // tx.commit()?; + // println!("triedb commit elapsed = {:?} ns", start.elapsed().as_nanos()); + + // // ===== Setup MDBX ===== + // println!("\nSetting up MDBX..."); + // // Create a chain spec with empty genesis allocation but keep base mainnet hardforks + // let empty_chain_spec = Arc::new( + // OpChainSpecBuilder::base_mainnet() + // .genesis(Genesis::default()) // Empty genesis with no alloc + // .build(), + // ); + + + // let datadir = tempdir::TempDir::new("state_root_overlay")?; + // let db_path = datadir.path().join("mdbx"); + // let sf_path = datadir.path().join("static_files"); + // let triedb_path = datadir.path().join("triedb"); + // reth_fs_util::create_dir_all(&db_path)?; + // reth_fs_util::create_dir_all(&sf_path)?; + // reth_fs_util::create_dir_all(&triedb_path)?; + + // let db = Arc::new(init_db( + // &db_path, + // DatabaseArguments::new(ClientVersion::default()), + // )?); + + // use reth_provider::providers::StaticFileProvider; + // let sfp: StaticFileProvider = StaticFileProvider::read_write(sf_path)?; + + // use reth_provider::providers::triedb::TriedbProvider; + // let triedb_provider = Arc::new(TriedbProvider::new(&triedb_path)); + + // use 
reth_provider::providers::ProviderFactory; + // let provider_factory: ProviderFactory>> = + // ProviderFactory::new( + // db, + // empty_chain_spec.clone(), + // sfp, + // triedb_provider, + // ); + // // Insert base data + // { + // let mut provider_rw = provider_factory.provider_rw()?; + // let accounts: Vec<(Address, Account)> = base_accounts_map.iter().map(|(a, acc)| (*a, *acc)).collect(); + // let storage_entries: Vec<(Address, Vec)> = base_storage_map + // .iter() + // .map(|(address, storage)| { + // let entries: Vec = storage + // .iter() + // .map(|(key, value)| reth_primitives_traits::StorageEntry { + // key: *key, + // value: *value, + // }) + // .collect(); + // (*address, entries) + // }) + // .collect(); + + // let accounts_for_hashing = accounts.iter().map(|(address, account)| (*address, Some(*account))); + // provider_rw.insert_account_for_hashing(accounts_for_hashing)?; + // provider_rw.insert_storage_for_hashing(storage_entries)?; + + // let ret = compute_state_root(provider_rw.as_ref(), None)?; + // provider_rw.commit()?; + + // } + + // // Build HashedPostState from overlay + // let mut hashed_accounts: Vec<(B256, Option)> = overlay_acct + // .iter() + // .map(|(address, account)| { + // let hashed = keccak256(address); + // (hashed, Some(*account)) + // }) + // .collect(); + + // let mut hashed_storages: B256Map = HashMap::default(); + // for (address, storage) in &overlay_storage { + // let hashed_address = keccak256(address); + // let hashed_storage = HashedStorage::from_iter( + // false, + // storage.iter().map(|(key, value)| { + // let hashed_slot = keccak256(*key); + // (hashed_slot, *value) + // }), + // ); + // hashed_storages.insert(hashed_address, hashed_storage); + // } + + // let hashed_state = HashedPostState { + // accounts: hashed_accounts.into_iter().collect(), + // storages: hashed_storages, + // }; + + // let db_provider_ro = provider_factory.database_provider_ro()?; + // let latest_ro = 
LatestStateProvider::new(db_provider_ro); + + // let start = Instant::now(); + // let (mdbx_root, _updates) = latest_ro.state_root_with_updates(hashed_state)?; + + // println!("MDBX state root: {:?}, overlay state root elapsed {:?} ms", mdbx_root, start.elapsed().as_millis()); + // assert_eq!(mdbx_root, triedb_root.root); + + Ok(()) +} \ No newline at end of file diff --git a/crates/optimism/bin/src/util.rs b/crates/optimism/bin/src/util.rs new file mode 100644 index 00000000000..4feed6254e5 --- /dev/null +++ b/crates/optimism/bin/src/util.rs @@ -0,0 +1,348 @@ +use std::path::{Path, PathBuf}; +use tempdir::TempDir; +use rand::prelude::*; +use rand::RngCore; +use alloy_primitives::{Address, StorageKey, StorageValue, U256, B256}; +use reth_primitives_traits::{Account, StorageEntry}; +use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; +use triedb::{ + account::Account as TrieDBAccount, + path::{AddressPath, StoragePath}, + transaction::TransactionError, + Database, +}; +use std::{ + fs, io, + sync::{Arc, Barrier}, + thread, + time::Duration, +}; +use std::collections::HashMap; + +pub const BATCH_SIZE: usize = 20_000; + +pub fn generate_random_address(rng: &mut StdRng) -> AddressPath { + let mut bytes = [0u8; 20]; + rng.fill_bytes(&mut bytes); + let addr = Address::from_slice(&bytes); + AddressPath::for_address(addr) +} + +pub const DEFAULT_SETUP_DB_EOA_SIZE: usize = 2_000_000; +pub const DEFAULT_SETUP_DB_CONTRACT_SIZE: usize = 500_000; +pub const DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT: usize = 40; +pub const SEED_EOA: u64 = 42; // EOA seeding value +pub const SEED_CONTRACT: u64 = 43; // contract account seeding value + + +#[derive(Debug)] +#[allow(dead_code)] +pub struct FlatTrieDatabase { + _base_dir: Option, + pub main_file_name: String, + pub file_name_path: PathBuf, + pub meta_file_name: String, + pub meta_file_name_path: PathBuf, +} +pub fn get_flat_trie_database( + fallback_eoa_size: usize, + fallback_contract_size: usize, + fallback_storage_per_contract: usize, + 
overlay_size: usize, +) -> (FlatTrieDatabase,(HashMap, HashMap>) ){ + + let dir = TempDir::new("triedb_bench_base").unwrap(); + + let main_file_name_path = dir.path().join("triedb"); + let meta_file_name_path = dir.path().join("triedb.meta"); + let db = Database::create_new(&main_file_name_path).unwrap(); + + let (addresses, accounts_map, storage_map, overlay_acct, overlay_storage) = + generate_shared_test_data(fallback_eoa_size, fallback_contract_size, fallback_storage_per_contract, overlay_size); + + let ret = setup_tdb_database(&db, &addresses, &accounts_map, &storage_map) + .unwrap(); + + (FlatTrieDatabase { + _base_dir: Some(dir), + main_file_name: "triedb".to_string(), + file_name_path: main_file_name_path, + meta_file_name: "triedb.meta".to_string(), + meta_file_name_path, + }, (overlay_acct, overlay_storage )) +} +pub fn setup_tdb_database( + db: &Database, + addresses: &[Address], + accounts_map: &HashMap, + storage_map: &HashMap>, +) -> Result<(), TransactionError> { + { + let mut tx = db.begin_rw()?; + + // Set accounts from the provided data + for address in addresses { + if let Some(account) = accounts_map.get(address) { + let address_path = AddressPath::for_address(*address); + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + tx.set_account(address_path, Some(trie_account))?; + } + } + + // Set storage from the provided data (only for contracts) + for (address, storage) in storage_map { + let address_path = AddressPath::for_address(*address); + for (storage_key, storage_value) in storage { + let storage_path = StoragePath::for_address_path_and_slot( + address_path.clone(), + StorageKey::from(*storage_key), + ); + // Fix: Use the actual storage value, not the slot + let storage_value_triedb = StorageValue::from_be_slice( + storage_value.to_be_bytes::<32>().as_slice() + ); + tx.set_storage_slot(storage_path, Some(storage_value_triedb))?; + } + } + + 
tx.commit()?; + } + + Ok(()) +} + +// Helper function to generate shared test data using alloy primitives +pub fn generate_shared_test_data( + eoa_count: usize, + contract_count: usize, + storage_per_contract: usize, + overlay_count: usize, // total number of overlay addresses (can include duplicates and new ones) +) -> ( + Vec
, // all base addresses (EOA + contracts) + HashMap, // base accounts map + HashMap>, // base storage map: address -> storage_key -> value + HashMap, // overlay accounts map (can have duplicates with base + new addresses) + HashMap>, // overlay storage map +) { + let mut rng = StdRng::seed_from_u64(SEED_CONTRACT); + + // Generate EOA addresses + let eoa_addresses: Vec
= (0..eoa_count).map(|_| { + let mut addr_bytes = [0u8; 20]; + rng.fill(&mut addr_bytes); + Address::from_slice(&addr_bytes) + }).collect(); + + // Generate contract addresses + let contract_addresses: Vec
= (0..contract_count).map(|_| { + let mut addr_bytes = [0u8; 20]; + rng.fill(&mut addr_bytes); + Address::from_slice(&addr_bytes) + }).collect(); + + // Combine all base addresses + let mut addresses = eoa_addresses.clone(); + addresses.extend(contract_addresses.clone()); + + // Generate base accounts map + let mut accounts_map = HashMap::new(); + for (i, address) in addresses.iter().enumerate() { + let account = Account { + nonce: i as u64, + balance: U256::from(i as u64), + bytecode_hash: if contract_addresses.contains(address) { + // Contracts have bytecode hash + Some(EMPTY_ROOT_HASH) + } else { + // EOAs have no bytecode + None + }, + }; + accounts_map.insert(*address, account); + } + + // Generate base storage map (only for contracts) + let mut storage_map: HashMap> = HashMap::new(); + for address in &contract_addresses { + let mut contract_storage = HashMap::new(); + for key in 1..=storage_per_contract { + let storage_key = B256::from(U256::from(key)); + let storage_value = U256::from(key); + contract_storage.insert(storage_key, storage_value); + } + storage_map.insert(*address, contract_storage); + } + + // Generate overlay states + // Some addresses can be duplicates (updates to existing), some can be new + let mut overlay_accounts_map = HashMap::new(); + let mut overlay_storage_map: HashMap> = HashMap::new(); + + for i in 0..overlay_count { + // Randomly decide: duplicate existing address or new address + let is_existing = rng.gen_bool(0.5) && !addresses.is_empty(); + let address = if is_existing { + // Update existing account (only storage, no account update) + addresses[rng.gen_range(0..addresses.len())] + } else { + // Create new account + let mut addr_bytes = [0u8; 20]; + rng.fill(&mut addr_bytes); + Address::from_slice(&addr_bytes) + }; + + // Only generate overlay account for newly created accounts + if !is_existing { + // Generate overlay account (with different values) + let overlay_account = Account { + nonce: (i + 1000) as u64, // different 
nonce + balance: U256::from((i + 2000) as u64), // different balance + bytecode_hash: if rng.gen_bool(0.3) { + // 30% chance to be a contract + Some(EMPTY_ROOT_HASH) + } else { + None + }, + }; + overlay_accounts_map.insert(address, overlay_account); + } + + // Generate overlay storage (only for contracts) + // For existing addresses, check if they're contracts in base data + // For new addresses, check if the overlay account is a contract + let is_contract = if is_existing { + // Check if existing address is a contract in base data + accounts_map.get(&address) + .map(|acc| acc.bytecode_hash.is_some()) + .unwrap_or(false) + } else { + // Check if new overlay account is a contract + overlay_accounts_map.get(&address) + .map(|acc| acc.bytecode_hash.is_some()) + .unwrap_or(false) + }; + + if is_contract { + let mut contract_storage = HashMap::new(); + + // Random number of storage changes (max half of storage_per_contract) + let max_changes = (storage_per_contract / 2).max(1); + let num_changes = rng.gen_range(1..=max_changes); + + // Get existing storage if this address exists in base storage_map + let existing_storage = storage_map.get(&address); + + for _ in 0..num_changes { + let change_type = rng.gen_range(0..3); // 0: new, 1: delete, 2: update + + match change_type { + 0 => { + // New storage slot + let storage_key = B256::from(U256::from(rng.gen_range(1000..2000))); + let storage_value = U256::from(rng.gen_range(5000..10000)); + contract_storage.insert(storage_key, storage_value); + } + 1 => { + // Delete existing storage (value = 0) + if let Some(existing) = existing_storage { + if !existing.is_empty() { + let keys: Vec = existing.keys().copied().collect(); + if !keys.is_empty() { + let key_to_delete = keys[rng.gen_range(0..keys.len())]; + contract_storage.insert(key_to_delete, U256::ZERO); + } + } + } + } + 2 => { + // Update existing storage + if let Some(existing) = existing_storage { + if !existing.is_empty() { + let keys: Vec = 
existing.keys().copied().collect(); + if !keys.is_empty() { + let key_to_update = keys[rng.gen_range(0..keys.len())]; + let new_value = U256::from(rng.gen_range(10000..20000)); + contract_storage.insert(key_to_update, new_value); + } + } + } + } + _ => unreachable!(), + } + } + + if !contract_storage.is_empty() { + overlay_storage_map.insert(address, contract_storage); + } + } + } + + ( + addresses, + accounts_map, + storage_map, + overlay_accounts_map, + overlay_storage_map, + ) +} + +pub fn copy_files(from: &FlatTrieDatabase, to: &Path) -> Result<(), io::Error> { + for (file, from_path) in [ + (&from.main_file_name, &from.file_name_path), + (&from.meta_file_name, &from.meta_file_name_path), + ] { + let to_path = to.join(file); + fs::copy(from_path, &to_path)?; + } + Ok(()) +} + + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_generate_shared_test_data_single_eoa() { + let (addresses, accounts_map, storage_map, overlay_accounts_map, overlay_storage_map) = + generate_shared_test_data(1, 0, 0, 0); + + // Should have exactly 1 base address (EOA) + assert_eq!(addresses.len(), 1, "Should have exactly 1 base address"); + + // Should have exactly 1 account in base accounts map + assert_eq!(accounts_map.len(), 1, "Should have exactly 1 account in base accounts map"); + + // Verify the account properties + let address = &addresses[0]; + let account = accounts_map.get(address).expect("Address should exist in accounts_map"); + assert_eq!(account.nonce, 0, "EOA should have nonce 0"); + assert_eq!(account.balance, U256::from(0), "EOA should have balance 0"); + assert_eq!(account.bytecode_hash, None, "EOA should have no bytecode hash"); + + // Storage map should be empty (no contracts) + assert!(storage_map.is_empty(), "Storage map should be empty when contract_count is 0"); + + // Overlay maps should be empty (overlay_count is 0) + assert!(overlay_accounts_map.is_empty(), "Overlay accounts map should be empty when overlay_count is 0"); + 
assert!(overlay_storage_map.is_empty(), "Overlay storage map should be empty when overlay_count is 0"); + } + #[test] + fn test_generate_shared_test_data_single_eoa_single_contract() { + let (addresses, accounts_map, storage_map, overlay_accounts_map, overlay_storage_map) = + generate_shared_test_data(1, 1, 0, 0); + + // Should have exactly 1 base address (EOA) + assert_eq!(addresses.len(), 2, "Should have exactly 1 base address"); + + // Should have exactly 1 account in base accounts map + assert_eq!(accounts_map.len(), 2, "Should have exactly 1 account in base accounts map"); + + + } +} \ No newline at end of file diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index d7bbe29330f..c94de8db4f7 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -20,7 +20,7 @@ reth-execution-types.workspace = true reth-storage-errors.workspace = true reth-rpc-eth-api = { workspace = true, optional = true } - +tracing.workspace = true # ethereum alloy-eips.workspace = true alloy-evm.workspace = true diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index e5df16ee2e7..f974fc2f684 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -162,6 +162,17 @@ where parent: &Header, attributes: &Self::NextBlockEnvCtx, ) -> Result, Self::Error> { + let base_fee = self.chain_spec().next_block_base_fee(parent, attributes.timestamp).unwrap_or_default(); + + tracing::info!( + target: "evm::op", + parent_number = parent.number(), + parent_base_fee = parent.base_fee_per_gas().unwrap_or_default(), + next_block_base_fee = base_fee, + timestamp = attributes.timestamp, + gas_limit = attributes.gas_limit, + "Setting base fee for next block EVM environment" + ); Ok(EvmEnv::for_op_next_block( parent, NextEvmEnvAttributes { diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 085362059f2..71d5e2dbd48 100644 --- a/crates/optimism/node/Cargo.toml +++ 
b/crates/optimism/node/Cargo.toml @@ -31,7 +31,7 @@ reth-node-core.workspace = true reth-rpc-engine-api.workspace = true reth-engine-local = { workspace = true, features = ["op"] } reth-rpc-api.workspace = true - +alloy-sol-types.workspace = true # op-reth reth-optimism-payload-builder.workspace = true reth-optimism-evm = { workspace = true, features = ["rpc"] } @@ -81,8 +81,10 @@ reth-payload-util.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-rpc.workspace = true reth-rpc-eth-types.workspace = true +reth-payload-primitives.workspace = true alloy-network.workspace = true +alloy-eips.workspace = true futures.workspace = true op-alloy-network.workspace = true diff --git a/crates/optimism/node/tests/assets/genesis_token.json b/crates/optimism/node/tests/assets/genesis_token.json new file mode 100644 index 00000000000..18fbf4364a9 --- /dev/null +++ b/crates/optimism/node/tests/assets/genesis_token.json @@ -0,0 +1,107 @@ +{ + "config": { + "chainId": 8453, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "arrowGlacierBlock": 0, + "grayGlacierBlock": 0, + "mergeNetsplitBlock": 0, + "bedrockBlock": 0, + "regolithTime": 0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true, + "optimism": { + "eip1559Elasticity": 6, + "eip1559Denominator": 50 + } + }, + "nonce": "0x0", + "timestamp": "0x0", + "extraData": "0x00", + "gasLimit": "0x1c9c380", + "difficulty": "0x0", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "0x14dc79964da2c08b23698b3d3cc7ca32193d9955": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x15d34aaf54267db7d7c367839aaf71a00a2c6a65": { + "balance": "0xd3c21bcecceda1000000" + }, + 
"0x1cbd3b2770909d4e10f157cabc84c7264073c9ec": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x23618e81e3f5cdf7f54c3d65f7fbc0abf5b21e8f": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x2546bcd3c84621e976d8185a91a922ae77ecec30": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x70997970c51812dc3a010c7d01b50e0d17dc79c8": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x71be63f3384f5fb98995898a86b02fb2426c5788": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x8626f6940e2eb28930efb4cef49b2d1f2c9c1199": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x90f79bf6eb2c4f870365e785982e1f101e93b906": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x976ea74026e726554db657fa54763abd0c3a0aa9": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x9965507d1a55bcc2695c58ba16fb37d819b0a4dc": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x9c41de96b2088cdc640c6182dfcf5491dc574a57": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xa0ee7a142d267c1f36714e4a8f75612f20a79720": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xbcd4042de499d14e55001ccbb24a551f3b954096": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xbda5747bfd65f08deb54cb465eb87d40e51b197e": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xcd3b766ccdd6ae721141f452c550ca635964ce71": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xdd2fd4581271e230360230f9337d5c0430bf44c0": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xdf3e18d64bc6a983f673ab319ccae4f1a57c7097": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266": { + "balance": "0xd3c21bcecceda1000000" + }, + "0xfabb0ac9d68b0b445fb7357272ff202c5651694a": { + "balance": "0xd3c21bcecceda1000000" + }, + "0x5FbDB2315678afecb367f032d93F642f64180aa3": { + "balance": "0x0", + "code": 
"0x583560e01c806370a082311461001f578063a9059cbb14610039576100a6575b6004355f6000526000602001526040600020545f5260205ff35b60043573ffffffffffffffffffffffffffffffffffffffff1633602435815f6000526000602001526040600020549003825f600052600060200152604060002055815f60005260006020015260406000205401825f60005260006020015260406000205560015f5260205ff35b5f80fd", + "storage": { + "0xa3c1274aadd82e4d12c8004c33fb244ca686dad4fcc8957fc5668588c11d9502": "0x0000000000000000000000000000000000000001000000000000000000000000" + } + } + }, + "number": "0x0" +} \ No newline at end of file diff --git a/crates/optimism/node/tests/it/engine.rs b/crates/optimism/node/tests/it/engine.rs new file mode 100644 index 00000000000..ec081638416 --- /dev/null +++ b/crates/optimism/node/tests/it/engine.rs @@ -0,0 +1,567 @@ +use std::hash::Hash; +use alloy_primitives::{keccak256,hex, Bytes, Address, B256}; +use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes, PayloadStatusEnum}; +use op_alloy_rpc_types_engine::OpPayloadAttributes; +use reth_e2e_test_utils::{ + setup, transaction::TransactionTestContext, wallet::Wallet, +}; +use alloy_primitives::{TxKind, U256}; +use alloy_sol_types::sol; +use reth_node_api::PayloadTypes; +use alloy_network::{EthereumWallet, TransactionBuilder}; +use reth_optimism_chainspec::{OpChainSpecBuilder, OP_SEPOLIA}; +use reth_optimism_node::{OpNode}; +use reth_optimism_payload_builder::{OpPayloadBuilderAttributes}; +use reth_optimism_primitives::OpTransactionSigned; +use reth_provider::BlockReaderIdExt; +use std::sync::Arc; +use alloy_eips::{BlockId, BlockNumberOrTag, Encodable2718}; +use reth_payload_builder::EthPayloadBuilderAttributes; +use reth_revm::database::EvmStateProvider; +use reth_rpc_api::{EngineApiClient}; +use alloy_rpc_types_engine::ExecutionPayloadV3; +use alloy_rpc_types_eth::{BlockTransactions, TransactionRequest}; +use alloy_rpc_types_eth::transaction::request::TransactionInput; +use reth_rpc_api::EthApiServer; +use alloy_sol_types::{SolCall, SolValue}; + 
+#[tokio::test] +async fn full_engine_api_bock_building_get_validation() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = OpChainSpecBuilder::default() + .chain(OP_SEPOLIA.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .regolith_activated() + .canyon_activated() + .ecotone_activated() + .build(); + + let (mut nodes, _tasks, _wallet) = setup::( + 1, + Arc::new(chain_spec.clone()), + false, + |timestamp| { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + // Construct Optimism-specific payload attributes + OpPayloadBuilderAttributes:: { + payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes), + transactions: vec![], // Empty vector of transactions for the builder + no_tx_pool: false, + gas_limit: Some(30_000_000), + eip_1559_params: None, + min_base_fee: None, + } + }, + ) + .await?; + + let mut node = nodes.pop().unwrap(); + let provider = node.inner.provider.clone(); + + let genesis_hash = node.block_hash(0); + + let wallet = Wallet::default(); + let raw_tx = TransactionTestContext::transfer_tx_bytes(OP_SEPOLIA.chain.id(), wallet.inner).await; + let _tx_hash = node.rpc.inject_tx(raw_tx).await?; + + let current_head = provider.sealed_header_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest)?.unwrap(); + let current_timestamp = current_head.timestamp; + + let payload_attrs = PayloadAttributes { + timestamp: current_timestamp + 2, // 2 seconds after current block (OP block time) + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + let fcu_state = ForkchoiceState { + head_block_hash: genesis_hash, + safe_block_hash: genesis_hash, + finalized_block_hash: genesis_hash, + }; + + let op_attrs = OpPayloadAttributes 
{ + payload_attributes: payload_attrs.clone(), + transactions: None, + no_tx_pool: None, + gas_limit: Some(30_000_000), + eip_1559_params: None, + min_base_fee: None, + }; + + let engine_client = node.inner.engine_http_client(); + let fcu_result = engine_client + .fork_choice_updated_v3(fcu_state, Some(op_attrs)) + .await?; + let payload_id = fcu_result.payload_id.expect("payload id"); + + // Wait a bit for payload to be built + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + let payload_v3 = engine_client.get_payload_v3(payload_id).await?; + assert_eq!(genesis_hash, payload_v3.execution_payload.payload_inner.payload_inner.parent_hash); + assert_eq!(21000, payload_v3.execution_payload.payload_inner.payload_inner.gas_used); + + // newPaylaod + let payload_builder_handle = node.inner.payload_builder_handle.clone(); + let built_payload = payload_builder_handle + .best_payload(payload_id) + .await + .transpose() + .ok() + .flatten() + .expect("Payload should be built"); + let block = Arc::new(built_payload.block().clone()); + let payload_v3 = ExecutionPayloadV3::from_block_unchecked( + block.hash(), + &Arc::unwrap_or_clone(block.clone()).into_block(), + ); + let versioned_hashes: Vec = Vec::new(); + let parent_beacon_block_root = block.parent_beacon_block_root.unwrap_or_default(); + + let new_payload_result = engine_client + .new_payload_v3(payload_v3, versioned_hashes, parent_beacon_block_root) + .await?; + assert_eq!(new_payload_result.status, PayloadStatusEnum::Valid); + + Ok(()) +} + + +#[tokio::test] +async fn full_engine_api_bock_building_continuously() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = OpChainSpecBuilder::default() + .chain(OP_SEPOLIA.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .regolith_activated() + .canyon_activated() + .ecotone_activated() + .build(); + + let (mut nodes, _tasks, _wallet) = setup::( + 1, + Arc::new(chain_spec.clone()), + false, + 
|timestamp| { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + // Construct Optimism-specific payload attributes + OpPayloadBuilderAttributes:: { + payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes), + transactions: vec![], // Empty vector of transactions for the builder + no_tx_pool: false, + gas_limit: Some(30_000_000), + eip_1559_params: None, + min_base_fee: None, + } + }, + ) + .await?; + + let mut node = nodes.pop().unwrap(); + let provider = node.inner.provider.clone(); + + let genesis_hash = node.block_hash(0); + + let wallet = Wallet::default(); + let signer = wallet.inner.clone(); + let raw_tx = TransactionTestContext::transfer_tx_bytes(OP_SEPOLIA.chain.id(), signer.clone()).await; + let _tx_hash = node.rpc.inject_tx(raw_tx).await?; + + let current_head = provider.sealed_header_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest)?.unwrap(); + let current_timestamp = current_head.timestamp; + + let payload_attrs = PayloadAttributes { + timestamp: current_timestamp + 2, // 2 seconds after current block (OP block time) + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + let fcu_state = ForkchoiceState { + head_block_hash: genesis_hash, + safe_block_hash: genesis_hash, + finalized_block_hash: genesis_hash, + }; + + let op_attrs = OpPayloadAttributes { + payload_attributes: payload_attrs.clone(), + transactions: None, + no_tx_pool: None, + gas_limit: Some(30_000_000), + eip_1559_params: None, + min_base_fee: None, + }; + + let engine_client = node.inner.engine_http_client(); + let fcu_result = engine_client + .fork_choice_updated_v3(fcu_state, Some(op_attrs)) + .await?; + let payload_id = fcu_result.payload_id.expect("payload id"); + + // Wait a bit for payload to be built 
+ tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + let payload_v3 = engine_client.get_payload_v3(payload_id).await?; + let block_1_hash = payload_v3.execution_payload.payload_inner.payload_inner.block_hash; + assert_eq!(genesis_hash, payload_v3.execution_payload.payload_inner.payload_inner.parent_hash); + assert_eq!(21000, payload_v3.execution_payload.payload_inner.payload_inner.gas_used); + + // newPaylaod + let payload_builder_handle = node.inner.payload_builder_handle.clone(); + let built_payload = payload_builder_handle + .best_payload(payload_id) + .await + .transpose() + .ok() + .flatten() + .expect("Payload should be built"); + let block = Arc::new(built_payload.block().clone()); + let payload_v3 = ExecutionPayloadV3::from_block_unchecked( + block.hash(), + &Arc::unwrap_or_clone(block.clone()).into_block(), + ); + let versioned_hashes: Vec = Vec::new(); + let parent_beacon_block_root = block.parent_beacon_block_root.unwrap_or_default(); + + let new_payload_result = engine_client + .new_payload_v3(payload_v3, versioned_hashes, parent_beacon_block_root) + .await?; + assert_eq!(new_payload_result.status, PayloadStatusEnum::Valid); + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + // Build block2 + // let raw_tx2 = TransactionTestContext::transfer_tx_bytes(chain_spec.chain.id(), signer.clone()).await; + let tx2 = TransactionRequest { + nonce: Some(1), + value: Some(U256::from(100)), + to: Some(TxKind::Call(Address::random())), + gas: Some(21000), + max_fee_per_gas: Some(25e9 as u128), // bump fee as needed + max_priority_fee_per_gas: Some(20e9 as u128), + chain_id: Some(chain_spec.chain.id()), + ..Default::default() + }; + let signed2 = TransactionTestContext::sign_tx(signer.clone(), tx2).await; + let raw_tx2 = signed2.encoded_2718().into(); + let _tx_hash2 = node.rpc.inject_tx(raw_tx2).await?; + let fcu_state_2 = ForkchoiceState { + head_block_hash: block_1_hash, + safe_block_hash: block_1_hash, + finalized_block_hash: 
genesis_hash, + }; + let payload_attrs_2 = PayloadAttributes { + timestamp: payload_attrs.timestamp + 2, + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + let op_attrs_2 = OpPayloadAttributes { + payload_attributes: payload_attrs_2.clone(), + transactions: None, + no_tx_pool: None, + gas_limit: Some(30_000_000), + eip_1559_params: None, + min_base_fee: None, + }; + let fcu_result_2 = engine_client + .fork_choice_updated_v3(fcu_state_2, Some(op_attrs_2)) + .await?; + assert_eq!(fcu_result_2.payload_status.status, PayloadStatusEnum::Valid); + let payload_id_2 = fcu_result_2.payload_id.expect("second payload id"); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + Ok(()) +} + +#[tokio::test] +async fn full_engine_api_multi_address() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = OpChainSpecBuilder::default() + .chain(OP_SEPOLIA.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis_token.json")).unwrap()) + .regolith_activated() + .canyon_activated() + .ecotone_activated() + .build(); + + let (mut nodes, _tasks, _wallet) = setup::( + 1, + Arc::new(chain_spec.clone()), + false, + |timestamp| { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + // Construct Optimism-specific payload attributes + OpPayloadBuilderAttributes:: { + payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes), + transactions: vec![], // Empty vector of transactions for the builder + no_tx_pool: false, + gas_limit: Some(30_000_000), + eip_1559_params: None, + min_base_fee: None, + } + }, + ) + .await?; + + let mut node = nodes.pop().unwrap(); + let provider = node.inner.provider.clone(); + let genesis_hash = node.block_hash(0); + + sol! 
{ + function balanceOf(address) view returns (uint256); + } + + let token: Address = "0x5FbDB2315678afecb367f032d93F642f64180aa3".parse().unwrap(); + let receiver: Address = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265".parse().unwrap(); + + let calldata = balanceOfCall(receiver).abi_encode(); + + let call_req = TransactionRequest { + to: Some(TxKind::Call(token)), + input: TransactionInput::from(calldata), // not wrapped in Option + ..Default::default() + }; + let res_bytes = node + .rpc + .inner + .eth_api() + .call(call_req.clone().into(), Some(BlockId::latest()), None, None) + .await?; + + if res_bytes.is_empty() { + println!("Call returned empty bytes (revert or no code)"); + } else { + let balance = balanceOfCall::abi_decode_returns(&res_bytes).expect("decode failed"); + println!("balanceOf receiver {receiver:x} = {balance}"); + } + + // let provider = node.inner.provider.clone(); + // + // let genesis_hash = node.block_hash(0); + // + let wallet = Wallet::default(); + let sender_address = wallet.inner.address(); + + + let calldata: Bytes = hex!("a9059cbb000000000000000000000011f39Fd6e51aad88F6F4ce6aB8827279cffFb922650000000000000000000000000000000000000000000000000000000000000001").into(); + + // Build tx + let nonce = node.rpc.inner.eth_api().transaction_count(wallet.inner.address(), None).await.unwrap(); + let tx_request = TransactionRequest { + from: Some(sender_address), + to: Some(token.into()), // TxKind::Call + value: Some(U256::ZERO), + gas: Some(300_000), // sufficient gas + gas_price: Some(1_000_000_000), // 1 gwei + nonce: Some(nonce.to::()), + input: calldata.into(), // TransactionInput + chain_id: Some(OP_SEPOLIA.chain.id()), + ..Default::default() + }; + + let signer = wallet.inner.clone(); + let wallet_wrapper = EthereumWallet::from(signer); // Wrap the signer + let envelope = tx_request.build(&wallet_wrapper).await.unwrap(); + let raw_tx = envelope.encoded_2718(); + + // Inject + let tx_hash = node.rpc.inject_tx(raw_tx.into()).await?; + 
println!("Injected transfer tx: {tx_hash}"); + + let current_head = provider.sealed_header_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest)?.unwrap(); + let current_timestamp = current_head.timestamp; + + let payload_attrs = PayloadAttributes { + timestamp: current_timestamp + 2, // 2 seconds after current block (OP block time) + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + + let fcu_state = ForkchoiceState { + head_block_hash: genesis_hash, + safe_block_hash: genesis_hash, + finalized_block_hash: genesis_hash, + }; + + let op_attrs = OpPayloadAttributes { + payload_attributes: payload_attrs.clone(), + transactions: None, + no_tx_pool: None, + gas_limit: Some(30_000_000), + eip_1559_params: None, + min_base_fee: None, + }; + + let engine_client = node.inner.engine_http_client(); + let fcu_result = engine_client + .fork_choice_updated_v3(fcu_state, Some(op_attrs)) + .await?; + let payload_id = fcu_result.payload_id.expect("payload id"); + + // Wait a bit for payload to be built + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + let payload_v3 = engine_client.get_payload_v3(payload_id).await?; + assert_eq!(genesis_hash, payload_v3.execution_payload.payload_inner.payload_inner.parent_hash); + // assert_eq!(68354, payload_v3.execution_payload.payload_inner.payload_inner.gas_used); + + // newPaylaod + let payload_builder_handle = node.inner.payload_builder_handle.clone(); + let built_payload = payload_builder_handle + .best_payload(payload_id) + .await + .transpose() + .ok() + .flatten() + .expect("Payload should be built"); + let block = Arc::new(built_payload.block().clone()); + let payload2_v3 = ExecutionPayloadV3::from_block_unchecked( + block.hash(), + &Arc::unwrap_or_clone(block.clone()).into_block(), + ); + let versioned_hashes: Vec = Vec::new(); + let parent_beacon_block_root = 
block.parent_beacon_block_root.unwrap_or_default(); + println!("payload2_v3: {:?}", payload2_v3); + let new_payload_result = engine_client + .new_payload_v3(payload2_v3.clone(), versioned_hashes, parent_beacon_block_root) + .await?; + assert_eq!(new_payload_result.status, PayloadStatusEnum::Valid); + + let head = payload_v3.execution_payload.payload_inner.payload_inner.block_hash; + let fcu_state = ForkchoiceState { + head_block_hash: head, + safe_block_hash: head, + finalized_block_hash: head, + }; + let ret = engine_client.fork_choice_updated_v3(fcu_state, None).await?; + print!("ret: {:?}", ret); + + let latest_block = node + .rpc + .inner + .eth_api() + .block_by_number(BlockNumberOrTag::Pending, false) // false = no full tx objects + .await? + .expect("latest block should exist"); + // + println!("Unsafe/latest block number: {}", latest_block.header.number); + // + match &latest_block.transactions { + BlockTransactions::Full(txs) => { + for tx in txs { + println!("Tx hash: {:?}", tx); + // println!("Tx input: {:?}", tx.input); + } + } + BlockTransactions::Hashes(hashes) => { + println!("Block has {} tx hashes", hashes.len()); + // for h in hashes { + // if let Some(receipt) = node + // .rpc + // .inner + // .eth_api() + // .transaction_receipt(*h) + // .await? 
+ // { + // println!("Tx {:?}", h); + // } else { + // println!("Tx {h:?} has no receipt yet"); + // } + // } + } + BlockTransactions::Uncle => { + unreachable!() + } + } + + let res_bytes = node + .rpc + .inner + .eth_api() + .call(call_req.into(), Some(BlockId::latest()), None, None) + .await?; + + if res_bytes.is_empty() { + println!("Call returned empty bytes (revert or no code)"); + } else { + let balance = balanceOfCall::abi_decode_returns(&res_bytes).expect("decode failed"); + println!("balanceOf receiver {receiver:x} = {balance}"); + } + + // for tx_bytes in &payload_v3.execution_payload.payload_inner.payload_inner.transactions { + // // Calculate hash of the transaction + // let hash = keccak256(tx_bytes); + // + // // Get receipt + // let receipt = node + // .rpc + // .inner + // .eth_api() + // .transaction_receipt(hash) + // .await + // .expect("should not error") + // .expect("receipt should exist"); + // + // // Ensure success (status == 1) + // // assert!(receipt.inner.status_code.expect("status code").to_bool()); + // println!("receipt: {:?}", receipt); + // } + + + Ok(()) +} + + +#[test] +fn test_slot() { + use alloy_primitives::{keccak256, Address, B256, U256}; + + fn mapping_slot_balance_of(holder: Address) -> B256 { + let slot = U256::from(1u64); + + // Left-pad the 20-byte address to 32 bytes + let mut addr_word = [0u8; 32]; + addr_word[12..].copy_from_slice(holder.as_slice()); + + // Build abi.encode(key, slot) + let mut buf = [0u8; 64]; + buf[0..32].copy_from_slice(&addr_word); + buf[32..64].copy_from_slice(&slot.to_be_bytes::<32>()); + + keccak256(buf) + } + + + let holder: Address = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266".parse().unwrap(); + let storage_slot = mapping_slot_balance_of(holder); + println!("{storage_slot:#x}"); + +} diff --git a/crates/optimism/node/tests/it/main.rs b/crates/optimism/node/tests/it/main.rs index fbd49d4c1cf..ef6533c707e 100644 --- a/crates/optimism/node/tests/it/main.rs +++ 
b/crates/optimism/node/tests/it/main.rs @@ -4,4 +4,6 @@ mod builder; mod priority; +mod engine; + const fn main() {} diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 41b825a2b72..490e3606661 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -28,6 +28,7 @@ use reth_primitives_traits::{ /// Re-export for use in downstream arguments. pub use op_alloy_rpc_types_engine::OpPayloadAttributes; +use tracing::info; use reth_optimism_primitives::OpPrimitives; /// Optimism Payload Builder Attributes diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index f3f1b03ab2e..2b77f73b8bd 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -131,7 +131,13 @@ impl PayloadBuilderHandle { attr: T::PayloadBuilderAttributes, ) -> Receiver> { let (tx, rx) = oneshot::channel(); - let _ = self.to_service.send(PayloadServiceCommand::BuildNewPayload(attr, tx)); + let ret = self.to_service.send(PayloadServiceCommand::BuildNewPayload(attr, tx)); + match ret { + Ok(_) => {}, + Err(payload_err) => { + eprintln!("payload error: {payload_err:?}"); + } + } rx } diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index e0d40070878..1c4eaae47ec 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -4,8 +4,8 @@ use alloy_primitives::{ }; use reth_primitives_traits::{Account, Bytecode}; use reth_storage_api::{ - AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider, - StateProvider, StateRootProvider, StorageRootProvider, + AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, PlainPostState, + StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ @@ -91,6 +91,13 @@ impl StateRootProvider for StateProviderTest { ) -> 
ProviderResult<(B256, TrieUpdates)> { unimplemented!("state root computation is not supported") } + + fn state_root_with_updates_triedb( + &self, + _plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + todo!() + } } impl StorageRootProvider for StateProviderTest { diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index b52b30eb518..e95ee1c112e 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -52,6 +52,7 @@ futures.workspace = true tokio.workspace = true tokio-stream.workspace = true reqwest = { workspace = true, features = ["rustls-tls-native-roots"] } +triedb.workspace=true # metrics metrics.workspace = true diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 8209af0fa53..7bd4909334d 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -50,6 +50,13 @@ impl reth_storage_api::StateRootProvider for StateProviderTraitObjWrapper<'_> { ) -> reth_errors::ProviderResult<(B256, reth_trie::updates::TrieUpdates)> { self.0.state_root_from_nodes_with_updates(input) } + + fn state_root_with_updates_triedb( + &self, + plain_state: reth_storage_api::PlainPostState, + ) -> reth_errors::ProviderResult<(B256, reth_trie::updates::TrieUpdates)> { + self.0.state_root_with_updates_triedb(plain_state) + } } impl reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper<'_> { diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 3fe1c7f1f97..26642132bc5 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -1,7 +1,7 @@ use alloy_primitives::{keccak256, Address, BlockNumber, TxHash, TxNumber, B256}; use reth_chainspec::MAINNET; use reth_db::{ - test_utils::{create_test_rw_db, create_test_rw_db_with_path, create_test_static_files_dir}, + 
test_utils::{create_test_rw_db, create_test_rw_db_with_path, create_test_static_files_dir, create_test_triedb_dir}, DatabaseEnv, }; use reth_db_api::{ @@ -17,7 +17,7 @@ use reth_db_api::{ use reth_ethereum_primitives::{Block, EthPrimitives, Receipt}; use reth_primitives_traits::{Account, SealedBlock, SealedHeader, StorageEntry}; use reth_provider::{ - providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, + providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter, triedb::TriedbProvider}, test_utils::MockNodeTypesWithDB, HistoryWriter, ProviderError, ProviderFactory, StaticFileProviderFactory, StatsReader, }; @@ -26,6 +26,7 @@ use reth_storage_errors::provider::ProviderResult; use reth_testing_utils::generators::ChangeSet; use std::{collections::BTreeMap, fmt::Debug, path::Path}; use tempfile::TempDir; +use std::sync::Arc; /// Test database that is used for testing stage implementations. #[derive(Debug)] @@ -38,12 +39,14 @@ impl Default for TestStageDB { /// Create a new instance of [`TestStageDB`] fn default() -> Self { let (static_dir, static_dir_path) = create_test_static_files_dir(); + let (triedb_dir, _) = create_test_triedb_dir(); Self { temp_static_files_dir: static_dir, factory: ProviderFactory::new( create_test_rw_db(), MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), + Arc::new(TriedbProvider::new(triedb_dir)), ), } } @@ -52,6 +55,7 @@ impl Default for TestStageDB { impl TestStageDB { pub fn new(path: &Path) -> Self { let (static_dir, static_dir_path) = create_test_static_files_dir(); + let (triedb_dir, _) = create_test_triedb_dir(); Self { temp_static_files_dir: static_dir, @@ -59,6 +63,7 @@ impl TestStageDB { create_test_rw_db_with_path(path), MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), + Arc::new(TriedbProvider::new(triedb_dir)), ), } } diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index a4122ebf5c0..45e3c3263ea 
100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -11,7 +11,8 @@ repository.workspace = true # reth reth-chainspec.workspace = true reth-db-api.workspace = true -reth-provider.workspace = true +reth-db.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } reth-primitives-traits.workspace = true reth-config.workspace = true reth-trie.workspace = true @@ -19,11 +20,14 @@ reth-trie-db.workspace = true reth-etl.workspace = true reth-codecs.workspace = true reth-stages-types.workspace = true +reth-storage-api.workspace = true reth-fs-util.workspace = true reth-node-types.workspace = true reth-static-file-types.workspace = true reth-execution-errors.workspace = true - +reth-trie-common.workspace = true +alloy-trie.workspace = true +triedb.workspace = true # eth alloy-consensus.workspace = true alloy-genesis.workspace = true @@ -33,6 +37,7 @@ alloy-primitives.workspace = true eyre.workspace = true thiserror.workspace = true boyer-moore-magiclen.workspace = true +rand = "0.8" # io serde.workspace = true @@ -41,9 +46,32 @@ serde_json.workspace = true # tracing tracing.workspace = true +# Add tempdir for the binary +tempdir = "0.3.7" + +[features] +default = [] +trie-db-ext = [] +bin-utils = ["reth-provider/test-utils"] + [dev-dependencies] reth-db = { workspace = true, features = ["mdbx"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-storage-api.workspace = true +tempdir = "0.3.7" +rand = "0.8" +criterion = { workspace = true } + + +[[bench]] +name = "state_root_comparison" +harness = false +required-features = ["trie-db-ext"] + +[[bin]] +name = "state_root_runner" +path = "src/bin/state_root_runner.rs" +required-features = ["trie-db-ext"] # so you can also run the TrieDB method [lints] workspace = true diff --git a/crates/storage/db-common/README.md b/crates/storage/db-common/README.md new file mode 100644 index 00000000000..2ca66d32937 --- /dev/null +++ 
b/crates/storage/db-common/README.md @@ -0,0 +1,16 @@ + +## test + +```bash +cargo test -p reth-db-common --features trie-db-ext test_triedb_state_root -- --nocapture +``` + +## bench +```bash +cargo bench -p reth-db-common --features trie-db-ext +cargo bench -p reth-db-common --features trie-db-ext --bench state_root_comparison -- state_root_with_overlay_triedb +cargo bench -p reth-db-common --features trie-db-ext --bench state_root_comparison -- state_root_with_overlay_mdbx +``` + +cargo run --release -p reth-db-common --features trie-db-ext --bin state_root_runner -- traditional 100000 5 +cargo run --release -p reth-db-common --features trie-db-ext --bin state_root_runner \ No newline at end of file diff --git a/crates/storage/db-common/benches/state_root_comparison.rs b/crates/storage/db-common/benches/state_root_comparison.rs new file mode 100644 index 00000000000..6c972e846fe --- /dev/null +++ b/crates/storage/db-common/benches/state_root_comparison.rs @@ -0,0 +1,288 @@ +#![allow(missing_docs, unreachable_pub)] + +mod util; + +use alloy_primitives::{keccak256, Address, StorageKey, StorageValue, B256, U256}; +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use rand::prelude::*; +use rand::{Rng, SeedableRng}; +use rand::rngs::StdRng; +use reth_chainspec::MAINNET; +use reth_primitives_traits::{Account, StorageEntry}; +use reth_provider::LatestStateProvider; +use reth_provider::{ + test_utils::create_test_provider_factory_with_chain_spec, + DatabaseProviderFactory, DBProvider, HashingWriter, ProviderFactory, TrieWriter, +}; +use reth_storage_api::{StateRootProvider, TrieWriter as _}; +use reth_trie::{HashedPostState, HashedStorage, StateRoot as StateRootComputer}; +use reth_trie_db::DatabaseHashedCursorFactory; +use reth_trie::{StateRootTrieDb, TrieExtDatabase}; +use std::path::PathBuf; +use std::time::Duration; +use alloy_primitives::map::{B256Map, HashMap}; +use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; +use
tempdir::TempDir; +use triedb::overlay::{OverlayStateMut, OverlayValue}; +use triedb::{path::AddressPath, account::Account as TrieDBAccount, Database}; +use triedb::path::StoragePath; +use reth_db_common::init::compute_state_root; +use reth_db_common::init_triedb::calculate_state_root_with_triedb; +use crate::util::{get_flat_trie_database, copy_files, DEFAULT_SETUP_DB_CONTRACT_SIZE, DEFAULT_SETUP_DB_EOA_SIZE, DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, SEED_CONTRACT, BATCH_SIZE, generate_random_address}; + +fn generate_random_accounts_and_storage( + num_accounts: usize, + storage_per_account: usize, + rng: &mut impl Rng, +) -> (Vec<(Address, Account)>, Vec<(Address, Vec)>) { + let mut accounts = Vec::new(); + let mut storage_entries = Vec::new(); + + for _ in 0..num_accounts { + let mut address_bytes = [0u8; 20]; + rng.fill(&mut address_bytes); + let address = Address::from_slice(&address_bytes); + + let account = Account { + nonce: rng.gen_range(0..=u64::MAX), + balance: U256::from(rng.gen_range(0u128..=u128::MAX)), + bytecode_hash: { + let mut hash_bytes = [0u8; 32]; + rng.fill(&mut hash_bytes); + Some(B256::from(hash_bytes)) + }, + }; + accounts.push((address, account)); + + let mut storage_vec = Vec::new(); + for _ in 0..storage_per_account { + let mut storage_key_bytes = [0u8; 32]; + rng.fill(&mut storage_key_bytes); + let storage_key = B256::from(storage_key_bytes); + + let mut storage_value_bytes = [0u8; 32]; + rng.fill(&mut storage_value_bytes); + let storage_value = U256::from_be_slice(&storage_value_bytes); + + storage_vec.push(StorageEntry { + key: storage_key, + value: storage_value, + }); + } + storage_entries.push((address, storage_vec)); + } + + (accounts, storage_entries) +} + +fn setup_test_data( + num_accounts: usize, + storage_per_account: usize, +) -> reth_provider::providers::ProviderFactory { + let mut rng = rand::thread_rng(); + let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); + + let (accounts, 
storage_entries) = + generate_random_accounts_and_storage(num_accounts, storage_per_account, &mut rng); + + let mut provider_rw = provider_factory.provider_rw().unwrap(); + + let accounts_for_hashing = accounts + .iter() + .map(|(address, account)| (*address, Some(*account))); + + provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); + provider_rw.insert_storage_for_hashing(storage_entries).unwrap(); + provider_rw.commit().unwrap(); + + provider_factory +} + +pub fn bench_state_root_comparison(c: &mut Criterion) { + let mut group = c.benchmark_group("State Root Calculation"); + group.sample_size(10); + + for size in [100000] { + let provider_factory = setup_test_data(size, 5); + + // Benchmark traditional method + group.bench_function(BenchmarkId::new("traditional", size), |b| { + b.iter(|| { + let provider_rw = provider_factory.provider_rw().unwrap(); + compute_state_root(&*provider_rw, None).unwrap(); + provider_rw.commit().unwrap(); + }) + }); + + // Benchmark TrieDB method + group.bench_function(BenchmarkId::new("triedb", size), |b| { + b.iter_with_setup( + || { + let tmp_dir = TempDir::new("bench_triedb").unwrap(); + let db_path = tmp_dir.path().join(format!("test_{}.db", size)); + (tmp_dir, db_path) + }, + |(tmp_dir, trie_db_path)| { + let provider = provider_factory.provider_rw().unwrap(); + calculate_state_root_with_triedb(&*provider, trie_db_path, None).unwrap() + }, + ) + }); + } + + group.finish(); +} +fn bench_state_root_with_overlay_triedb(c: &mut Criterion) { + let mut group = c.benchmark_group("state_root_with_overlay"); + let (base_dir, (overlay_acct, overlay_storage)) = get_flat_trie_database( + DEFAULT_SETUP_DB_EOA_SIZE, + DEFAULT_SETUP_DB_CONTRACT_SIZE, + DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, + BATCH_SIZE + ); + let dir = TempDir::new("triedb_bench_state_root_with_overlay").unwrap(); + let file_name = base_dir.main_file_name.clone(); + copy_files(&base_dir, dir.path()).unwrap(); + + // Generate overlay from the returned overlay 
data (accounts + storage) + let mut account_overlay_mut = OverlayStateMut::new(); + + // Add account overlays + for (address, account) in &overlay_acct { + let address_path = AddressPath::for_address(*address); + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, + KECCAK_EMPTY, + ); + account_overlay_mut.insert(address_path.clone().into(), Some(OverlayValue::Account(trie_account))); + } + + // Add storage overlays + for (address, storage) in &overlay_storage { + let address_path = AddressPath::for_address(*address); + for (storage_key, storage_value) in storage { + let storage_path = StoragePath::for_address_path_and_slot( + address_path.clone(), + StorageKey::from(*storage_key), + ); + account_overlay_mut.insert( + storage_path.clone().into(), + Some(OverlayValue::Storage(StorageValue::from_be_slice( + storage_path.get_slot().pack().as_slice() + ))), + ); + } + } + + let account_overlay = account_overlay_mut.freeze(); + + let overlay_count = overlay_acct.len() + overlay_storage.values().map(|s| s.len()).sum::(); + + group.throughput(criterion::Throughput::Elements(overlay_count as u64)); + group.measurement_time(Duration::from_secs(30)); + group.bench_function(BenchmarkId::new("state_root_with_overlay_triedb", overlay_count), |b| { + b.iter_with_setup( + || { + let db_path = dir.path().join(&file_name); + Database::open(db_path).unwrap() + }, + |db| { + let tx = db.begin_ro().unwrap(); + + let _root_result = tx.compute_root_with_overlay(account_overlay.clone()).unwrap(); + + tx.commit().unwrap(); + }, + ); + }); + + group.finish(); +} + +fn bench_state_root_with_overlay_mdbx(c: &mut Criterion) { + let mut group = c.benchmark_group("state_root_mdbx_with_overlay"); + + // Generate random data and overlay + let (addresses, accounts_map, storage_map, overlay_acct, overlay_storage) = + util::generate_shared_test_data( + DEFAULT_SETUP_DB_EOA_SIZE, // eoa_count + DEFAULT_SETUP_DB_CONTRACT_SIZE, // contract_count + 
DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, + BATCH_SIZE, // overlay_count + ); + + // Write base data into database using provider_rw + let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); + { + let mut provider_rw = provider_factory.provider_rw().unwrap(); + + // Convert base accounts to vector format + let accounts: Vec<(Address, Account)> = accounts_map.into_iter().collect(); + let storage_entries: Vec<(Address, Vec)> = storage_map.into_iter() + .map(|(address, storage)| { + let entries: Vec = storage.into_iter() + .map(|(key, value)| StorageEntry { key, value }) + .collect(); + (address, entries) + }) + .collect(); + + let accounts_for_hashing = accounts.iter().map(|(address, account)| (*address, Some(*account))); + provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); + provider_rw.insert_storage_for_hashing(storage_entries).unwrap(); + provider_rw.commit().unwrap(); + } + + // Create HashedPostState from overlay data + let mut hashed_accounts: Vec<(B256, Option)> = overlay_acct.iter() + .map(|(address, account)| { + let hashed = keccak256(address); + (hashed, Some(*account)) + }) + .collect(); + + // Build HashedStorage for overlay storage + let mut hashed_storages: B256Map = HashMap::default(); + for (address, storage) in &overlay_storage { + let hashed_address = keccak256(address); + let hashed_storage = HashedStorage::from_iter( + false, // wiped = false + storage.iter().map(|(key, value)| { + // key is a raw storage slot (B256), need to hash it + let hashed_slot = keccak256(*key); + (hashed_slot, *value) + }), + ); + hashed_storages.insert(hashed_address, hashed_storage); + } + + let hashed_state = HashedPostState { + accounts: hashed_accounts.into_iter().collect(), + storages: hashed_storages, + }; + + // Use provider_ro for state_root_with_updates + let db_provider_ro = provider_factory.database_provider_ro().unwrap(); + let latest_ro = LatestStateProvider::new(db_provider_ro); + + let overlay_count = 
overlay_acct.len() + overlay_storage.values().map(|s| s.len()).sum::(); + + group.throughput(criterion::Throughput::Elements(overlay_count as u64)); + group.measurement_time(Duration::from_secs(30)); + group.bench_function(BenchmarkId::new("state_root_with_overlay_mdbx", overlay_count), |b| { + b.iter(|| { + let _ = latest_ro.state_root_with_updates(hashed_state.clone()); + }) + }); + + group.finish(); +} + +criterion_group! { + name = benches; + config = Criterion::default(); + targets = bench_state_root_comparison, bench_state_root_with_overlay_triedb, bench_state_root_with_overlay_mdbx +} +criterion_main!(benches); diff --git a/crates/storage/db-common/benches/util.rs b/crates/storage/db-common/benches/util.rs new file mode 100644 index 00000000000..61f4a6cfc6a --- /dev/null +++ b/crates/storage/db-common/benches/util.rs @@ -0,0 +1,348 @@ +use std::path::{Path, PathBuf}; +use tempdir::TempDir; +use rand::prelude::*; +use rand::RngCore; +use alloy_primitives::{Address, StorageKey, StorageValue, U256, B256}; +use reth_primitives_traits::{Account, StorageEntry}; +use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; +use triedb::{ + account::Account as TrieDBAccount, + path::{AddressPath, StoragePath}, + transaction::TransactionError, + Database, +}; +use std::{ + fs, io, + sync::{Arc, Barrier}, + thread, + time::Duration, +}; +use std::collections::HashMap; + +pub const BATCH_SIZE: usize = 10_000; + +pub fn generate_random_address(rng: &mut StdRng) -> AddressPath { + let mut bytes = [0u8; 20]; + rng.fill_bytes(&mut bytes); + let addr = Address::from_slice(&bytes); + AddressPath::for_address(addr) +} + +pub const DEFAULT_SETUP_DB_EOA_SIZE: usize = 1_000_000; +pub const DEFAULT_SETUP_DB_CONTRACT_SIZE: usize = 100_000; +pub const DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT: usize = 10; +pub const SEED_EOA: u64 = 42; // EOA seeding value +pub const SEED_CONTRACT: u64 = 43; // contract account seeding value + + +#[derive(Debug)] +#[allow(dead_code)] +pub struct FlatTrieDatabase { 
+ _base_dir: Option, + pub main_file_name: String, + pub file_name_path: PathBuf, + pub meta_file_name: String, + pub meta_file_name_path: PathBuf, +} +pub fn get_flat_trie_database( + fallback_eoa_size: usize, + fallback_contract_size: usize, + fallback_storage_per_contract: usize, + overlay_size: usize, +) -> (FlatTrieDatabase,(HashMap, HashMap>) ){ + + let dir = TempDir::new("triedb_bench_base").unwrap(); + + let main_file_name_path = dir.path().join("triedb"); + let meta_file_name_path = dir.path().join("triedb.meta"); + let db = Database::create_new(&main_file_name_path).unwrap(); + + let (addresses, accounts_map, storage_map, overlay_acct, overlay_storage) = + generate_shared_test_data(fallback_eoa_size, fallback_contract_size, fallback_storage_per_contract, overlay_size); + + let ret = setup_tdb_database(&db, &addresses, &accounts_map, &storage_map) + .unwrap(); + + (FlatTrieDatabase { + _base_dir: Some(dir), + main_file_name: "triedb".to_string(), + file_name_path: main_file_name_path, + meta_file_name: "triedb.meta".to_string(), + meta_file_name_path, + }, (overlay_acct, overlay_storage )) +} +pub fn setup_tdb_database( + db: &Database, + addresses: &[Address], + accounts_map: &HashMap, + storage_map: &HashMap>, +) -> Result<(), TransactionError> { + { + let mut tx = db.begin_rw()?; + + // Set accounts from the provided data + for address in addresses { + if let Some(account) = accounts_map.get(address) { + let address_path = AddressPath::for_address(*address); + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + tx.set_account(address_path, Some(trie_account))?; + } + } + + // Set storage from the provided data (only for contracts) + for (address, storage) in storage_map { + let address_path = AddressPath::for_address(*address); + for (storage_key, storage_value) in storage { + let storage_path = StoragePath::for_address_path_and_slot( + 
address_path.clone(), + StorageKey::from(*storage_key), + ); + // Fix: Use the actual storage value, not the slot + let storage_value_triedb = StorageValue::from_be_slice( + storage_value.to_be_bytes::<32>().as_slice() + ); + tx.set_storage_slot(storage_path, Some(storage_value_triedb))?; + } + } + + tx.commit()?; + } + + Ok(()) +} + +// Helper function to generate shared test data using alloy primitives +pub fn generate_shared_test_data( + eoa_count: usize, + contract_count: usize, + storage_per_contract: usize, + overlay_count: usize, // total number of overlay addresses (can include duplicates and new ones) +) -> ( + Vec
, // all base addresses (EOA + contracts) + HashMap, // base accounts map + HashMap>, // base storage map: address -> storage_key -> value + HashMap, // overlay accounts map (can have duplicates with base + new addresses) + HashMap>, // overlay storage map +) { + let mut rng = StdRng::seed_from_u64(SEED_CONTRACT); + + // Generate EOA addresses + let eoa_addresses: Vec
= (0..eoa_count).map(|_| { + let mut addr_bytes = [0u8; 20]; + rng.fill(&mut addr_bytes); + Address::from_slice(&addr_bytes) + }).collect(); + + // Generate contract addresses + let contract_addresses: Vec
= (0..contract_count).map(|_| { + let mut addr_bytes = [0u8; 20]; + rng.fill(&mut addr_bytes); + Address::from_slice(&addr_bytes) + }).collect(); + + // Combine all base addresses + let mut addresses = eoa_addresses.clone(); + addresses.extend(contract_addresses.clone()); + + // Generate base accounts map + let mut accounts_map = HashMap::new(); + for (i, address) in addresses.iter().enumerate() { + let account = Account { + nonce: i as u64, + balance: U256::from(i as u64), + bytecode_hash: if contract_addresses.contains(address) { + // Contracts have bytecode hash + Some(EMPTY_ROOT_HASH) + } else { + // EOAs have no bytecode + None + }, + }; + accounts_map.insert(*address, account); + } + + // Generate base storage map (only for contracts) + let mut storage_map: HashMap> = HashMap::new(); + for address in &contract_addresses { + let mut contract_storage = HashMap::new(); + for key in 1..=storage_per_contract { + let storage_key = B256::from(U256::from(key)); + let storage_value = U256::from(key); + contract_storage.insert(storage_key, storage_value); + } + storage_map.insert(*address, contract_storage); + } + + // Generate overlay states + // Some addresses can be duplicates (updates to existing), some can be new + let mut overlay_accounts_map = HashMap::new(); + let mut overlay_storage_map: HashMap> = HashMap::new(); + + for i in 0..overlay_count { + // Randomly decide: duplicate existing address or new address + let is_existing = rng.gen_bool(0.5) && !addresses.is_empty(); + let address = if is_existing { + // Update existing account (only storage, no account update) + addresses[rng.gen_range(0..addresses.len())] + } else { + // Create new account + let mut addr_bytes = [0u8; 20]; + rng.fill(&mut addr_bytes); + Address::from_slice(&addr_bytes) + }; + + // Only generate overlay account for newly created accounts + if !is_existing { + // Generate overlay account (with different values) + let overlay_account = Account { + nonce: (i + 1000) as u64, // different 
nonce + balance: U256::from((i + 2000) as u64), // different balance + bytecode_hash: if rng.gen_bool(0.3) { + // 30% chance to be a contract + Some(EMPTY_ROOT_HASH) + } else { + None + }, + }; + overlay_accounts_map.insert(address, overlay_account); + } + + // Generate overlay storage (only for contracts) + // For existing addresses, check if they're contracts in base data + // For new addresses, check if the overlay account is a contract + let is_contract = if is_existing { + // Check if existing address is a contract in base data + accounts_map.get(&address) + .map(|acc| acc.bytecode_hash.is_some()) + .unwrap_or(false) + } else { + // Check if new overlay account is a contract + overlay_accounts_map.get(&address) + .map(|acc| acc.bytecode_hash.is_some()) + .unwrap_or(false) + }; + + if is_contract { + let mut contract_storage = HashMap::new(); + + // Random number of storage changes (max half of storage_per_contract) + let max_changes = (storage_per_contract / 2).max(1); + let num_changes = rng.gen_range(1..=max_changes); + + // Get existing storage if this address exists in base storage_map + let existing_storage = storage_map.get(&address); + + for _ in 0..num_changes { + let change_type = rng.gen_range(0..3); // 0: new, 1: delete, 2: update + + match change_type { + 0 => { + // New storage slot + let storage_key = B256::from(U256::from(rng.gen_range(1000..2000))); + let storage_value = U256::from(rng.gen_range(5000..10000)); + contract_storage.insert(storage_key, storage_value); + } + 1 => { + // Delete existing storage (value = 0) + if let Some(existing) = existing_storage { + if !existing.is_empty() { + let keys: Vec = existing.keys().copied().collect(); + if !keys.is_empty() { + let key_to_delete = keys[rng.gen_range(0..keys.len())]; + contract_storage.insert(key_to_delete, U256::ZERO); + } + } + } + } + 2 => { + // Update existing storage + if let Some(existing) = existing_storage { + if !existing.is_empty() { + let keys: Vec = 
existing.keys().copied().collect(); + if !keys.is_empty() { + let key_to_update = keys[rng.gen_range(0..keys.len())]; + let new_value = U256::from(rng.gen_range(10000..20000)); + contract_storage.insert(key_to_update, new_value); + } + } + } + } + _ => unreachable!(), + } + } + + if !contract_storage.is_empty() { + overlay_storage_map.insert(address, contract_storage); + } + } + } + + ( + addresses, + accounts_map, + storage_map, + overlay_accounts_map, + overlay_storage_map, + ) +} + +pub fn copy_files(from: &FlatTrieDatabase, to: &Path) -> Result<(), io::Error> { + for (file, from_path) in [ + (&from.main_file_name, &from.file_name_path), + (&from.meta_file_name, &from.meta_file_name_path), + ] { + let to_path = to.join(file); + fs::copy(from_path, &to_path)?; + } + Ok(()) +} + + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_generate_shared_test_data_single_eoa() { + let (addresses, accounts_map, storage_map, overlay_accounts_map, overlay_storage_map) = + generate_shared_test_data(1, 0, 0, 0); + + // Should have exactly 1 base address (EOA) + assert_eq!(addresses.len(), 1, "Should have exactly 1 base address"); + + // Should have exactly 1 account in base accounts map + assert_eq!(accounts_map.len(), 1, "Should have exactly 1 account in base accounts map"); + + // Verify the account properties + let address = &addresses[0]; + let account = accounts_map.get(address).expect("Address should exist in accounts_map"); + assert_eq!(account.nonce, 0, "EOA should have nonce 0"); + assert_eq!(account.balance, U256::from(0), "EOA should have balance 0"); + assert_eq!(account.bytecode_hash, None, "EOA should have no bytecode hash"); + + // Storage map should be empty (no contracts) + assert!(storage_map.is_empty(), "Storage map should be empty when contract_count is 0"); + + // Overlay maps should be empty (overlay_count is 0) + assert!(overlay_accounts_map.is_empty(), "Overlay accounts map should be empty when overlay_count is 0"); + 
        assert!(overlay_storage_map.is_empty(), "Overlay storage map should be empty when overlay_count is 0"); + } + #[test] + fn test_generate_shared_test_data_single_eoa_single_contract() { + let (addresses, accounts_map, storage_map, overlay_accounts_map, overlay_storage_map) = + generate_shared_test_data(1, 1, 0, 0); + + // Should have exactly 2 base addresses (1 EOA + 1 contract) + assert_eq!(addresses.len(), 2, "Should have exactly 2 base addresses"); + + // Should have exactly 2 accounts in base accounts map + assert_eq!(accounts_map.len(), 2, "Should have exactly 2 accounts in base accounts map"); + + + } +} \ No newline at end of file diff --git a/crates/storage/db-common/src/bin/state_root_overlay.rs b/crates/storage/db-common/src/bin/state_root_overlay.rs new file mode 100644 index 00000000000..8395d5913ae --- /dev/null +++ b/crates/storage/db-common/src/bin/state_root_overlay.rs @@ -0,0 +1,207 @@ +use alloy_primitives::{keccak256, Address, B256, U256, StorageKey, StorageValue}; +use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_primitives_traits::Account; +use reth_provider::{ + test_utils::create_test_provider_factory_with_chain_spec, + DatabaseProviderFactory, HashingWriter, LatestStateProvider, TrieWriter, +}; +use reth_storage_api::{StateRootProvider}; +use reth_trie_common::{HashedPostState, HashedStorage}; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Instant; +use alloy_genesis::Genesis; +use alloy_primitives::map::B256Map; +use tempdir::TempDir; +use triedb::{ + account::Account as TrieDBAccount, + overlay::{OverlayStateMut, OverlayValue}, + path::{AddressPath, StoragePath}, + Database, +}; +use reth_db::{init_db, ClientVersion, DatabaseEnv}; +use reth_db::mdbx::DatabaseArguments; +use reth_db_common::init::compute_state_root; +use reth_node_types::NodeTypesWithDBAdapter; +use crate::util::{setup_tdb_database}; + +#[path = "../../benches/util.rs"] +mod util; + +fn main() -> eyre::Result<()> { 
+ println!("Testing overlay state root calculation methods..."); + + // Generate shared test data + let (base_addresses, base_accounts_map, base_storage_map, overlay_acct, overlay_storage) = + util::generate_shared_test_data( + util::DEFAULT_SETUP_DB_EOA_SIZE, + util::DEFAULT_SETUP_DB_CONTRACT_SIZE, + util::DEFAULT_SETUP_DB_STORAGE_PER_CONTRACT, + util::BATCH_SIZE, + ); + + println!("Generated {} base addresses, {} overlay accounts", + base_addresses.len(), overlay_acct.len()); + + let dir = TempDir::new("triedb_overlay_base").unwrap(); + let main_file_name_path = dir.path().join("triedb"); + let triedb = Database::create_new(&main_file_name_path).unwrap(); + + setup_tdb_database(&triedb, &base_addresses, &base_accounts_map, &base_storage_map).unwrap(); + + let mut account_overlay_mut = OverlayStateMut::new(); + + for (address, account) in &overlay_acct { + let address_path = AddressPath::for_address(*address); + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + account_overlay_mut.insert(address_path.clone().into(), Some(OverlayValue::Account(trie_account))); + } + + // Add overlay storage + for (address, storage) in &overlay_storage { + let address_path = AddressPath::for_address(*address); + for (storage_key, storage_value) in storage { + // Convert B256 back to U256 to get the raw storage slot + let raw_slot = U256::from_be_slice(storage_key.as_slice()); + let storage_path = StoragePath::for_address_path_and_slot( + address_path.clone(), + StorageKey::from(raw_slot), + ); + + if storage_value.is_zero() { + // Zero value means delete the storage slot + account_overlay_mut.insert( + storage_path.clone().into(), + None, // ✅ Delete slot for zero values + ); + } else { + // Non-zero value: insert the storage entry + account_overlay_mut.insert( + storage_path.clone().into(), + Some(OverlayValue::Storage(StorageValue::from_be_slice( + 
storage_value.to_be_bytes::<32>().as_slice() + ))), + ); + } + } + } + let account_overlay = account_overlay_mut.freeze(); + + let start = Instant::now(); + let tx = triedb.begin_ro()?; + let triedb_root = tx.compute_root_with_overlay(account_overlay.clone())?; + println!("triedb_root = {:?}, overlay state root elapsed = {:?} ms", triedb_root.root, start.elapsed().as_millis()); + + let start = Instant::now(); + tx.commit()?; + println!("triedb commit elapsed = {:?} ns", start.elapsed().as_nanos()); + + // ===== Setup MDBX ===== + println!("\nSetting up MDBX..."); + // Create a chain spec with empty genesis allocation but keep MAINNET hardforks + let empty_chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(Genesis::default()) // Empty genesis with no alloc + .with_forks(MAINNET.hardforks.clone()) // Keep MAINNET hardforks + .build(), + ); + + + let datadir = tempdir::TempDir::new("state_root_overlay")?; + let db_path = datadir.path().join("mdbx"); + let sf_path = datadir.path().join("static_files"); + let triedb_path = datadir.path().join("triedb"); + reth_fs_util::create_dir_all(&db_path)?; + reth_fs_util::create_dir_all(&sf_path)?; + reth_fs_util::create_dir_all(&triedb_path)?; + + let db = Arc::new(init_db( + &db_path, + DatabaseArguments::new(ClientVersion::default()), + )?); + + use reth_provider::providers::StaticFileProvider; + let sfp: StaticFileProvider = StaticFileProvider::read_write(sf_path)?; + + use reth_provider::providers::triedb::TriedbProvider; + let triedb_provider = Arc::new(TriedbProvider::new(&triedb_path)); + + use reth_provider::providers::ProviderFactory; + let provider_factory = ProviderFactory::new( + db, + empty_chain_spec.clone(), + sfp, + triedb_provider, + ); + // Insert base data + { + let mut provider_rw = provider_factory.provider_rw()?; + let accounts: Vec<(Address, Account)> = base_accounts_map.iter().map(|(a, acc)| (*a, *acc)).collect(); + let storage_entries: Vec<(Address, Vec)> = 
base_storage_map + .iter() + .map(|(address, storage)| { + let entries: Vec = storage + .iter() + .map(|(key, value)| reth_primitives_traits::StorageEntry { + key: *key, + value: *value, + }) + .collect(); + (*address, entries) + }) + .collect(); + + let accounts_for_hashing = accounts.iter().map(|(address, account)| (*address, Some(*account))); + provider_rw.insert_account_for_hashing(accounts_for_hashing)?; + provider_rw.insert_storage_for_hashing(storage_entries)?; + + let ret = compute_state_root(provider_rw.as_ref(), None)?; + provider_rw.commit()?; + + } + + // Build HashedPostState from overlay + let mut hashed_accounts: Vec<(B256, Option)> = overlay_acct + .iter() + .map(|(address, account)| { + let hashed = keccak256(address); + (hashed, Some(*account)) + }) + .collect(); + + let mut hashed_storages: B256Map = HashMap::default(); + for (address, storage) in &overlay_storage { + let hashed_address = keccak256(address); + let hashed_storage = HashedStorage::from_iter( + false, + storage.iter().map(|(key, value)| { + let hashed_slot = keccak256(*key); + (hashed_slot, *value) + }), + ); + hashed_storages.insert(hashed_address, hashed_storage); + } + + let hashed_state = HashedPostState { + accounts: hashed_accounts.into_iter().collect(), + storages: hashed_storages, + }; + + let db_provider_ro = provider_factory.database_provider_ro()?; + let latest_ro = LatestStateProvider::new(db_provider_ro); + + let start = Instant::now(); + let (mdbx_root, _updates) = latest_ro.state_root_with_updates(hashed_state)?; + + println!("MDBX state root: {:?}, overlay state root elapsed {:?} ms", mdbx_root, start.elapsed().as_millis()); + assert_eq!(mdbx_root, triedb_root.root); + + Ok(()) +} \ No newline at end of file diff --git a/crates/storage/db-common/src/bin/state_root_overlay_min.rs b/crates/storage/db-common/src/bin/state_root_overlay_min.rs new file mode 100644 index 00000000000..08dd6add030 --- /dev/null +++ b/crates/storage/db-common/src/bin/state_root_overlay_min.rs 
@@ -0,0 +1,98 @@ +use alloy_primitives::{address, keccak256, B256, U256}; +use alloy_trie::{EMPTY_ROOT_HASH, KECCAK_EMPTY}; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_primitives_traits::Account; +use reth_provider::{ + test_utils::create_test_provider_factory_with_chain_spec,DatabaseProviderFactory, + LatestStateProvider, ProviderFactory, +}; +use reth_storage_api::StateRootProvider; +use reth_trie_common::HashedPostState; +use std::sync::Arc; +use alloy_genesis::Genesis; +use tempdir::TempDir; +use triedb::{ + account::Account as TrieDBAccount, + overlay::{OverlayStateMut, OverlayValue}, + path::AddressPath, + Database, +}; + +fn main() -> eyre::Result<()> { + println!("Testing overlay state root calculation with single account..."); + + // ===== Setup TrieDB ===== + let dir = TempDir::new("triedb_overlay_min").unwrap(); + let main_file_name_path = dir.path().join("triedb"); + let triedb = Database::create_new(&main_file_name_path).unwrap(); + + let tdb_pre_root = triedb.state_root(); + println!("TrieDB pre state root: {:?}", tdb_pre_root); + + // Create overlay with single account + let mut overlay_mut = OverlayStateMut::new(); + let address = address!("0xd8da6bf26964af9d7eed9e03e53415d37aa96045"); + let address_path = AddressPath::for_address(address); + let trie_account = TrieDBAccount::new( + 1, // nonce + U256::from(100), // balance + EMPTY_ROOT_HASH, // storage_root + KECCAK_EMPTY, // code_hash + ); + overlay_mut.insert(address_path.clone().into(), Some(OverlayValue::Account(trie_account))); + let account_overlay = overlay_mut.freeze(); + + // Calculate state root with TrieDB + let tx = triedb.begin_ro()?; + let triedb_root = tx.compute_root_with_overlay(account_overlay.clone())?; + println!("TrieDB state root with overlay: {:?}", triedb_root.root); + tx.commit()?; + + // ===== Setup MDBX ===== + println!("\nSetting up MDBX..."); + let empty_chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + 
.genesis(Genesis::default()) + .with_forks(MAINNET.hardforks.clone()) + .build(), + ); + let provider_factory = create_test_provider_factory_with_chain_spec(empty_chain_spec); + + let db_provider_ro_pre = provider_factory.database_provider_ro()?; + let latest_ro_pre = LatestStateProvider::new(db_provider_ro_pre); + let empty_state = HashedPostState::default(); + let (mdbx_pre_root, _) = latest_ro_pre.state_root_with_updates(empty_state)?; + println!("MDBX pre state root: {:?}", mdbx_pre_root); + + // Build HashedPostState from overlay (single account) + let account = Account { + nonce: 1, + balance: U256::from(100), + bytecode_hash: None, // No bytecode + }; + let hashed_address = keccak256(address); + let hashed_state = HashedPostState { + accounts: vec![(hashed_address, Some(account))].into_iter().collect(), + storages: Default::default(), + }; + + // Calculate state root with MDBX + let db_provider_ro = provider_factory.database_provider_ro()?; + let latest_ro = LatestStateProvider::new(db_provider_ro); + let (mdbx_root, _updates) = latest_ro.state_root_with_updates(hashed_state)?; + println!("MDBX state root with overlay: {:?}", mdbx_root); + + // ===== Compare Results ===== + println!("\n=== Comparison ==="); + println!("TrieDB root: {:?}", triedb_root.root); + println!("MDBX root: {:?}", mdbx_root); + + if triedb_root.root == mdbx_root { + println!("\n✅ SUCCESS: Both methods produce the same state root!"); + Ok(()) + } else { + println!("\n❌ FAILURE: State roots differ!"); + eyre::bail!("State root mismatch: TrieDB={:?}, MDBX={:?}", triedb_root.root, mdbx_root) + } +} \ No newline at end of file diff --git a/crates/storage/db-common/src/bin/state_root_runner.rs b/crates/storage/db-common/src/bin/state_root_runner.rs new file mode 100644 index 00000000000..04cd5fc4a4a --- /dev/null +++ b/crates/storage/db-common/src/bin/state_root_runner.rs @@ -0,0 +1,126 @@ +use std::time::Instant; + +use alloy_primitives::{Address, B256, U256}; +use rand::Rng; +use 
reth_chainspec::MAINNET; +use reth_primitives_traits::{Account, StorageEntry}; +use reth_provider::{ + test_utils::create_test_provider_factory_with_chain_spec, + DatabaseProviderFactory, HashingWriter, ProviderFactory, TrieWriter, +}; +use reth_storage_api::TrieWriter as _; +use reth_db_common::init::compute_state_root; +use reth_db_common::init_triedb::calculate_state_root_with_triedb; + +fn generate_random_accounts_and_storage( + num_accounts: usize, + storage_per_account: usize, + rng: &mut impl Rng, +) -> (Vec<(Address, Account)>, Vec<(Address, Vec)>) { + let mut accounts = Vec::new(); + let mut storage_entries = Vec::new(); + + for _ in 0..num_accounts { + let mut address_bytes = [0u8; 20]; + rng.fill(&mut address_bytes); + let address = Address::from_slice(&address_bytes); + + let account = Account { + nonce: rng.gen_range(0..=u64::MAX), + balance: U256::from(rng.gen_range(0u128..=u128::MAX)), + bytecode_hash: { + let mut hash_bytes = [0u8; 32]; + rng.fill(&mut hash_bytes); + Some(B256::from(hash_bytes)) + }, + }; + accounts.push((address, account)); + + let mut storage_vec = Vec::new(); + for _ in 0..storage_per_account { + let mut key_bytes = [0u8; 32]; + rng.fill(&mut key_bytes); + let key = B256::from(key_bytes); + + let mut value_bytes = [0u8; 32]; + rng.fill(&mut value_bytes); + let value = U256::from_be_slice(&value_bytes); + + storage_vec.push(StorageEntry { key, value }); + } + storage_entries.push((address, storage_vec)); + } + + (accounts, storage_entries) +} + +fn setup_test_data( + num_accounts: usize, + storage_per_account: usize, +) -> ProviderFactory { + let mut rng = rand::thread_rng(); + let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); + + let (accounts, storage_entries) = + generate_random_accounts_and_storage(num_accounts, storage_per_account, &mut rng); + + // single RW tx to populate DB, then commit + let mut provider_rw = provider_factory.provider_rw().unwrap(); + + let accounts_for_hashing = 
accounts + .iter() + .map(|(address, account)| (*address, Some(*account))); + + provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); + provider_rw.insert_storage_for_hashing(storage_entries).unwrap(); + provider_rw.commit().unwrap(); + + provider_factory +} + +fn main() { + // args: traditional | triedb [num_accounts] [storage_per_account] + let mut args = std::env::args().skip(1); + let mode = args.next().unwrap_or_else(|| "traditional".to_string()); + let num_accounts: usize = args.next().unwrap_or_else(|| "100000".to_string()).parse().unwrap(); + let storage_per_account: usize = + args.next().unwrap_or_else(|| "5".to_string()).parse().unwrap(); + + println!( + "Running state root with mode={mode}, num_accounts={num_accounts}, storage_per_account={storage_per_account}" + ); + + let provider_factory = setup_test_data(num_accounts, storage_per_account); + + match mode.as_str() { + "traditional" => { + let provider_rw = provider_factory.provider_rw().unwrap(); + let start = Instant::now(); + let root = compute_state_root(&*provider_rw, None).unwrap(); + // If you want to persist trie tables, commit here: + provider_rw.commit().unwrap(); + let elapsed = start.elapsed(); + println!("traditional: root={root:?}, elapsed={:?}", elapsed); + } + "triedb" => { + use tempdir::TempDir; + + let provider_rw = provider_factory.provider_rw().unwrap(); + let tmp_dir = TempDir::new("state_root_triedb").unwrap(); + let trie_db_path = tmp_dir.path().join("triedb.db"); + + let start = Instant::now(); + let root = + calculate_state_root_with_triedb(&*provider_rw, trie_db_path.clone(), None).unwrap(); + let elapsed = start.elapsed(); + println!( + "triedb: root={root:?}, elapsed={:?}", + elapsed + ); + } + other => { + eprintln!("Unknown mode: {other}. 
Use 'traditional' or 'triedb'."); + std::process::exit(1); + } + } +} diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 810acf4d5d4..3e51e96a1cd 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -2,7 +2,7 @@ use alloy_consensus::BlockHeader; use alloy_genesis::GenesisAccount; -use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256}; +use alloy_primitives::{keccak256, map::HashMap, Address, StorageValue, StorageKey, B256, U256}; use reth_chainspec::EthChainSpec; use reth_codecs::Compact; use reth_config::config::EtlConfig; @@ -25,10 +25,20 @@ use reth_trie::{ prefix_set::{TriePrefixSets, TriePrefixSetsMut}, IntermediateStateRootState, Nibbles, StateRoot as StateRootComputer, StateRootProgress, }; -use reth_trie_db::DatabaseStateRoot; +use reth_trie_db::{DatabaseStateRoot, DatabaseTrieCursorFactory}; use serde::{Deserialize, Serialize}; use std::io::BufRead; +use std::time::Instant; use tracing::{debug, error, info, trace}; +use reth_trie::{trie_cursor::{TrieCursor, TrieCursorFactory}}; +use reth_provider::providers::state::latest::get_triedb_provider; +use triedb::{ + account::Account as TrieDBAccount, + path::{AddressPath, StoragePath}, + transaction::TransactionError, +}; +use alloy_trie::EMPTY_ROOT_HASH; +use alloy_consensus::constants::KECCAK_EMPTY; /// Default soft limit for number of bytes to read from state dump file, before inserting into /// database. @@ -102,6 +112,7 @@ where + AsRef, PF::ChainSpec: EthChainSpec
::BlockHeader>, { + let start = Instant::now(); let chain = factory.chain_spec(); let genesis = chain.genesis(); @@ -152,8 +163,18 @@ where insert_genesis_state(&provider_rw, alloc.iter())?; - // compute state root to populate trie tables - compute_state_root(&provider_rw, None)?; + // let ret = compute_state_root(&provider_rw, None)?; + + // Calculate state root using triedb + match compute_state_root_triedb(alloc.iter()) { + Ok(triedb_state_root) => { + println!("compute_state_root_triedb done: {:?}", triedb_state_root); + } + Err(e) => { + println!("compute_state_root_triedb failed: {:?}", e); + // Don't fail genesis init if triedb fails, just log it + } + } // set stage checkpoint to genesis block number for all stages let checkpoint = StageCheckpoint { block_number: genesis_block_number, ..Default::default() }; @@ -181,7 +202,7 @@ where // `commit_unwind`` will first commit the DB and then the static file provider, which is // necessary on `init_genesis`. provider_rw.commit()?; - + info!("time elapsed in init_genesis: {:?}", start.elapsed().as_millis()); Ok(hash) } @@ -624,7 +645,7 @@ where /// Computes the state root (from scratch) based on the accounts and storages present in the /// database. -fn compute_state_root( +pub fn compute_state_root( provider: &Provider, prefix_sets: Option, ) -> Result @@ -683,6 +704,66 @@ where } } +/// Computes the state root using triedb by inserting all genesis accounts and storage. 
+pub fn compute_state_root_triedb<'a, 'b>( + alloc: impl Iterator, +) -> Result { + let triedb_provider = get_triedb_provider() + .ok_or_else(|| InitStorageError::Provider(ProviderError::UnsupportedProvider))?; + + let mut tx = triedb_provider.inner.begin_rw() + .map_err(|e| InitStorageError::Provider(ProviderError::TrieWitnessError(format!("Failed to begin triedb transaction: {e:?}"))))?; + + // Insert all genesis accounts and storage into triedb + for (address, genesis_account) in alloc { + let address_path = AddressPath::for_address(*address); + + // Convert GenesisAccount to Account + let account = Account { + nonce: genesis_account.nonce.unwrap_or(0), + balance: genesis_account.balance, + bytecode_hash: genesis_account.code.as_ref().map(|code| keccak256(code)), + }; + + // Insert storage first (if exists), so storage root can be computed + if let Some(ref storage) = genesis_account.storage { + for (storage_key, storage_value) in storage { + let raw_slot = U256::from_be_slice(storage_key.as_slice()); + let storage_path = StoragePath::for_address_path_and_slot( + address_path.clone(), + StorageKey::from(raw_slot), + ); + + let storage_value_u256 = U256::from_be_slice(storage_value.as_slice()); + if !storage_value_u256.is_zero() { + let storage_value_triedb = StorageValue::from_be_slice( + storage_value_u256.to_be_bytes::<32>().as_slice() + ); + tx.set_storage_slot(storage_path, Some(storage_value_triedb)).unwrap(); + } + } + } + + // Insert account (storage root will be computed by triedb when we commit) + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, // Will be computed by triedb + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + + tx.set_account(address_path, Some(trie_account)).unwrap(); + } + + // Commit - this computes the state root + tx.commit() + .map_err(|e| InitStorageError::Provider(ProviderError::TrieWitnessError(format!("Failed to commit triedb transaction: {e:?}"))))?; + + // Get the computed 
state root + let triedb_state_root = triedb_provider.inner.state_root(); + Ok(triedb_state_root) +} + /// Type to deserialize state root from state dump file. #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] struct StateRoot { @@ -835,4 +916,13 @@ mod tests { )], ); } + + #[test] + fn init_genesis_with_triedb() { + let genesis_hash = + init_genesis(&create_test_provider_factory_with_chain_spec(MAINNET.clone())).unwrap(); + + // actual, expected + assert_eq!(genesis_hash, MAINNET_GENESIS_HASH); + } } diff --git a/crates/storage/db-common/src/init_triedb.rs b/crates/storage/db-common/src/init_triedb.rs new file mode 100644 index 00000000000..ce2f21bb84d --- /dev/null +++ b/crates/storage/db-common/src/init_triedb.rs @@ -0,0 +1,241 @@ +use reth_provider::{ + DBProvider, ProviderError, TrieWriter, +}; +use reth_trie::{ + prefix_set::TriePrefixSets, + IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress, +}; +use reth_trie_db::DatabaseHashedCursorFactory; +use reth_trie::{StateRootTrieDb, TrieExtDatabase}; +use alloy_primitives::B256; +use tracing::{info, trace}; +use std::path::Path; + +/// Calculate state root using TrieDB and commit trie updates. +/// +/// This function: +/// 1. Uses `StateRootTrieDb` with `DatabaseHashedCursorFactory` to read from the database +/// 2. Calculates state root using TrieDB +/// 3. 
Returns the computed state root +/// +/// # Arguments +/// +/// * `provider` - Database provider that implements `DBProvider` and `TrieWriter` +/// * `trie_db_path` - Path where the TrieDB database should be created +/// * `prefix_sets` - Optional prefix sets for incremental state root calculation (currently unused) +/// +/// # Returns +/// +/// * `Ok(B256)` - The computed state root hash +/// * `Err(ProviderError)` - If state root calculation fails +pub fn calculate_state_root_with_triedb<Provider>( + provider: &Provider, + trie_db_path: impl AsRef<Path>, + _prefix_sets: Option<TriePrefixSets>, +) -> Result<B256, ProviderError> +where + Provider: DBProvider + TrieWriter, +{ + trace!(target: "reth::state_root", "Calculating state root using TrieDB"); + let tx = provider.tx_ref(); + let hashed_cursor_factory = DatabaseHashedCursorFactory::new(tx); + let trie_ext_db = TrieExtDatabase::new(trie_db_path); + let state_root_ext = StateRootTrieDb::new(hashed_cursor_factory, trie_ext_db); + let ret = state_root_ext.calculate_commit(); + match ret { + Ok(root) => Ok(root), + // Propagate the underlying failure instead of discarding it with an empty message. + Err(error) => Err(ProviderError::TrieWitnessError(format!("triedb state root calculation failed: {error:?}"))), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempdir::TempDir; + use reth_provider::{ + test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, + ProviderFactory, HashingWriter, DBProvider, + }; + use reth_chainspec::MAINNET; + use reth_provider::DatabaseProviderFactory; + use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; + use reth_trie::{StateRootTrieDb, TrieExtDatabase}; + use alloy_primitives::{Address, U256, keccak256, B256}; + use reth_primitives_traits::{Account, StorageEntry}; + use reth_trie::{ + StateRoot as StateRootComputer, StateRootProgress, + }; + use reth_storage_api::TrieWriter; + use crate::init::compute_state_root; + use rand::Rng; + use reth_trie::trie_cursor::{TrieCursor, TrieCursorFactory}; + + fn generate_random_accounts_and_storage( + num_accounts: usize, + storage_per_account: usize, + rng: &mut impl 
Rng, + ) -> (Vec<(Address, Account)>, Vec<(Address, Vec)>) { + let mut accounts = Vec::new(); + let mut storage_entries = Vec::new(); + + for _ in 0..num_accounts { + let mut address_bytes = [0u8; 20]; + rng.fill(&mut address_bytes); + let address = Address::from_slice(&address_bytes); + + let account = Account { + nonce: rng.gen_range(0..=u64::MAX), + balance: U256::from(rng.gen_range(0u128..=u128::MAX)), + bytecode_hash: { + let mut hash_bytes = [0u8; 32]; + rng.fill(&mut hash_bytes); + Some(B256::from(hash_bytes)) + } + }; + accounts.push((address, account)); + + let mut storage_vec = Vec::new(); + for _ in 0..storage_per_account { + let mut storage_key_bytes = [0u8; 32]; + rng.fill(&mut storage_key_bytes); + let storage_key = B256::from(storage_key_bytes); + + let mut storage_value_bytes = [0u8; 32]; + rng.fill(&mut storage_value_bytes); + let storage_value = U256::from_be_slice(&storage_value_bytes); + + storage_vec.push(StorageEntry { + key: storage_key, + value: storage_value, + }); + } + storage_entries.push((address, storage_vec)); + } + + (accounts, storage_entries) + } + + #[test] + pub fn test_triedb_state_root_with_random_accts() { + let mut rng = rand::thread_rng(); + let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); + + let mut provider_rw = provider_factory.database_provider_rw().unwrap(); + + let (dummy_accounts, storage_entries) = generate_random_accounts_and_storage( + 10000, // num_accounts + 10, // storage_per_account + &mut rng, + ); + + let accounts_for_hashing = dummy_accounts + .iter() + .map(|(address, account)| (*address, Some(*account))); + + provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); + provider_rw.insert_storage_for_hashing(storage_entries).unwrap(); + provider_rw.commit().unwrap(); + + let traditional_root = { + let provider_rw = provider_factory.database_provider_rw().unwrap(); + compute_state_root(&provider_rw, None).unwrap() + }; + + let triedb_root = { + let 
provider_ro = provider_factory.database_provider_ro().unwrap(); + let tx = provider_ro.tx_ref(); + let hashed_cursor_factory = DatabaseHashedCursorFactory::new(tx); + let tmp_dir = TempDir::new("test_triedb").unwrap(); + let file_path = tmp_dir.path().join("test.db"); + let trie_ext_db = TrieExtDatabase::new(file_path); + let state_root_ext = StateRootTrieDb::new(hashed_cursor_factory, trie_ext_db); + state_root_ext.calculate_commit().unwrap() + }; + + assert_eq!(triedb_root, traditional_root, "State roots should match"); + } + + #[test] + pub fn test_triedb_state_root_with_determistic_accts() { + let provider_factory = create_test_provider_factory_with_chain_spec(MAINNET.clone()); + let mut provider_rw = provider_factory.database_provider_rw().unwrap(); + + let accounts: Vec<(Address, Account)> = vec![ + ( + Address::from_slice(&[ + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, + ]), // keccak256(address) = [20, 104, 40, 128, 86, 49, 12, 130, 170, 76, 1, 167, 225, 42, 16, 248, 17, 26, 5, 96, 231, 43, 112, 5, 85, 71, 144, 49, 184, 108, 53, 125] + Account { nonce: 1, balance: U256::from(100u64), bytecode_hash: None }, + ), + ( + Address::from_slice(&[ + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, + ]), // keccak256(address) = [213, 38, 136, 168, 249, 38, 200, 22, 202, 30, 7, 144, 103, 202, 186, 148, 79, 21, 142, 118, 72, 23, 184, 63, 196, 53, 148, 55, 12, 169, 207, 98] + Account { nonce: 2, balance: U256::from(200u64), bytecode_hash: None }, + ), + ( + Address::from_slice(&[ + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x10, + ]), // keccak256(address) = [144,176, 210, 137, 234, 33, 29, 202, 142, 2, 12, 156, 200, 197, 214, 186, 47, 65, 111, 225, 95, 166, 146, 180, 113, 132, 164, 185, 70, 178, 33, 77] + Account { nonce: 3, balance: 
U256::from(300u64), bytecode_hash: None }, + ), + ]; + + // let storage_entries: Vec<(Address, Vec)> = accounts + // .iter() + // .map(|(address, _)| { + // let addr_bytes = address.as_slice(); + // + // let key1 = B256::from_slice(&keccak256([addr_bytes, &[0x01]].concat()).as_slice()); + // let key2 = B256::from_slice(&keccak256([addr_bytes, &[0x02]].concat()).as_slice()); + // + // let value1 = U256::from(keccak256([addr_bytes, &[0xA1]].concat()).as_slice()); + // let value2 = U256::from(keccak256([addr_bytes, &[0xA2]].concat()).as_slice()); + // + // let slots = vec![ + // StorageEntry { key: key1, value: value1 }, + // StorageEntry { key: key2, value: value2 }, + // ]; + // + // (*address, slots) + // }) + // .collect(); + + let accounts_for_hashing = accounts + .iter() + .map(|(address, account)| (*address, Some(*account))); + + provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); + // provider_rw.insert_storage_for_hashing(storage_entries).unwrap(); + provider_rw.commit().unwrap(); + + // Traditional root + let traditional_root = { + let provider_rw = provider_factory.database_provider_rw().unwrap(); + compute_state_root(&provider_rw, None).unwrap() + }; + + // TrieDB root + let triedb_root = { + let provider_ro = provider_factory.database_provider_ro().unwrap(); + let tx = provider_ro.tx_ref(); + let hashed_cursor_factory = DatabaseHashedCursorFactory::new(tx); + let tmp_dir = TempDir::new("test_triedb_deterministic").unwrap(); + let file_path = tmp_dir.path().join("test.db"); + let trie_ext_db = TrieExtDatabase::new(file_path); + let state_root_ext = StateRootTrieDb::new(hashed_cursor_factory, trie_ext_db); + state_root_ext.calculate_commit().unwrap() + }; + + assert_eq!(triedb_root, traditional_root, "Deterministic state roots should match"); + } +} diff --git a/crates/storage/db-common/src/lib.rs b/crates/storage/db-common/src/lib.rs index 22e49abfb05..5080687ff98 100644 --- a/crates/storage/db-common/src/lib.rs +++ 
b/crates/storage/db-common/src/lib.rs @@ -11,4 +11,5 @@ pub mod init; mod db_tool; + pub use db_tool::*; diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index a6306723847..4257c32fb0f 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -159,6 +159,14 @@ pub mod test_utils { (temp_dir, path) } + /// Create `triedb` path for testing + #[track_caller] + pub fn create_test_triedb_dir() -> (TempDir, PathBuf) { + let temp_dir = TempDir::with_prefix("reth-test-triedb-").expect(ERROR_TEMPDIR); + let path = temp_dir.path().to_path_buf(); + (temp_dir, path) + } + /// Get a temporary directory path to use for the database pub fn tempdir_path() -> PathBuf { let builder = tempfile::Builder::new().prefix("reth-test-").rand_bytes(8).tempdir(); diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index e8599a89706..cba0917d922 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -37,9 +37,11 @@ alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true +alloy-trie.workspace = true revm-database.workspace = true revm-state = { workspace = true, optional = true } +triedb.workspace = true # tracing tracing.workspace = true @@ -54,7 +56,8 @@ parking_lot.workspace = true dashmap = { workspace = true, features = ["inline"] } strum.workspace = true eyre.workspace = true - +fixed-cache.workspace=true +rapidhash.workspace = true # test-utils reth-ethereum-engine-primitives = { workspace = true, optional = true } tokio = { workspace = true, features = ["sync"], optional = true } @@ -75,6 +78,7 @@ revm-database-interface.workspace = true revm-state.workspace = true tempfile.workspace = true +tempdir.workspace = true assert_matches.workspace = true rand.workspace = true diff --git a/crates/storage/provider/src/providers/database/builder.rs
b/crates/storage/provider/src/providers/database/builder.rs index 4bc8569432e..d532370543c 100644 --- a/crates/storage/provider/src/providers/database/builder.rs +++ b/crates/storage/provider/src/providers/database/builder.rs @@ -15,6 +15,7 @@ use std::{ path::{Path, PathBuf}, sync::Arc, }; +use crate::providers::triedb::TriedbProvider; /// Helper type to create a [`ProviderFactory`]. /// @@ -105,12 +106,13 @@ impl ProviderFactoryBuilder { where N: NodeTypes, { - let ReadOnlyConfig { db_dir, db_args, static_files_dir, watch_static_files } = + let ReadOnlyConfig { db_dir, db_args, static_files_dir, triedb_dir, watch_static_files } = config.into(); Ok(self .db(Arc::new(open_db_read_only(db_dir, db_args)?)) .chainspec(chainspec) .static_file(StaticFileProvider::read_only(static_files_dir, watch_static_files)?) + .triedb_provider(TriedbProvider::new(triedb_dir)) .build_provider_factory()) } } @@ -133,6 +135,8 @@ pub struct ReadOnlyConfig { pub db_args: DatabaseArguments, /// The path to the static file dir pub static_files_dir: PathBuf, + /// The path to the `TrieDB` directory + pub triedb_dir: PathBuf, /// Whether the static files should be watched for changes. pub watch_static_files: bool, } @@ -146,13 +150,14 @@ impl ReadOnlyConfig { /// -`datadir` /// |__db /// |__static_files + /// |__triedb /// ``` /// /// By default this watches the static file directory for changes, see also /// [`StaticFileProvider::read_only`] pub fn from_datadir(datadir: impl AsRef) -> Self { let datadir = datadir.as_ref(); - Self::from_dirs(datadir.join("db"), datadir.join("static_files")) + Self::from_dirs(datadir.join("db"), datadir.join("static_files"), datadir.join("triedb")) } /// Disables long-lived read transaction safety guarantees.
@@ -181,13 +186,10 @@ impl ReadOnlyConfig { /// If the path does not exist pub fn from_db_dir(db_dir: impl AsRef) -> Self { let db_dir = db_dir.as_ref(); - let static_files_dir = std::fs::canonicalize(db_dir) - .unwrap() - .parent() - .unwrap() - .to_path_buf() - .join("static_files"); - Self::from_dirs(db_dir, static_files_dir) + let datadir = std::fs::canonicalize(db_dir).unwrap().parent().unwrap().to_path_buf(); + let static_files_dir = datadir.join("static_files"); + let triedb_dir = datadir.join("triedb"); + Self::from_dirs(db_dir, static_files_dir, triedb_dir) } /// Creates the config for the given paths. @@ -195,9 +197,10 @@ impl ReadOnlyConfig { /// /// By default this watches the static file directory for changes, see also /// [`StaticFileProvider::read_only`] - pub fn from_dirs(db_dir: impl AsRef, static_files_dir: impl AsRef) -> Self { + pub fn from_dirs(db_dir: impl AsRef, static_files_dir: impl AsRef, triedb_dir: impl AsRef) -> Self { Self { static_files_dir: static_files_dir.as_ref().into(), + triedb_dir: triedb_dir.as_ref().into(), db_dir: db_dir.as_ref().into(), db_args: Default::default(), watch_static_files: true, @@ -318,14 +321,58 @@ impl TypesAnd3 { } } -impl TypesAnd3, StaticFileProvider> +impl TypesAnd3, StaticFileProvider> +where + N: NodeTypes, +{ + /// Configures the `TrieDB` provider. + pub fn triedb_provider( + self, + triedb_provider: TriedbProvider, + ) -> TypesAnd4, StaticFileProvider, Arc> { + TypesAnd4::new(self.val_1, self.val_2, self.val_3, Arc::new(triedb_provider)) + } +} + +// impl TypesAnd3, StaticFileProvider> +// where +// N: NodeTypes, +// DB: Database + DatabaseMetrics + Clone + Unpin + 'static, +// { +// /// Creates the [`ProviderFactory`]. +// pub fn build_provider_factory(self) -> ProviderFactory> { +// let Self { _types, val_1, val_2, val_3 } = self; +// ProviderFactory::new(val_1, val_2, val_3) +// } +// } + +/// This is staging type that contains the configured types and _four_ values. 
+#[derive(Debug)] +pub struct TypesAnd4 { + _types: PhantomData, + val_1: Val1, + val_2: Val2, + val_3: Val3, + val_4: Val4, +} + +impl TypesAnd4 { + /// Creates a new instance with the given types and four values. + pub fn new(val_1: Val1, val_2: Val2, val_3: Val3, val_4: Val4) -> Self { + Self { _types: Default::default(), val_1, val_2, val_3, val_4 } + } +} + +impl TypesAnd4, StaticFileProvider, Arc> where N: NodeTypes, DB: Database + DatabaseMetrics + Clone + Unpin + 'static, { /// Creates the [`ProviderFactory`]. - pub fn build_provider_factory(self) -> ProviderFactory> { - let Self { _types, val_1, val_2, val_3 } = self; - ProviderFactory::new(val_1, val_2, val_3) + pub fn build_provider_factory( + self, + ) -> ProviderFactory> { + let Self { _types, val_1, val_2, val_3, val_4 } = self; + ProviderFactory::new(val_1, val_2, val_3, val_4) } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index d670836d2d6..0a7af495fee 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -33,14 +33,15 @@ use std::{ path::Path, sync::Arc, }; - +use std::ops::Add; use tracing::trace; mod provider; pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW}; - +use fixed_cache::Cache; use super::ProviderNodeTypes; use reth_trie::KeccakKeyHasher; +use triedb::path::AddressPath; mod builder; pub use builder::{ProviderFactoryBuilder, ReadOnlyConfig}; @@ -49,6 +50,7 @@ mod metrics; mod chain; pub use chain::*; +use crate::providers::triedb::TriedbProvider; /// A common provider that fetches data from a database or static file. /// @@ -64,6 +66,8 @@ pub struct ProviderFactory { prune_modes: PruneModes, /// The node storage handler. 
storage: Arc, + + triedb_provider: Arc } impl ProviderFactory>> { @@ -79,13 +83,20 @@ impl ProviderFactory { db: N::DB, chain_spec: Arc, static_file_provider: StaticFileProvider, + triedb_provider: Arc ) -> Self { + // Initialize the static triedb_provider + let _ = crate::providers::state::latest::set_triedb_provider(triedb_provider.clone()); + let cache: Cache = Cache::new(65536, Default::default()); + let _ = crate::providers::state::latest::set_fixed_cache(cache); + Self { db, chain_spec, static_file_provider, prune_modes: PruneModes::default(), storage: Default::default(), + triedb_provider } } @@ -127,13 +138,18 @@ impl>> ProviderFactory { chain_spec: Arc, args: DatabaseArguments, static_file_provider: StaticFileProvider, + triedb_provider: Arc ) -> RethResult { + // Initialize the static triedb_provider + let _ = crate::providers::state::latest::set_triedb_provider(triedb_provider.clone()); + Ok(Self { db: Arc::new(init_db(path, args).map_err(RethError::msg)?), chain_spec, static_file_provider, prune_modes: PruneModes::default(), storage: Default::default(), + triedb_provider }) } } @@ -153,6 +169,7 @@ impl ProviderFactory { self.static_file_provider.clone(), self.prune_modes.clone(), self.storage.clone(), + Some(self.triedb_provider.clone()), )) } @@ -168,6 +185,7 @@ impl ProviderFactory { self.static_file_provider.clone(), self.prune_modes.clone(), self.storage.clone(), + Some(self.triedb_provider.clone()), ))) } @@ -551,11 +569,12 @@ where N: NodeTypesWithDB, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let Self { db, chain_spec, static_file_provider, prune_modes, storage } = self; + let Self { db, chain_spec, static_file_provider, prune_modes, storage, triedb_provider } = self; f.debug_struct("ProviderFactory") .field("db", &db) .field("chain_spec", &chain_spec) .field("static_file_provider", &static_file_provider) + .field("triedb_provider", &triedb_provider) .field("prune_modes", &prune_modes) .field("storage", &storage) .finish() @@ 
-568,6 +587,7 @@ impl Clone for ProviderFactory { db: self.db.clone(), chain_spec: self.chain_spec.clone(), static_file_provider: self.static_file_provider.clone(), + triedb_provider: self.triedb_provider.clone(), prune_modes: self.prune_modes.clone(), storage: self.storage.clone(), } @@ -596,6 +616,7 @@ mod tests { use reth_storage_errors::provider::ProviderError; use reth_testing_utils::generators::{self, random_block, random_header, BlockParams}; use std::{ops::RangeInclusive, sync::Arc}; + use reth_db::test_utils::create_test_triedb_dir; #[test] fn common_history_provider() { @@ -627,11 +648,13 @@ mod tests { fn provider_factory_with_database_path() { let chain_spec = ChainSpecBuilder::mainnet().build(); let (_static_dir, static_dir_path) = create_test_static_files_dir(); + let (_trie_dir, trie_dir_path) = create_test_triedb_dir(); let factory = ProviderFactory::>::new_with_database_path( tempfile::TempDir::new().expect(ERROR_TEMPDIR).keep(), Arc::new(chain_spec), DatabaseArguments::new(Default::default()), StaticFileProvider::read_write(static_dir_path).unwrap(), + Arc::new(TriedbProvider::new(trie_dir_path)) ) .unwrap(); let provider = factory.provider().unwrap(); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 1f0a0aa391a..53cd933d3ad 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -28,7 +28,7 @@ use alloy_eips::BlockHashOrNumber; use alloy_primitives::{ keccak256, map::{hash_map, B256Map, HashMap, HashSet}, - Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, + Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256,StorageKey,StorageValue }; use itertools::Itertools; use rayon::slice::ParallelSliceMut; @@ -57,7 +57,7 @@ use reth_prune_types::{ use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ - 
BlockBodyIndicesProvider, BlockBodyReader, NodePrimitivesProvider, StateProvider, + BlockBodyIndicesProvider, BlockBodyReader, NodePrimitivesProvider, PlainPostState, StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::ProviderResult; @@ -83,7 +83,14 @@ use std::{ ops::{Deref, DerefMut, Not, Range, RangeBounds, RangeFrom, RangeInclusive}, sync::Arc, }; -use tracing::{debug, trace}; +use tracing::{debug, info, trace}; +use alloy_consensus::constants::KECCAK_EMPTY; +use alloy_trie::EMPTY_ROOT_HASH; +use tokio::time::Instant; +use triedb::{ + account::Account as TrieDBAccount, + path::{AddressPath, StoragePath}, +}; /// A [`DatabaseProvider`] that holds a read-only database transaction. pub type DatabaseProviderRO = DatabaseProvider<::TX, N>; @@ -153,6 +160,8 @@ pub struct DatabaseProvider { prune_modes: PruneModes, /// Node storage handler. storage: Arc, + /// TrieDB provider for triedb operations + triedb_provider: Option>, } impl DatabaseProvider { @@ -160,6 +169,17 @@ impl DatabaseProvider { pub const fn prune_modes_ref(&self) -> &PruneModes { &self.prune_modes } + + /// Returns reference to TrieDB provider if available + pub fn triedb_provider(&self) -> Option<&Arc> { + self.triedb_provider.as_ref() + } +} + +impl crate::providers::state::latest::TriedbProviderAccess for DatabaseProvider { + fn triedb_provider(&self) -> Option<&Arc> { + self.triedb_provider.as_ref() + } } impl DatabaseProvider { @@ -242,14 +262,15 @@ impl> C impl DatabaseProvider { /// Creates a provider with an inner read-write transaction. 
- pub const fn new_rw( + pub fn new_rw( tx: TX, chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, storage: Arc, + triedb_provider: Option>, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes, storage } + Self { tx, chain_spec, static_file_provider, prune_modes, storage, triedb_provider } } } @@ -284,10 +305,32 @@ impl DatabaseProvider()); + storage_map.insert(slot_b256, storage_slot.present_value); + } + } + } + let block_number = recovered_block.number(); self.insert_block(Arc::unwrap_or_clone(recovered_block))?; @@ -310,6 +353,66 @@ impl DatabaseProvider().as_slice() + ); + tx.set_storage_slot(storage_path, Some(storage_value_triedb)) + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to set triedb storage slot: {e:?}")))?; + } + } + } + + // Then, write accounts (storage roots will be computed automatically by triedb) + for (address, account_opt) in &merged_plain_state.accounts { + let address_path = AddressPath::for_address(*address); + + if let Some(account) = account_opt { + // Account exists or is being updated + // Storage root will be computed from the storage we just wrote + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, // Will be computed from storage + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + tx.set_account(address_path, Some(trie_account)) + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to set triedb account: {e:?}")))?; + } else { + // Account is being destroyed + tx.set_account(address_path, None) + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to delete triedb account: {e:?}")))?; + } + } + + // Commit the triedb transaction + tx.commit() + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to commit triedb transaction: {e:?}")))?; + } + let elapsed = start.elapsed().as_millis(); + info!("save to trie db elapsed {:?}", elapsed); debug!(target: "providers::db", range = 
?first_number..=last_block_number, "Appended block data"); Ok(()) @@ -488,14 +591,15 @@ where impl DatabaseProvider { /// Creates a provider with an inner read-only transaction. - pub const fn new( + pub fn new( tx: TX, chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, storage: Arc, + triedb_provider: Option>, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes, storage } + Self { tx, chain_spec, static_file_provider, prune_modes, storage, triedb_provider } } /// Consume `DbTx` or `DbTxMut`. @@ -3124,7 +3228,7 @@ impl DBProvider for DatabaseProvider fn tx_ref(&self) -> &Self::Tx { &self.tx } - + fn tx_mut(&mut self) -> &mut Self::Tx { &mut self.tx } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 41e8121991b..772e891f1dd 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -13,7 +13,9 @@ pub use static_file::{ StaticFileProviderRWRefMut, StaticFileWriter, }; -mod state; +pub mod triedb; + +pub mod state; pub use state::{ historical::{HistoricalStateProvider, HistoricalStateProviderRef, LowestAvailableBlocks}, latest::{LatestStateProvider, LatestStateProviderRef}, diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 666138fae7b..3ea8ba386b6 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -14,7 +14,8 @@ use reth_db_api::{ }; use reth_primitives_traits::{Account, Bytecode}; use reth_storage_api::{ - BlockNumReader, BytecodeReader, DBProvider, StateProofProvider, StorageRootProvider, + BlockNumReader, BytecodeReader, DBProvider, PlainPostState, StateProofProvider, + StorageRootProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ @@ -316,6 +317,14 @@ impl StateRootProvider 
StateRoot::overlay_root_from_nodes_with_updates(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } + + fn state_root_with_updates_triedb( + &self, + _plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + tracing::debug!("latest_state_provider state_root_with_updates_triedb"); + todo!() + } } impl StorageRootProvider diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 092feb37c43..fb435842ae3 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -1,12 +1,15 @@ +use std::ops::Add; use crate::{ providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, HashedPostStateProvider, StateProvider, StateRootProvider, }; -use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; +use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, U256}; use reth_db_api::{cursor::DbDupCursorRO, tables, transaction::DbTx}; use reth_primitives_traits::{Account, Bytecode}; -use reth_storage_api::{BytecodeReader, DBProvider, StateProofProvider, StorageRootProvider}; +use reth_storage_api::{BytecodeReader, DBProvider, PlainPostState, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use std::sync::{Arc, OnceLock}; +use std::time::Instant; use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, @@ -18,6 +21,32 @@ use reth_trie_db::{ DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, }; +use alloy_consensus::constants::KECCAK_EMPTY; +use alloy_trie::EMPTY_ROOT_HASH; +use triedb::{ + account::Account as TrieDBAccount, + overlay::{OverlayStateMut, OverlayValue}, + path::{AddressPath, StoragePath}, +}; +use fixed_cache::{static_cache,Cache}; + +/// Static storage for the triedb provider instance +static 
TRIEDB_PROVIDER: OnceLock> = OnceLock::new(); +static FIXED_CACHE: OnceLock> = OnceLock::new(); + +/// Initialize the static triedb provider +pub fn set_triedb_provider(provider: Arc) -> Result<(), Arc> { + TRIEDB_PROVIDER.set(provider) +} + +/// Get the static triedb provider +pub fn get_triedb_provider() -> Option<&'static Arc> { + TRIEDB_PROVIDER.get() +} +pub fn set_fixed_cache(cache: Cache) -> Result<(), Cache> { + tracing::info!("set_fixed_cache"); + FIXED_CACHE.set(cache) +} /// State provider over latest state that takes tx reference. /// @@ -84,8 +113,105 @@ impl StateRootProvider for LatestStateProviderRef<' StateRoot::overlay_root_from_nodes_with_updates(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } + + fn state_root_with_updates_triedb( + &self, + plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + tracing::info!("latest_state_provider state_root_with_updates_triedb"); + let triedb_provider = get_triedb_provider() + .ok_or_else(|| ProviderError::UnsupportedProvider)?; + let start = Instant::now(); + let address_cache = FIXED_CACHE.get().unwrap(); + let mut overlay_mut = OverlayStateMut::new(); + + for (address, account_opt) in &plain_state.accounts { + let address_path = address_cache.get_or_insert_with(*address, |address| { + AddressPath::for_address(*address) + }); + if let Some(account) = account_opt { + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + EMPTY_ROOT_HASH, + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + overlay_mut.insert(address_path.clone().into(), Some(OverlayValue::Account(trie_account))); + } else { + overlay_mut.insert(address_path.clone().into(), None); + } + } + let total_accts = plain_state.accounts.len(); + tracing::info!("latest_state_provider total acct: {total_accts:?}"); + + let mut total_storage = 0; + + for (address, storage) in &plain_state.storages { + let address_path = address_cache.get_or_insert_with(*address, |address| { + 
AddressPath::for_address(*address) + }); + + total_storage += storage.len(); + for (storage_key, storage_value) in storage { + let raw_slot = U256::from_be_slice(storage_key.as_slice()); + let storage_key_typed = StorageKey::from(raw_slot); + + // let storage_path = storage_path_cache.get_or_insert_with( + // (*address, storage_key_typed), + // |(address, key)| { + // StoragePath::for_address_path_and_slot( + // address_path.clone(), + // *key, + // ) + // } + // ); + + let storage_path = StoragePath::for_address_path_and_slot( + address_path.clone(), + storage_key_typed, + ); + + if storage_value.is_zero() { + overlay_mut.insert(storage_path.clone().into(), None); + } else { + overlay_mut.insert( + storage_path.clone().into(), + Some(OverlayValue::Storage(StorageValue::from_be_slice( + storage_value.to_be_bytes::<32>().as_slice() + ))), + ); + } + } + } + tracing::info!("latest_state_provider total storage: {total_storage:?}"); + let elapsed = start.elapsed().as_millis(); + tracing::info!("latest_state_provider overlay prepare elapsed: {elapsed:?}"); + + let start = Instant::now(); + let overlay = overlay_mut.freeze(); + let elapsed = start.elapsed().as_millis(); + tracing::info!("latest_state_provider overlay freeze elapsed: {elapsed:?}"); + + + let start = Instant::now(); + let mut tx = triedb_provider.inner.begin_ro() + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to begin triedb transaction: {e:?}")))?; + + let result = tx.compute_root_with_overlay(overlay) + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to compute triedb root: {e:?}")))?; + let elapsed = start.elapsed().as_millis(); + tracing::info!("latest_state_provider compute_root_with_overlay elapsed: {elapsed:?}"); + + let start = Instant::now(); + tx.commit() + .map_err(|e| ProviderError::TrieWitnessError(format!("Failed to commit triedb transaction: {e:?}")))?; + let elapsed = start.elapsed().as_millis(); + tracing::info!("latest_state_provider commit elapsed: 
{elapsed:?}"); + Ok((result.root, TrieUpdates::default())) + } } + impl StorageRootProvider for LatestStateProviderRef<'_, Provider> { fn storage_root( &self, @@ -178,6 +304,12 @@ impl BytecodeReader } } +/// Trait for accessing TrieDB provider +pub trait TriedbProviderAccess { + /// Returns reference to TrieDB provider if available + fn triedb_provider(&self) -> Option<&Arc>; +} + /// State provider for the latest state. #[derive(Debug)] pub struct LatestStateProvider(Provider); @@ -193,6 +325,14 @@ impl LatestStateProvider { const fn as_ref(&self) -> LatestStateProviderRef<'_, Provider> { LatestStateProviderRef::new(&self.0) } + + /// Returns reference to TrieDB provider if available + pub fn triedb_provider(&self) -> Option<&Arc> + where + Provider: TriedbProviderAccess, + { + self.0.triedb_provider() + } } // Delegates all provider impls to [LatestStateProviderRef] diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index 74bb371819f..35f0cfee49b 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -48,6 +48,7 @@ macro_rules! delegate_provider_impls { fn state_root_from_nodes(&self, input: reth_trie::TrieInput) -> reth_storage_errors::provider::ProviderResult; fn state_root_with_updates(&self, state: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult<(alloy_primitives::B256, reth_trie::updates::TrieUpdates)>; fn state_root_from_nodes_with_updates(&self, input: reth_trie::TrieInput) -> reth_storage_errors::provider::ProviderResult<(alloy_primitives::B256, reth_trie::updates::TrieUpdates)>; + fn state_root_with_updates_triedb(&self, plain_state: reth_storage_api::PlainPostState) -> reth_storage_errors::provider::ProviderResult<(alloy_primitives::B256, reth_trie::updates::TrieUpdates)>; } StorageRootProvider $(where [$($generics)*])? 
{ fn storage_root(&self, address: alloy_primitives::Address, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; diff --git a/crates/storage/provider/src/providers/state/mod.rs b/crates/storage/provider/src/providers/state/mod.rs index f26302531eb..f5e83d23a5f 100644 --- a/crates/storage/provider/src/providers/state/mod.rs +++ b/crates/storage/provider/src/providers/state/mod.rs @@ -1,5 +1,5 @@ //! [`StateProvider`](crate::StateProvider) implementations pub(crate) mod historical; -pub(crate) mod latest; +pub mod latest; pub(crate) mod macros; pub(crate) mod overlay; diff --git a/crates/storage/provider/src/providers/triedb/mod.rs b/crates/storage/provider/src/providers/triedb/mod.rs new file mode 100644 index 00000000000..c5aca2c8d29 --- /dev/null +++ b/crates/storage/provider/src/providers/triedb/mod.rs @@ -0,0 +1,166 @@ +use std::{path::Path, sync::Arc}; +use alloy_primitives::{Address, B256, U256}; +use alloy_trie::EMPTY_ROOT_HASH; +use alloy_consensus::constants::KECCAK_EMPTY; +use reth_primitives_traits::Account; +use triedb::{Database as TrieDbDatabase, path::{AddressPath, StoragePath}, account::Account as TrieDBAccount, + transaction::TransactionError, Database}; +#[derive(Debug, Clone)] +pub struct TriedbProvider { + pub inner: Arc +} + +impl TriedbProvider { + pub fn new(db_path: impl AsRef) -> Self { + let db_path = db_path.as_ref(); + let db = if db_path.exists() { + println!("Opening triedb database at {}", db_path.display()); + // Try to open existing database + match TrieDbDatabase::open(db_path) { + Ok(db) => db, + Err(e) => { + println!("Failed to open existing triedb database: {e:?}. 
Removing and creating new database."); + // Remove the existing directory and create fresh + if db_path.is_dir() { + std::fs::remove_dir_all(db_path).unwrap_or_else(|e| { + panic!("Failed to remove existing triedb directory at {:?}: {e:?}", db_path); + }); + } else { + std::fs::remove_file(db_path).unwrap_or_else(|e| { + panic!("Failed to remove existing triedb file at {:?}: {e:?}", db_path); + }); + } + // Ensure parent directory exists + if let Some(parent) = db_path.parent() { + std::fs::create_dir_all(parent).unwrap(); + } + TrieDbDatabase::create_new(db_path).unwrap() + } + } + } else { + // Ensure parent directory exists + if let Some(parent) = db_path.parent() { + std::fs::create_dir_all(parent).unwrap(); + } + TrieDbDatabase::create_new(db_path).unwrap() + }; + Self { + inner: Arc::new(db), + } + } + pub fn set_account( + &self, + address: Address, + account: Account, + storage_root: Option, + ) -> Result<(), TransactionError> { + let mut tx = self.inner.begin_rw()?; + + let address_path = AddressPath::for_address(address); + let storage_root = storage_root.unwrap_or(EMPTY_ROOT_HASH); + let trie_account = TrieDBAccount::new( + account.nonce, + account.balance, + storage_root, + account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + ); + + tx.set_account(address_path, Some(trie_account))?; + tx.commit()?; + Ok(()) + } + + pub fn get_account(&self, address: Address) -> Result, TransactionError> { + let mut tx = self.inner.begin_ro()?; + let address_path = AddressPath::for_address(address); + + let trie_account_opt = tx.get_account(&address_path)?; + + let account_opt = trie_account_opt.map(|trie_account| { + Account { + nonce: trie_account.nonce, + balance: trie_account.balance, + bytecode_hash: if trie_account.code_hash == KECCAK_EMPTY { + None + } else { + Some(trie_account.code_hash) + }, + } + }); + + Ok(account_opt) + } + +} + +#[cfg(test)] +mod tests { + use super::*; + use tempdir::TempDir; + + #[test] + fn test_triedb_provider_new_set_get() { + let dir = 
TempDir::new("triedb_test").unwrap(); + let db_path = dir.path().join("triedb"); + let provider = TriedbProvider::new(&db_path); + + let address = Address::with_last_byte(1); + let account = Account { + nonce: 42, + balance: U256::from(1000), + bytecode_hash: None, + }; + + provider.set_account(address, account, None).unwrap(); + + let provider2 = TriedbProvider::new(&db_path); + + let retrieved_account = provider2.get_account(address).unwrap(); + + assert!(retrieved_account.is_some(), "Account should exist"); + let acc = retrieved_account.unwrap(); + assert_eq!(acc.nonce, 42, "Nonce should match"); + assert_eq!(acc.balance, U256::from(1000), "Balance should match"); + assert_eq!(acc.bytecode_hash, None, "Bytecode hash should be None for EOA"); + } + + #[test] + fn test_triedb_provider_with_contract() { + let dir = TempDir::new("triedb_test_contract").unwrap(); + let db_path = dir.path().join("triedb"); + + let provider = TriedbProvider::new(&db_path); + + let address = Address::with_last_byte(2); + let code_hash = B256::with_last_byte(0xFF); + let account = Account { + nonce: 1, + balance: U256::from(5000), + bytecode_hash: Some(code_hash), + }; + + provider.set_account(address, account, None).unwrap(); + + let provider2 = TriedbProvider::new(&db_path); + let retrieved_account = provider2.get_account(address).unwrap(); + + assert!(retrieved_account.is_some(), "Contract account should exist"); + let acc = retrieved_account.unwrap(); + assert_eq!(acc.nonce, 1, "Nonce should match"); + assert_eq!(acc.balance, U256::from(5000), "Balance should match"); + assert_eq!(acc.bytecode_hash, Some(code_hash), "Code hash should match"); + } + + #[test] + fn test_triedb_provider_nonexistent_account() { + let dir = TempDir::new("triedb_test_nonexistent").unwrap(); + let db_path = dir.path().join("triedb"); + + let provider = TriedbProvider::new(&db_path); + + let nonexistent_address = Address::with_last_byte(99); + let result = provider.get_account(nonexistent_address).unwrap(); 
+ + assert!(result.is_none(), "Nonexistent account should return None"); + } +} \ No newline at end of file diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 16388de91ae..d20a52b7aa6 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -33,8 +33,8 @@ use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, BytecodeReader, DBProvider, DatabaseProviderFactory, - HashedPostStateProvider, NodePrimitivesProvider, StageCheckpointReader, StateProofProvider, - StorageRootProvider, TrieReader, + HashedPostStateProvider, NodePrimitivesProvider, PlainPostState, StageCheckpointReader, + StateProofProvider, StorageRootProvider, TrieReader, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ @@ -799,6 +799,13 @@ where let state_root = self.state_roots.lock().pop().unwrap_or_default(); Ok((state_root, Default::default())) } + + fn state_root_with_updates_triedb( + &self, + _plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + todo!() + } } impl StorageRootProvider for MockEthProvider diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index ccda2d60e85..ed1365baef4 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -23,6 +23,8 @@ mod noop; pub use mock::{ExtendedAccount, MockEthProvider}; pub use noop::NoopProvider; pub use reth_chain_state::test_utils::TestCanonStateSubscriptions; +use reth_db::test_utils::create_test_triedb_dir; +use crate::providers::triedb::TriedbProvider; /// Mock [`reth_node_types::NodeTypes`] for testing. 
pub type MockNodeTypes = reth_node_types::AnyNodeTypesWithEngine< @@ -54,11 +56,13 @@ pub fn create_test_provider_factory_with_node_types( chain_spec: Arc, ) -> ProviderFactory>>> { let (static_dir, _) = create_test_static_files_dir(); + let (triedb_dir, _) = create_test_triedb_dir(); let db = create_test_rw_db(); ProviderFactory::new( db, chain_spec, StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"), + Arc::new(TriedbProvider::new(triedb_dir)), ) } diff --git a/crates/storage/rpc-provider/src/lib.rs b/crates/storage/rpc-provider/src/lib.rs index 6e5bd17218b..a52c6d0e2e1 100644 --- a/crates/storage/rpc-provider/src/lib.rs +++ b/crates/storage/rpc-provider/src/lib.rs @@ -57,7 +57,7 @@ use reth_rpc_convert::{TryFromBlockResponse, TryFromReceiptResponse, TryFromTran use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockReaderIdExt, BlockSource, DBProvider, NodePrimitivesProvider, - ReceiptProviderIdExt, StatsReader, + PlainPostState, ReceiptProviderIdExt, StatsReader, }; use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState, MultiProof, TrieInput}; use std::{ @@ -1197,6 +1197,13 @@ where warn!("state_root_from_nodes_with_updates is not implemented and will return zero"); Ok((B256::ZERO, TrieUpdates::default())) } + + fn state_root_with_updates_triedb( + &self, + _plain_state: PlainPostState, + ) -> Result<(B256, TrieUpdates), ProviderError> { + todo!() + } } impl StorageReader for RpcBlockchainStateProvider diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index a62193a5dd8..ca11dc5ad55 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -32,6 +32,7 @@ alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true auto_impl.workspace = true +triedb.workspace = true [features] default = ["std"] diff --git a/crates/storage/storage-api/src/noop.rs 
b/crates/storage/storage-api/src/noop.rs index e538e1216e8..16fe844aaed 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -3,10 +3,10 @@ use crate::{ AccountReader, BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, BytecodeReader, ChangeSetReader, - HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, PruneCheckpointReader, - ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProofProvider, - StateProvider, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, - StorageRootProvider, TransactionVariant, TransactionsProvider, TrieReader, + HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, PlainPostState, + PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, + StateProofProvider, StateProvider, StateProviderBox, StateProviderFactory, StateReader, + StateRootProvider, StorageRootProvider, TransactionVariant, TransactionsProvider, TrieReader, }; #[cfg(feature = "db-api")] @@ -424,6 +424,13 @@ impl StateRootProvider for NoopProvider ) -> ProviderResult<(B256, TrieUpdates)> { Ok((B256::default(), TrieUpdates::default())) } + + fn state_root_with_updates_triedb( + &self, + _plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + todo!() + } } impl StorageRootProvider for NoopProvider { diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index 9ff02c106e5..d77f95119b4 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -1,12 +1,22 @@ use alloc::vec::Vec; -use alloy_primitives::{Address, BlockNumber, Bytes, B256}; +use alloy_primitives::{Address, BlockNumber, Bytes, B256, U256}; use reth_storage_errors::provider::ProviderResult; use reth_trie_common::{ updates::{StorageTrieUpdatesSorted, TrieUpdates, TrieUpdatesSorted}, AccountProof, HashedPostState, 
HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; - +use reth_primitives_traits::Account; +use std::collections::HashMap; + +/// Plain (unhashed) post state updates for TrieDB computation +#[derive(Debug, Clone, Default)] +pub struct PlainPostState { + /// Mapping of address to account info, `None` if destroyed + pub accounts: HashMap>, + /// Mapping of address to storage entries (slot -> value) + pub storages: HashMap>, +} /// A type that can compute the state root of a given post state. #[auto_impl::auto_impl(&, Box, Arc)] pub trait StateRootProvider: Send + Sync { @@ -37,6 +47,11 @@ pub trait StateRootProvider: Send + Sync { &self, input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)>; + + fn state_root_with_updates_triedb( + &self, + plain_state: PlainPostState, + ) -> ProviderResult<(B256, TrieUpdates)>; } /// A type that can compute the storage root for a given account. diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml index 09ccd301192..946c7de9b62 100644 --- a/crates/trie/db/Cargo.toml +++ b/crates/trie/db/Cargo.toml @@ -32,7 +32,8 @@ reth-db = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } reth-trie = { workspace = true, features = ["test-utils"] } - +reth-trie-db.workspace = true +reth-storage-api.workspace = true alloy-consensus.workspace = true alloy-rlp.workspace = true revm.workspace = true @@ -40,7 +41,8 @@ revm-database.workspace = true # trie triehash.workspace = true - +triedb.workspace = true +tempdir = "0.3.7" # misc proptest.workspace = true proptest-arbitrary-interop.workspace = true diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 403d187e46a..691697f025f 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -42,6 +42,8 @@ metrics = { workspace = true, optional = true } # `test-utils` 
feature triehash = { workspace = true, optional = true } +triedb.workspace = true +nybbles = "0.3.4" [dev-dependencies] # reth @@ -49,6 +51,10 @@ reth-ethereum-primitives = { workspace = true, features = ["arbitrary", "std"] } reth-primitives-traits = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } +reth-provider = { workspace = true, features = ["test-utils"] } +reth-trie-db.workspace = true +reth-chainspec.workspace = true +nybbles.workspace = true # revm revm-state.workspace = true diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index e53049b5872..62d3ff1a5e7 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -65,4 +65,4 @@ pub mod test_utils; pub mod mock; /// Verification of existing stored trie nodes against state data. -pub mod verify; +pub mod verify; \ No newline at end of file diff --git a/crates/trie/trie/src/trie_ext.rs b/crates/trie/trie/src/trie_ext.rs new file mode 100644 index 00000000000..529f00ab673 --- /dev/null +++ b/crates/trie/trie/src/trie_ext.rs @@ -0,0 +1,229 @@ +// use std::path::Path; +// use std::time::Instant; +// use alloy_primitives::B256; +// use alloy_trie::{HashBuilder, EMPTY_ROOT_HASH}; +// use tracing::{debug, trace}; +// use reth_execution_errors::StateRootError; +// use reth_trie_common::{prefix_set::TriePrefixSets}; +// use crate::{IntermediateStateRootState, StateRoot, StateRootProgress, StorageRoot}; +// use crate::hashed_cursor::{HashedCursor, HashedCursorFactory}; +// use crate::node_iter::{TrieElement, TrieNodeIter}; +// use crate::stats::TrieTracker; +// use crate::trie::StateRootContext; +// use crate::trie_cursor::TrieCursorFactory; +// use crate::walker::TrieWalker; +// use triedb::{Database as TrieDbDatabase, path::{AddressPath, StoragePath}, }; +// use nybbles::Nibbles; +// use triedb::account::Account as TrieDbAccount; +// use 
alloy_consensus::constants::KECCAK_EMPTY; +// #[derive(Debug)] +// pub struct TrieExtDatabase { +// pub inner: TrieDbDatabase, +// } +// +// impl TrieExtDatabase { +// pub fn new(db_path: impl AsRef) -> Self { +// let db_path = db_path.as_ref(); +// let db = TrieDbDatabase::create_new(db_path).unwrap(); +// Self { +// inner: db, +// } +// } +// } +// +// /// `StateRoot` is used to compute the root node of a state trie. +// #[derive(Debug)] +// pub struct StateRootTrieDb { +// /// The factory for hashed cursors. +// pub hashed_cursor_factory: H, +// pub db: TrieExtDatabase +// } +// +// impl StateRootTrieDb { +// /// Creates [`StateRootTrieDb`] with +// pub fn new(hashed_cursor_factory: H, db: TrieExtDatabase) -> Self { +// Self { +// hashed_cursor_factory, +// db +// } +// } +// } +// impl StateRootTrieDb +// where +// H: HashedCursorFactory + Clone, +// { +// pub fn calculate_commit(self) -> Result { +// trace!(target: "trie::state_root", "calculating state root"); +// +// let mut acct_cursor = self.hashed_cursor_factory.hashed_account_cursor()?; +// +// let mut tx = self.db.inner.begin_rw().unwrap(); +// +// // Start from the beginning by seeking to the first account (B256::ZERO) +// let mut account_entry = acct_cursor.next().unwrap(); +// while let Some((hashed_address, account)) = account_entry { +// +// let nibbles = Nibbles::unpack(hashed_address); +// let address_path = AddressPath::new(nibbles); +// +// // Get storage cursor for this account first +// let mut storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor(hashed_address)?; +// +// // Iterate through all storage entries for this account to compute storage root +// // For now, we'll use EMPTY_ROOT_HASH if no storage entries exist +// // TODO: Compute actual storage root from storage entries +// let mut storage_entry = storage_cursor.seek(B256::ZERO)?; +// let storage_root = if storage_entry.is_some() { +// // If there are storage entries, we need to compute the storage root +// // For now, 
use EMPTY_ROOT_HASH as placeholder +// // In a full implementation, you'd build the storage trie and get its root +// EMPTY_ROOT_HASH +// } else { +// EMPTY_ROOT_HASH +// }; +// +// // Convert reth_primitives_traits::Account to triedb::account::Account +// let triedb_account = TrieDbAccount { +// nonce: account.nonce, +// balance: account.balance, +// code_hash: account.bytecode_hash.unwrap_or(KECCAK_EMPTY), +// storage_root, +// }; +// +// tx.set_account(address_path.clone(), Some(triedb_account)).unwrap(); +// +// // Now set storage slots in TrieDB +// while let Some((hashed_storage_key, storage_value)) = storage_entry { +// let storage_path = StoragePath::for_address_path_and_slot_hash(address_path.clone(), Nibbles::unpack(hashed_storage_key)); +// tx.set_storage_slot(storage_path, Some(storage_value)).unwrap(); +// +// storage_entry = storage_cursor.next()?; +// } +// +// account_entry = acct_cursor.next()?; +// } +// let start_commit = Instant::now(); +// tx.commit().unwrap(); +// println!("commit elapsed: {:?}", start_commit.elapsed()); +// Ok(self.db.inner.state_root()) +// } +// } +// +// #[cfg(test)] +// mod tests { +// use tempdir::TempDir; +// use super::{TrieExtDatabase}; +// use crate::hashed_cursor::{HashedCursor, HashedCursorFactory}; +// use reth_provider::{ +// test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, +// ProviderFactory, HashingWriter, DBProvider +// }; +// use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA}; +// use reth_provider::DatabaseProviderFactory; +// use reth_trie_db::DatabaseHashedCursorFactory; +// use alloy_primitives::{Address, U256, keccak256, B256}; +// use reth_primitives_traits::Account; +// +// #[test] +// pub fn test_triedb() { +// let tmp_dir = TempDir::new("test_triedb").unwrap(); +// let file_path = tmp_dir.path().join("test.db"); +// let trie_db = TrieExtDatabase::new(file_path); +// +// let provider_factory = 
create_test_provider_factory_with_chain_spec(MAINNET.clone()); +// +// let mut provider_rw = provider_factory.database_provider_rw().unwrap(); +// +// // Generate dummy accounts +// let dummy_accounts: Vec<(Address, Account)> = vec![ +// ( +// Address::with_last_byte(1), +// Account { +// nonce: 10, +// balance: U256::from(1000), +// bytecode_hash: None, +// }, +// ), +// ( +// Address::with_last_byte(2), +// Account { +// nonce: 20, +// balance: U256::from(2000), +// bytecode_hash: None, +// }, +// ), +// ( +// Address::with_last_byte(3), +// Account { +// nonce: 30, +// balance: U256::from(3000), +// bytecode_hash: None, +// }, +// ), +// ]; +// +// // Insert accounts into the database +// let accounts_for_hashing = dummy_accounts +// .iter() +// .map(|(address, account)| (*address, Some(*account))); +// +// provider_rw.insert_account_for_hashing(accounts_for_hashing).unwrap(); +// +// // Commit the transaction (this consumes provider_rw) +// provider_rw.commit().unwrap(); +// +// // Get a new provider to read the committed data +// let provider_rw = provider_factory.database_provider_rw().unwrap(); +// let tx = provider_rw.tx_ref(); +// let hashed_cursor_factory = DatabaseHashedCursorFactory::new(tx); +// println!("hashed cursor factory: {:?}", hashed_cursor_factory); +// let mut account_cursor = hashed_cursor_factory.hashed_account_cursor().unwrap(); +// // +// // // Start from the beginning (seek to B256::ZERO to get the first account) +// // let mut account_entry = account_cursor.seek(B256::ZERO).unwrap(); +// // +// // let mut iterated_accounts = Vec::new(); +// // +// // // Iterate through all accounts +// // while let Some((hashed_address, account)) = account_entry { +// // iterated_accounts.push((hashed_address, account)); +// // +// // // Move to next account +// // account_entry = account_cursor.next().unwrap(); +// // } +// // +// // // Verify we got all the accounts we inserted +// // assert_eq!(iterated_accounts.len(), dummy_accounts.len()); +// // 
+// // // Verify the accounts match (by checking hashed addresses) +// // let inserted_hashed_addresses: Vec = dummy_accounts +// // .iter() +// // .map(|(address, _)| keccak256(address)) +// // .collect(); +// // +// // let iterated_hashed_addresses: Vec = iterated_accounts +// // .iter() +// // .map(|(hashed_address, _)| *hashed_address) +// // .collect(); +// // +// // // Sort both for comparison +// // let mut inserted_sorted = inserted_hashed_addresses.clone(); +// // inserted_sorted.sort(); +// // let mut iterated_sorted = iterated_hashed_addresses.clone(); +// // iterated_sorted.sort(); +// // +// // assert_eq!(inserted_sorted, iterated_sorted); +// // +// // // Verify account data matches +// // for (hashed_address, account) in &iterated_accounts { +// // let original_account = dummy_accounts +// // .iter() +// // .find(|(addr, _)| keccak256(addr) == *hashed_address) +// // .unwrap(); +// // +// // assert_eq!(account.nonce, original_account.1.nonce); +// // assert_eq!(account.balance, original_account.1.balance); +// // assert_eq!(account.bytecode_hash, original_account.1.bytecode_hash); +// // } +// } +// } \ No newline at end of file diff --git a/triedb.md b/triedb.md new file mode 100644 index 00000000000..3eea2b425ca --- /dev/null +++ b/triedb.md @@ -0,0 +1,8 @@ +rm -rf ~/Library/Application\ Support/reth/dev && rm -rf logs \ +&& cargo run --package op-reth --bin op-reth -- node --dev \ + -vvvv \ + --log.file.filter debug \ + --log.file.directory /Users/cliffyang/dev/okx/reth/logs \ + --log.file.name op-reth.log + +cast send 0x33f34d8b20696780ba07b1ea89f209b4dc51723a --value 1ether --private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 --rpc-url http://localhost:8545 --gas-price 1000gwei \ No newline at end of file