From e5083b238c2de5597d31f1dc36e6539a3a9fe31e Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Wed, 31 Dec 2025 15:12:16 +0800 Subject: [PATCH 01/28] core/vm: fix RequiredGas for P256Verify --- core/vm/contracts.go | 18 ++++++++++++++++-- params/protocol_params.go | 1 - 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 78c2c1ce33..b77b81ce5b 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -340,7 +340,14 @@ var PrecompiledContractsOsaka = PrecompiledContracts{ common.BytesToAddress([]byte{0x10}): &bls12381MapG1{}, common.BytesToAddress([]byte{0x11}): &bls12381MapG2{}, - common.BytesToAddress([]byte{0x1, 0x00}): &p256Verify{}, + common.BytesToAddress([]byte{0x64}): &tmHeaderValidate{}, + common.BytesToAddress([]byte{0x65}): &iavlMerkleProofValidatePlato{}, + common.BytesToAddress([]byte{0x66}): &blsSignatureVerify{}, + common.BytesToAddress([]byte{0x67}): &cometBFTLightBlockValidateHertz{}, + common.BytesToAddress([]byte{0x68}): &verifyDoubleSignEvidence{}, + common.BytesToAddress([]byte{0x69}): &secp256k1SignatureRecover{}, + + common.BytesToAddress([]byte{0x1, 0x00}): &p256Verify{eip7951: true}, } // PrecompiledContractsP256Verify contains the precompiled Ethereum @@ -1735,10 +1742,17 @@ func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash { // P256VERIFY (secp256r1 signature verification) // implemented as a native contract -type p256Verify struct{} +type p256Verify struct { + eip7951 bool +} // RequiredGas returns the gas required to execute the precompiled contract func (c *p256Verify) RequiredGas(input []byte) uint64 { + const P256VerifyGasBeforeOsaka = 3450 + if !c.eip7951 { + return P256VerifyGasBeforeOsaka + } + return params.P256VerifyGas } diff --git a/params/protocol_params.go b/params/protocol_params.go index 88eb84ec38..24d66b4e68 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -176,7 +176,6 @@ const ( Bls12381MapG1Gas uint64 = 5500 // Gas price for BLS12-381 mapping field element to G1 operation Bls12381MapG2Gas uint64 = 23800 // Gas price for BLS12-381 mapping field element to G2 operation - // TODO(Nathan): only change P256VerifyGas to 6900 after Osaka P256VerifyGas uint64 = 6900 // secp256r1 elliptic curve signature verifier gas price // The Refund Quotient is the cap on how much of the used gas can be refunded. 
Before EIP-3529, From 54fef3b8310dd67c2fe1602a2f0b336d171bb585 Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Mon, 5 Jan 2026 09:50:19 +0800 Subject: [PATCH 02/28] cmd/geth: not support OverrideBPO1 and OverrideBPO2 flags in BSC --- cmd/geth/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index eb2c24cf9a..49828efc7e 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -78,8 +78,8 @@ var ( utils.OverrideFermi, utils.OverrideOsaka, utils.OverrideMendel, - utils.OverrideBPO1, - utils.OverrideBPO2, + // utils.OverrideBPO1, + // utils.OverrideBPO2, utils.OverrideVerkle, utils.OverrideGenesisFlag, utils.OverrideFullImmutabilityThreshold, From 214f49553a28a16c7e07ee3076d76ccb99032169 Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Mon, 5 Jan 2026 09:58:33 +0800 Subject: [PATCH 03/28] cmd/geth: hidden flag PruneAncientDataFlag --- cmd/utils/flags_legacy.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go index 36d3e65221..862c5beb5b 100644 --- a/cmd/utils/flags_legacy.go +++ b/cmd/utils/flags_legacy.go @@ -42,8 +42,8 @@ var DeprecatedFlags = []cli.Flag{ LogBacktraceAtFlag, LogDebugFlag, MinerNewPayloadTimeoutFlag, - PruneAncientDataFlag, EnablePersonal, + PruneAncientDataFlag, } var ( @@ -126,8 +126,9 @@ var ( // Deprecated Dec 2024 PruneAncientDataFlag = &cli.BoolFlag{ Name: "pruneancient", + Hidden: true, Usage: "Prune ancient data, is an optional config and disabled by default. Only keep the latest 9w blocks' data,the older blocks' data will be permanently pruned. Notice:the geth/chaindata/ancient dir will be removed, if restart without the flag, the ancient data will start with the previous point that the oldest unpruned block number. 
Recommends to the user who don't care about the ancient data.", - Category: flags.BlockHistoryCategory, + Category: flags.DeprecatedCategory, } ) From a5c8d1db56b01136311777c0bf0978e829114274 Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Mon, 5 Jan 2026 13:44:56 +0800 Subject: [PATCH 04/28] cmd/utils: deprecated MetricsEnabledExpensiveFlag --- cmd/geth/config.go | 1 - cmd/utils/flags.go | 4 ---- cmd/utils/flags_legacy.go | 3 ++- core/blockchain.go | 32 ++++++++++++--------------- core/state/state_object.go | 11 ++-------- core/state/statedb.go | 44 ++++++++++---------------------------- metrics/metrics.go | 10 --------- 7 files changed, 29 insertions(+), 76 deletions(-) diff --git a/cmd/geth/config.go b/cmd/geth/config.go index be52d710df..e01043af74 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -406,7 +406,6 @@ func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) { } if ctx.IsSet(utils.MetricsEnabledExpensiveFlag.Name) { log.Warn("Expensive metrics will remain in BSC and may be removed in the future", "flag", utils.MetricsEnabledExpensiveFlag.Name) - cfg.Metrics.EnabledExpensive = ctx.Bool(utils.MetricsEnabledExpensiveFlag.Name) } if ctx.IsSet(utils.MetricsHTTPFlag.Name) { cfg.Metrics.HTTP = ctx.String(utils.MetricsHTTPFlag.Name) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index a2a14f2ad5..ba0c7bdb41 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -2696,10 +2696,6 @@ func SetupMetrics(cfg *metrics.Config, options ...SetupMetricsOption) { } log.Info("Enabling metrics collection") metrics.Enable() - if cfg.EnabledExpensive { - log.Info("Enabling expensive metrics collection") - metrics.EnableExpensive() - } // InfluxDB exporter. var ( diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go index 862c5beb5b..b222cac255 100644 --- a/cmd/utils/flags_legacy.go +++ b/cmd/utils/flags_legacy.go @@ -42,6 +42,7 @@ var DeprecatedFlags = []cli.Flag{ LogBacktraceAtFlag, LogDebugFlag, MinerNewPayloadTimeoutFlag, + MetricsEnabledExpensiveFlag, EnablePersonal, PruneAncientDataFlag, } @@ -112,7 +113,7 @@ var ( } MetricsEnabledExpensiveFlag = &cli.BoolFlag{ Name: "metrics.expensive", - Hidden: true, + Hidden: false, // TODO(Nathan): turn it into true in version v1.8.x Usage: "Enable expensive metrics collection and reporting (deprecated)", Category: flags.DeprecatedCategory, } diff --git a/core/blockchain.go b/core/blockchain.go index 3c0c77ae93..35967633bc 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -2637,19 +2637,17 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s proctime := time.Since(startTime) // processing + validation + cross validation // Update the metrics touched during block processing and validation - if metrics.EnabledExpensive() { - accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing) - storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing) - if statedb.AccountLoaded != 0 { - accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded)) - } - if statedb.StorageLoaded != 0 { - storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded)) - } - accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation) - storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation) - accountHashTimer.Update(statedb.AccountHashes) // Account hashes are 
complete(in validation) + accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing) + storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing) + if statedb.AccountLoaded != 0 { + accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded)) } + if statedb.StorageLoaded != 0 { + storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded)) + } + accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation) + storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation) + accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation) triehash := statedb.AccountHashes // The time spent on tries hashing trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing @@ -2676,12 +2674,10 @@ func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, s } // Update the metrics touched during block commit - if metrics.EnabledExpensive() { - accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them - storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them - snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them - triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them - } + accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them + storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them + snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them + triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits) elapsed := time.Since(startTime) + 1 // prevent zero division blockInsertTimer.Update(elapsed) diff --git a/core/state/state_object.go b/core/state/state_object.go index d6711c05a2..8f8507e05e 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -23,8 +23,6 @@ import ( "slices" "time" - "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/opcodeCompiler/compiler" "github.com/ethereum/go-ethereum/core/types" @@ -207,18 +205,13 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { } s.db.StorageLoaded++ - var start time.Time - if metrics.EnabledExpensive() { - start = time.Now() - } + start := time.Now() value, err := s.db.reader.Storage(s.address, key) if err != nil { s.db.setError(err) return common.Hash{} } - if metrics.EnabledExpensive() { - s.db.StorageReads += time.Since(start) - } + s.db.StorageReads += time.Since(start) // Schedule the resolved storage slots for prefetching if it's enabled. 
if s.db.prefetcher != nil && s.data.Root != types.EmptyRootHash { diff --git a/core/state/statedb.go b/core/state/statedb.go index ef5a47de8c..284be41860 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -26,8 +26,6 @@ import ( "sync/atomic" "time" - "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state/snapshot" @@ -714,9 +712,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject { s.setError(fmt.Errorf("getStateObject (%x) error: %w", addr.Bytes(), err)) return nil } - if metrics.EnabledExpensive() { - s.AccountReads += time.Since(start) - } + s.AccountReads += time.Since(start) // Short circuit if the account is not found if acct == nil { @@ -976,9 +972,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { start time.Time workers errgroup.Group ) - if metrics.EnabledExpensive() { - start = time.Now() - } + start = time.Now() if s.db.TrieDB().IsVerkle() { // Whilst MPT storage tries are independent, Verkle has one single trie // for all the accounts and all the storage slots merged together. The @@ -1057,9 +1051,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { } } workers.Wait() - if metrics.EnabledExpensive() { - s.StorageUpdates += time.Since(start) - } + s.StorageUpdates += time.Since(start) // Now we're about to start to write changes to the trie. The trie is so far // _untouched_. We can check with the prefetcher, if it can give us a trie @@ -1068,9 +1060,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Don't check prefetcher if verkle trie has been used. In the context of verkle, // only a single trie is used for state hashing. Replacing a non-nil verkle tree // here could result in losing uncommitted changes from storage. 
- if metrics.EnabledExpensive() { - start = time.Now() - } + start = time.Now() if s.prefetcher != nil { if trie := s.prefetcher.trie(common.Hash{}, s.originalRoot); trie == nil { log.Debug("Failed to retrieve account pre-fetcher trie") @@ -1110,18 +1100,14 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { s.deleteStateObject(deletedAddr) s.AccountDeleted += 1 } - if metrics.EnabledExpensive() { - s.AccountUpdates += time.Since(start) - } + s.AccountUpdates += time.Since(start) if s.prefetcher != nil && len(usedAddrs) > 0 { s.prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs, nil) } - if metrics.EnabledExpensive() { - // Track the amount of time wasted on hashing the account trie - defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now()) - } + // Track the amount of time wasted on hashing the account trie + defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now()) hash := s.trie.Hash() @@ -1441,9 +1427,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum if err := merge(set); err != nil { return err } - if metrics.EnabledExpensive() { - s.AccountCommits = time.Since(start) - } + s.AccountCommits = time.Since(start) return nil }) // Schedule each of the storage tries that need to be updated, so they can @@ -1474,9 +1458,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNum } lock.Lock() updates[obj.addrHash] = update - if metrics.EnabledExpensive() { - s.StorageCommits = time.Since(start) // overwrite with the longest storage commit runtime - } + s.StorageCommits = time.Since(start) // overwrite with the longest storage commit runtime lock.Unlock() return nil }) @@ -1559,9 +1541,7 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorag if err := snap.Cap(ret.root, snap.CapLimit()); err != nil { log.Warn("Failed to cap snapshot tree", "root", ret.root, "layers", TriesInMemory, "err", err) } - if metrics.EnabledExpensive() { - s.SnapshotCommits += time.Since(start) - } + s.SnapshotCommits += time.Since(start) } // If trie database is enabled, commit the state update as a new layer if db := s.db.TrieDB(); db != nil && !s.db.NoTries() { @@ -1569,9 +1549,7 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorag if err := db.Update(ret.root, ret.originRoot, block, ret.nodes, ret.stateSet()); err != nil { return nil, err } - if metrics.EnabledExpensive() { - s.TrieDBCommits += time.Since(start) - } + s.TrieDBCommits += time.Since(start) } } s.reader, _ = s.db.Reader(s.originalRoot) diff --git a/metrics/metrics.go b/metrics/metrics.go index 1e822f1567..d6b966b327 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -38,16 +38,6 @@ func Enable() { startMeterTickerLoop() } -// EnabledExpensive is checked by functions that are deemed 'expensive'. -func EnabledExpensive() bool { - return metricsExpensiveEnabled -} - -// EnableExpensive enables the expensive metrics. 
-func EnableExpensive() { - metricsExpensiveEnabled = true -} - var threadCreateProfile = pprof.Lookup("threadcreate") type runtimeStats struct { From bd120a599f829a3c32c228eec7d51324602b40f7 Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Tue, 16 Sep 2025 10:01:57 +0800 Subject: [PATCH 05/28] core/state: simplify statedb copy --- core/state/statedb.go | 64 ++++++++++------------------------------ core/state_prefetcher.go | 8 ++--- miner/worker.go | 6 +--- 3 files changed, 21 insertions(+), 57 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 284be41860..afa8c3d2e6 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -79,11 +79,10 @@ func (m *mutation) isDelete() bool { // must be created with new root and updated database for accessing post- // commit states. type StateDB struct { - db Database - prefetcherLock sync.Mutex - prefetcher *triePrefetcher - reader Reader - trie Trie // it's resolved on first access + db Database + prefetcher *triePrefetcher + reader Reader + trie Trie // it's resolved on first access // originalRoot is the pre-state root, before any changes were made. // It will be updated when the Commit is called. @@ -212,25 +211,6 @@ func (s *StateDB) SetNeedBadSharedStorage(needBadSharedStorage bool) { s.needBadSharedStorage = needBadSharedStorage } -// In mining mode, we will try multi-fillTransactions to get the most profitable one. -// StateDB will be created for each fillTransactions with same block height. -// Share a single triePrefetcher to avoid too much prefetch routines. -func (s *StateDB) TransferPrefetcher(prev *StateDB) { - if prev == nil { - return - } - var fetcher *triePrefetcher - - prev.prefetcherLock.Lock() - fetcher = prev.prefetcher - prev.prefetcher = nil - prev.prefetcherLock.Unlock() - - s.prefetcherLock.Lock() - s.prefetcher = fetcher - s.prefetcherLock.Unlock() -} - // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the // commit phase, most of the needed data is already hot. @@ -267,13 +247,11 @@ func (s *StateDB) StopPrefetcher() { if s.db.NoTries() { return } - s.prefetcherLock.Lock() if s.prefetcher != nil { s.prefetcher.terminate(false) s.prefetcher.report() s.prefetcher = nil } - s.prefetcherLock.Unlock() } // Mark that the block is processed by diff layer @@ -795,17 +773,6 @@ func (s *StateDB) StateForPrefetch() *StateDB { return state } -// Copy creates a deep, independent copy of the state. -// Snapshots of the copied state cannot be applied to the copy. -func (s *StateDB) Copy() *StateDB { - return s.copyInternal(false) -} - -// It is mainly for state prefetcher to do trie prefetch right now. -func (s *StateDB) CopyDoPrefetch() *StateDB { - return s.copyInternal(true) -} - func (s *StateDB) TransferBlockAccessList(prev *StateDB) { if prev == nil { return @@ -814,20 +781,20 @@ func (s *StateDB) TransferBlockAccessList(prev *StateDB) { prev.blockAccessList = nil } -// If doPrefetch is true, it tries to reuse the prefetcher, the copied StateDB will do active trie prefetch. -// otherwise, just do inactive copy trie prefetcher. -func (s *StateDB) copyInternal(doPrefetch bool) *StateDB { +// Copy creates a deep, independent copy of the state. +// Snapshots of the copied state cannot be applied to the copy. 
+func (s *StateDB) Copy() *StateDB { // Copy all the basic fields, initialize the memory ones state := &StateDB{ - db: s.db, - reader: s.reader, - // expectedRoot: s.expectedRoot, + db: s.db, + reader: s.reader, originalRoot: s.originalRoot, + expectedRoot: s.expectedRoot, + needBadSharedStorage: s.needBadSharedStorage, stateObjects: make(map[common.Address]*stateObject, len(s.stateObjects)), stateObjectsDestruct: make(map[common.Address]*stateObject, len(s.stateObjectsDestruct)), mutations: make(map[common.Address]*mutation, len(s.mutations)), dbErr: s.dbErr, - needBadSharedStorage: s.needBadSharedStorage, refund: s.refund, thash: s.thash, txIndex: s.txIndex, @@ -876,7 +843,6 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB { } state.logs[hash] = cpy } - return state } @@ -962,8 +928,11 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // If there was a trie prefetcher operating, terminate it async so that the // individual storage tries can be updated as soon as the disk load finishes. if s.prefetcher != nil { - // s.prefetcher.terminate(true) - defer s.StopPrefetcher() // not async now! + s.prefetcher.terminate(true) + defer func() { + s.prefetcher.report() + s.prefetcher = nil // Pre-byzantium, unset any used up prefetcher + }() } // Process all storage updates concurrently. The state object update root // method will internally call a blocking trie fetch from the prefetcher, @@ -1330,7 +1299,6 @@ func (s *StateDB) GetTrie() Trie { func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool, blockNumber uint64) (*stateUpdate, error) { // Short circuit in case any database failure occurred earlier. if s.dbErr != nil { - s.StopPrefetcher() return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) } // Finalize any pending changes and merge everything into the tries diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go index 377cbc5e70..a82f6646b2 100644 --- a/core/state_prefetcher.go +++ b/core/state_prefetcher.go @@ -72,7 +72,7 @@ func (p *statePrefetcher) Prefetch(transactions types.Transactions, header *type // Iterate over and process the individual transactions for i, tx := range transactions { - stateCpy := statedb.CopyDoPrefetch() + stateCpy := statedb.Copy() // closure workers.Go(func() error { // If block precaching was interrupted, abort if interrupt != nil && interrupt.Load() { @@ -147,7 +147,7 @@ func (p *statePrefetcher) PrefetchBALSnapshot(balPrefetch *types.BlockAccessList // prefetch snapshot cache for i := 0; i < prefetchThreadBALSnapshot; i++ { go func() { - newStatedb := statedb.CopyDoPrefetch() + newStatedb := statedb.Copy() for { select { case accAddr := <-accChan: @@ -205,7 +205,7 @@ func (p *statePrefetcher) PrefetchBALTrie(balPrefetch *types.BlockAccessListPref for i := 0; i < prefetchThreadBALTrie; i++ { go func() { - newStatedb := statedb.CopyDoPrefetch() + newStatedb := statedb.Copy() for { select { case accItem := <-accItemsChan: @@ -294,7 +294,7 @@ func (p *statePrefetcher) PrefetchMining(txs TransactionsByPriceAndNonce, header txCh := make(chan *types.Transaction, 2*threadCount) for i := 0; i < threadCount; i++ { go func(startCh <-chan *types.Transaction, stopCh <-chan struct{}) { - newStatedb := statedb.CopyDoPrefetch() + newStatedb := statedb.Copy() evm := vm.NewEVM(NewEVMBlockContext(header, p.chain, nil), newStatedb, p.config, cfg) idx := 0 // Iterate over and process the individual transactions diff --git a/miner/worker.go b/miner/worker.go index d5f092459d..ab62c7c7df 100644 
--- a/miner/worker.go +++ b/miner/worker.go @@ -676,11 +676,7 @@ func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase co } state.StartPrefetcher("miner", bundle, nil) } else { - if prevEnv == nil { - state.StartPrefetcher("miner", nil, nil) - } else { - state.TransferPrefetcher(prevEnv.state) - } + state.StartPrefetcher("miner", nil, nil) } // Note the passed coinbase may be different with header.Coinbase. From 012d7bb9fd78a35d4f91aa5d317b6dc93d3aa3da Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Mon, 5 Jan 2026 16:25:52 +0800 Subject: [PATCH 06/28] all: upgrade go version to 1.25 for workflows and dockers --- .github/workflows/build-test.yml | 2 +- .github/workflows/evm-tests.yml | 2 +- .github/workflows/lint.yml | 2 +- .github/workflows/nancy.yml | 2 +- .github/workflows/pre-release.yml | 2 +- .github/workflows/release.yml | 2 +- .github/workflows/unit-test.yml | 2 +- Dockerfile | 2 +- Dockerfile.alltools | 2 +- core/state/snapshot/generate_test.go | 2 +- docker/Dockerfile | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 6eb097132a..4dcb01f56f 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -15,7 +15,7 @@ jobs: unit-test: strategy: matrix: - go-version: [1.24.x] + go-version: [1.25.x] os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: diff --git a/.github/workflows/evm-tests.yml b/.github/workflows/evm-tests.yml index 59102195fe..f8c9f722e5 100644 --- a/.github/workflows/evm-tests.yml +++ b/.github/workflows/evm-tests.yml @@ -15,7 +15,7 @@ jobs: evm-test: strategy: matrix: - go-version: [1.24.x] + go-version: [1.25.x] os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b75d50479b..920ce922fd 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -15,7 +15,7 @@ jobs: golang-lint: strategy: matrix: - go-version: [1.24.x] + go-version: [1.25.x] os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: diff --git a/.github/workflows/nancy.yml b/.github/workflows/nancy.yml index e7a901bc89..fe95006719 100644 --- a/.github/workflows/nancy.yml +++ b/.github/workflows/nancy.yml @@ -15,7 +15,7 @@ jobs: if: github.event.pull_request.head.repo.full_name == github.repository strategy: matrix: - go-version: [1.24.x] + go-version: [1.25.x] os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 71d80a1830..4805f31108 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -15,7 +15,7 @@ jobs: name: Build Release strategy: matrix: - go-version: [1.24.x] + go-version: [1.25.x] os: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.os }} steps: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1b20a3fb81..bcba6ef99d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -14,7 +14,7 @@ jobs: name: Build Release strategy: matrix: - go-version: [1.24.x] + go-version: [1.25.x] os: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.os }} steps: diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index 4455e57b44..0c6a9b949a 100644 --- a/.github/workflows/unit-test.yml +++ b/.github/workflows/unit-test.yml @@ -15,7 +15,7 @@ jobs: unit-test: strategy: matrix: - go-version: 
[1.24.x] + go-version: [1.25.x] os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: diff --git a/Dockerfile b/Dockerfile index f495b44e0a..e9f8c04650 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ ARG VERSION="" ARG BUILDNUM="" # Build Geth in a stock Go builder container -FROM golang:1.24-alpine AS builder +FROM golang:1.25-alpine AS builder RUN apk add --no-cache make cmake gcc musl-dev linux-headers git bash build-base libc-dev # Get dependencies - will also be cached if we won't change go.mod/go.sum diff --git a/Dockerfile.alltools b/Dockerfile.alltools index a3b10ceac4..1b18ab6660 100644 --- a/Dockerfile.alltools +++ b/Dockerfile.alltools @@ -4,7 +4,7 @@ ARG VERSION="" ARG BUILDNUM="" # Build Geth in a stock Go builder container -FROM golang:1.24-alpine AS builder +FROM golang:1.25-alpine AS builder RUN apk add --no-cache gcc musl-dev linux-headers git # Get dependencies - will also be cached if we won't change go.mod/go.sum diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go index 4488630095..5e4b4efba0 100644 --- a/core/state/snapshot/generate_test.go +++ b/core/state/snapshot/generate_test.go @@ -621,7 +621,7 @@ func testGenerateWithExtraAccounts(t *testing.T, scheme string) { } func enableLogging() { - log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true))) + log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true))) } // Tests that snapshot generation when an extra account with storage exists in the snap state. diff --git a/docker/Dockerfile b/docker/Dockerfile index c4a3a2be1d..ef9d1e0851 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.24-alpine as bsc +FROM golang:1.25-alpine as bsc RUN apk add --no-cache make cmake gcc musl-dev linux-headers git bash build-base libc-dev From 6a55e5668b54a3e7d36465b0dac62bb09034ecb1 Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Mon, 5 Jan 2026 17:06:37 +0800 Subject: [PATCH 07/28] all: handle golang lint --- consensus/beacon/consensus.go | 3 +-- core/rawdb/database.go | 4 ++-- core/vm/evm.go | 3 --- core/vm/interpreter.go | 2 ++ crypto/secp256r1/publickey.go | 21 --------------------- metrics/metrics.go | 5 ----- triedb/pathdb/journal.go | 18 ------------------ 7 files changed, 5 insertions(+), 51 deletions(-) delete mode 100644 crypto/secp256r1/publickey.go diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index eef43d0890..9d631beb70 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -61,8 +61,7 @@ var ( // is only used for necessary consensus checks. The legacy consensus engine can be any // engine implements the consensus interface (except the beacon itself). type Beacon struct { - ethone consensus.Engine // Original consensus engine used in eth1, e.g. ethash or clique - ttdblock *uint64 // Merge block-number for testchain generation without TTDs + ethone consensus.Engine // Original consensus engine used in eth1, e.g. ethash or clique } // New creates a consensus engine with the given embedded eth1 engine. 
diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 6a64022168..3b4abf71b7 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -876,11 +876,11 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { } count.Add(1) if count.Load()%1000 == 0 && time.Since(logged) > 8*time.Second { - log.Info("Inspecting separate state database", "count", count, "elapsed", common.PrettyDuration(time.Since(start))) + log.Info("Inspecting separate state database", "count", count.Load(), "elapsed", common.PrettyDuration(time.Since(start))) logged = time.Now() } } - log.Info("Inspecting separate state database", "count", count, "elapsed", common.PrettyDuration(time.Since(start))) + log.Info("Inspecting separate state database", "count", count.Load(), "elapsed", common.PrettyDuration(time.Since(start))) } var ( diff --git a/core/vm/evm.go b/core/vm/evm.go index 6c683c3f4a..1d494a1976 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -97,9 +97,6 @@ type EVM struct { // StateDB gives access to the underlying state StateDB StateDB - // table holds the opcode specific handlers - table *JumpTable - // depth is the current call stack depth int diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index b5e7567d28..69d0a2493b 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -108,6 +108,8 @@ func NewEVMInterpreter(evm *EVM) *EVMInterpreter { // If jump table was not initialised we set the default one. var table *JumpTable switch { + case evm.chainRules.IsOsaka: + table = &osakaInstructionSet case evm.chainRules.IsVerkle: // TODO replace with proper instruction set when fork is specified table = &verkleInstructionSet diff --git a/crypto/secp256r1/publickey.go b/crypto/secp256r1/publickey.go deleted file mode 100644 index 885a3e1a62..0000000000 --- a/crypto/secp256r1/publickey.go +++ /dev/null @@ -1,21 +0,0 @@ -package secp256r1 - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "math/big" -) - -// Generates appropriate public key format from given coordinates -func newPublicKey(x, y *big.Int) *ecdsa.PublicKey { - // Check if the given coordinates are valid - if x == nil || y == nil || !elliptic.P256().IsOnCurve(x, y) { - return nil - } - - return &ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: x, - Y: y, - } -} diff --git a/metrics/metrics.go b/metrics/metrics.go index d6b966b327..85f51ae185 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -14,11 +14,6 @@ import ( var ( metricsEnabled = false - - // metricsExpensiveEnabled is a soft-flag meant for external packages to check if costly - // metrics gathering is allowed or not. The goal is to separate standard metrics - // for health monitoring and debug metrics that might impact runtime performance. - metricsExpensiveEnabled = false ) // Enabled is checked by functions that are deemed 'expensive', e.g. 
if a diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go index 7ee6442717..c28e66d5ea 100644 --- a/triedb/pathdb/journal.go +++ b/triedb/pathdb/journal.go @@ -130,24 +130,6 @@ func (kr *JournalKVReader) Read(p []byte) (n int, err error) { func (kr *JournalKVReader) Close() { } -func newJournalWriter(file string, db ethdb.Database, journalType JournalType) JournalWriter { - if journalType == JournalKVType { - log.Info("New journal writer for journal kv") - return &JournalKVWriter{ - diskdb: db, - } - } else { - log.Info("New journal writer for journal file", "path", file) - fd, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) - if err != nil { - return nil - } - return &JournalFileWriter{ - file: fd, - } - } -} - func newJournalReader(file string, db ethdb.Database, journalType JournalType) (JournalReader, error) { if journalType == JournalKVType { log.Info("New journal reader for journal kv") From f16dce41d5983c838f69ea1a3186d2739bda000f Mon Sep 17 00:00:00 2001 From: qybdyx Date: Mon, 5 Jan 2026 17:48:16 +0800 Subject: [PATCH 08/28] bid_simulator.go: disable blob version1 in bid simulator --- core/types/tx_blob.go | 3 +-- miner/bid_simulator.go | 4 ++++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/core/types/tx_blob.go b/core/types/tx_blob.go index 0f47aa895c..7350e7eb37 100644 --- a/core/types/tx_blob.go +++ b/core/types/tx_blob.go @@ -220,8 +220,7 @@ func (btx *blobTxWithBlobsV1) tx() *BlobTx { func (btx *blobTxWithBlobsV1) assign(sc *BlobTxSidecar) error { // NOTE(BSC): Upstream geth supports both Version 0 and 1 sidecars. // BSC only supports Version 0, as EIP-7594 (cell proofs) is not enabled yet. - disableEIP7594 := true - if disableEIP7594 || btx.Version != BlobSidecarVersion1 { + if btx.Version != BlobSidecarVersion1 { return fmt.Errorf("unsupported blob tx version %d", btx.Version) } sc.Version = BlobSidecarVersion1 diff --git a/miner/bid_simulator.go b/miner/bid_simulator.go index 4c5d0a2be5..0cf9bf0dc5 100644 --- a/miner/bid_simulator.go +++ b/miner/bid_simulator.go @@ -1057,6 +1057,10 @@ func (r *BidRuntime) commitTransaction(chain *core.BlockChain, chainConfig *para if sc == nil { return errors.New("blob transaction without blobs in miner") } + + if sc.Version == types.BlobSidecarVersion1 { + return errors.New("cell proof is not supported yet") + } // Checking against blob gas limit: It's kind of ugly to perform this check here, but there // isn't really a better place right now. The blob gas limit is checked at block validation time // and not during execution. 
This means core.ApplyTransaction will not return an error if the From 9a59b84b25b58ce9325d412c88f7c8720e9dbf3d Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Tue, 6 Jan 2026 10:12:17 +0800 Subject: [PATCH 09/28] triedb: align signature of func Size() --- core/blockchain.go | 10 +++++----- core/blockchain_insert.go | 3 +-- eth/state_accessor.go | 4 ++-- eth/tracers/api.go | 4 ++-- triedb/database.go | 12 ++++++------ triedb/hashdb/database.go | 4 ++-- triedb/pathdb/database.go | 6 +++--- triedb/pathdb/disklayer.go | 7 +++---- 8 files changed, 24 insertions(+), 26 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 35967633bc..0c6d46cb0e 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1573,7 +1573,7 @@ func (bc *BlockChain) Stop() { for !bc.triegc.Empty() { triedb.Dereference(bc.triegc.PopItem()) } - if _, size, _, _ := triedb.Size(); size != 0 { + if _, size, _ := triedb.Size(); size != 0 { log.Error("Dangling trie nodes after full cleanup") } } @@ -1968,8 +1968,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. } // If we exceeded our memory allowance, flush matured singleton nodes to disk var ( - _, nodes, _, imgs = bc.triedb.Size() - limit = common.StorageSize(bc.cfg.TrieDirtyLimit) * 1024 * 1024 + _, nodes, imgs = bc.triedb.Size() + limit = common.StorageSize(bc.cfg.TrieDirtyLimit) * 1024 * 1024 ) if nodes > limit || imgs > 4*1024*1024 { bc.triedb.Cap(limit - ethdb.IdealBatchSize) @@ -2389,8 +2389,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness if bc.snaps != nil { snapDiffItems, snapBufItems, _ = bc.snaps.Size() } - trieDiffNodes, trieBufNodes, trieImmutableBufNodes, _ := bc.triedb.Size() - stats.report(chain, it.index, snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes, trieImmutableBufNodes, res.status == CanonStatTy) + trieDiffNodes, trieBufNodes, _ := bc.triedb.Size() + stats.report(chain, it.index, snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes, res.status == CanonStatTy) // Print confirmation that a future fork is scheduled, but not yet active. bc.logForkReadiness(block) diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go index 1c9ba3e991..acfa58d3c6 100644 --- a/core/blockchain_insert.go +++ b/core/blockchain_insert.go @@ -39,7 +39,7 @@ const statsReportLimit = 8 * time.Second // report prints statistics if some number of blocks have been processed // or more than a few seconds have passed since the last message. -func (st *insertStats) report(chain []*types.Block, index int, snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes, trieImmutableBufNodes common.StorageSize, setHead bool) { +func (st *insertStats) report(chain []*types.Block, index int, snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes common.StorageSize, setHead bool) { // Fetch the timings for the batch var ( now = mclock.Now() @@ -78,7 +78,6 @@ func (st *insertStats) report(chain []*types.Block, index int, snapDiffItems, sn context = append(context, []interface{}{"triediffs", trieDiffNodes}...) } context = append(context, []interface{}{"triedirty", trieBufNodes}...) - context = append(context, []interface{}{"trieimutabledirty", trieImmutableBufNodes}...) if st.ignored > 0 { context = append(context, []interface{}{"ignored", st.ignored}...) 
diff --git a/eth/state_accessor.go b/eth/state_accessor.go index bfe13da4b8..3f0f0a03aa 100644 --- a/eth/state_accessor.go +++ b/eth/state_accessor.go @@ -177,8 +177,8 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u parent = root } if report { - diff, nodes, immutablenodes, imgs := tdb.Size() // all memory is contained within the nodes return in hashdb - log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "layer", diff, "nodes", nodes, "immutablenodes", immutablenodes, "preimages", imgs) + diff, nodes, imgs := tdb.Size() // all memory is contained within the nodes return in hashdb + log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "layer", diff, "nodes", nodes, "preimages", imgs) } return statedb, func() { tdb.Dereference(block.Root()) }, nil } diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 09d8263fe8..28f3276441 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -395,8 +395,8 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed // if the relevant state is available in disk. var preferDisk bool if statedb != nil { - s1, s2, s3, s4 := statedb.Database().TrieDB().Size() - preferDisk = s1+s2+s3+s4 > defaultTracechainMemLimit + s1, s2, s3 := statedb.Database().TrieDB().Size() + preferDisk = s1+s2+s3 > defaultTracechainMemLimit } statedb, release, err = api.backend.StateAtBlock(ctx, block, reexec, statedb, false, preferDisk) if err != nil { diff --git a/triedb/database.go b/triedb/database.go index ba87cf63fe..1ffd004e24 100644 --- a/triedb/database.go +++ b/triedb/database.go @@ -71,7 +71,7 @@ type backend interface { // // For hash scheme, there is no differentiation between diff layer nodes // and dirty disk layer nodes, so both are merged into the second return. - Size() (common.StorageSize, common.StorageSize, common.StorageSize) + Size() (common.StorageSize, common.StorageSize) // Commit writes all relevant trie nodes belonging to the specified state // to disk. Report specifies whether logs will be displayed in info level. @@ -214,16 +214,16 @@ func (db *Database) Commit(root common.Hash, report bool) error { // Size returns the storage size of diff layer nodes above the persistent disk // layer, the dirty nodes buffered within the disk layer, and the size of cached // preimages. -func (db *Database) Size() (common.StorageSize, common.StorageSize, common.StorageSize, common.StorageSize) { +func (db *Database) Size() (common.StorageSize, common.StorageSize, common.StorageSize) { var ( - diffs, nodes, immutablenodes common.StorageSize - preimages common.StorageSize + diffs, nodes common.StorageSize + preimages common.StorageSize ) - diffs, nodes, immutablenodes = db.backend.Size() + diffs, nodes = db.backend.Size() if db.preimages != nil { preimages = db.preimages.size() } - return diffs, nodes, immutablenodes, preimages + return diffs, nodes, preimages } // Scheme returns the node scheme used in the database. diff --git a/triedb/hashdb/database.go b/triedb/hashdb/database.go index 31f19be28d..38392aa519 100644 --- a/triedb/hashdb/database.go +++ b/triedb/hashdb/database.go @@ -590,7 +590,7 @@ func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, n // // The first return will always be 0, representing the memory stored in unbounded // diff layers above the dirty cache. This is only available in pathdb. 
-func (db *Database) Size() (common.StorageSize, common.StorageSize, common.StorageSize) { +func (db *Database) Size() (common.StorageSize, common.StorageSize) { db.lock.RLock() defer db.lock.RUnlock() @@ -598,7 +598,7 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize, common.Stora // the total memory consumption, the maintenance metadata is also needed to be // counted. var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize) - return 0, db.dirtiesSize + db.childrenSize + metadataSize, 0 + return 0, db.dirtiesSize + db.childrenSize + metadataSize } // Close closes the trie database and releases all held resources. diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go index b908a7214c..79ae6998b2 100644 --- a/triedb/pathdb/database.go +++ b/triedb/pathdb/database.go @@ -713,16 +713,16 @@ func (db *Database) Close() error { // Size returns the current storage size of the memory cache in front of the // persistent database layer. -func (db *Database) Size() (diffs common.StorageSize, nodes common.StorageSize, immutableNodes common.StorageSize) { +func (db *Database) Size() (diffs common.StorageSize, nodes common.StorageSize) { db.tree.forEach(func(layer layer) { if diff, ok := layer.(*diffLayer); ok { diffs += common.StorageSize(diff.size()) } if disk, ok := layer.(*diskLayer); ok { - nodes, immutableNodes = disk.size() + nodes += disk.size() } }) - return diffs, nodes, immutableNodes + return diffs, nodes } // Scheme returns the node scheme used in the database. diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go index 28073d51c5..14f2219dc1 100644 --- a/triedb/pathdb/disklayer.go +++ b/triedb/pathdb/disklayer.go @@ -580,15 +580,14 @@ func (dl *diskLayer) revert(h *stateHistory) (*diskLayer, error) { } // size returns the approximate size of cached nodes in the disk layer. -func (dl *diskLayer) size() (common.StorageSize, common.StorageSize) { +func (dl *diskLayer) size() common.StorageSize { dl.lock.RLock() defer dl.lock.RUnlock() if dl.stale { - return 0, 0 + return 0 } - dirtyNodes, dirtyimmutableNodes := dl.buffer.size(), 0 - return common.StorageSize(dirtyNodes), common.StorageSize(dirtyimmutableNodes) + return common.StorageSize(dl.buffer.size()) } // resetCache releases the memory held by clean cache to prevent memory leak. 
From 8a317945a7dbf6bcd3c782de98923881f4d37654 Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Tue, 6 Jan 2026 10:26:49 +0800 Subject: [PATCH 10/28] triedb/pathdb: read old journal file --- core/blockchain_test.go | 2 +- triedb/pathdb/config.go | 3 - triedb/pathdb/database.go | 5 -- triedb/pathdb/journal.go | 121 ++++---------------------------------- 4 files changed, 14 insertions(+), 117 deletions(-) diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 3473f33a97..bc9ef33ac8 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -1678,7 +1678,7 @@ func TestTrieForkGC(t *testing.T) { chain.TrieDB().Dereference(blocks[len(blocks)-1-i].Root()) chain.TrieDB().Dereference(forks[len(blocks)-1-i].Root()) } - if _, nodes, _, _ := chain.TrieDB().Size(); nodes > 0 { // all memory is returned in the nodes return for hashdb + if _, nodes, _ := chain.TrieDB().Size(); nodes > 0 { // all memory is returned in the nodes return for hashdb t.Fatalf("stale tries still alive after garbase collection") } } diff --git a/triedb/pathdb/config.go b/triedb/pathdb/config.go index 2dcec5b785..63f3d0bdad 100644 --- a/triedb/pathdb/config.go +++ b/triedb/pathdb/config.go @@ -109,9 +109,6 @@ func (c *Config) fields() []interface{} { if c.ReadOnly { list = append(list, "readonly", true) } - if c.SnapshotNoBuild { - list = append(list, "snapshot", false) - } list = append(list, "triecache", common.StorageSize(c.TrieCleanSize)) list = append(list, "statecache", common.StorageSize(c.StateCleanSize)) list = append(list, "buffer", common.StorageSize(c.WriteBufferSize)) diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go index 79ae6998b2..3457af01e6 100644 --- a/triedb/pathdb/database.go +++ b/triedb/pathdb/database.go @@ -725,11 +725,6 @@ func (db *Database) Size() (diffs common.StorageSize, nodes common.StorageSize) return diffs, nodes } -// Scheme returns the node scheme used in the database. -func (db *Database) Scheme() string { - return rawdb.PathScheme -} - // Head return the top non-fork difflayer/disklayer root hash for rewinding. func (db *Database) Head() common.Hash { db.lock.Lock() diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go index c28e66d5ea..c42c25ee5d 100644 --- a/triedb/pathdb/journal.go +++ b/triedb/pathdb/journal.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "io" - "io/fs" "os" "time" @@ -53,108 +52,6 @@ var ( // - Version 3: a flag has been added to indicate whether the storage slot key is the raw key or a hash const journalVersion uint64 = 3 -type JournalWriter interface { - io.Writer - - Close() - Size() uint64 -} - -type JournalReader interface { - io.Reader - Close() -} - -type JournalFileWriter struct { - file *os.File -} - -type JournalFileReader struct { - file *os.File -} - -type JournalKVWriter struct { - journalBuf bytes.Buffer - diskdb ethdb.Database -} - -type JournalKVReader struct { - journalBuf *bytes.Buffer -} - -// Write appends b directly to the encoder output. 
-func (fw *JournalFileWriter) Write(b []byte) (int, error) { - return fw.file.Write(b) -} - -func (fw *JournalFileWriter) Close() { - fw.file.Close() -} - -func (fw *JournalFileWriter) Size() uint64 { - if fw.file == nil { - return 0 - } - fileInfo, err := fw.file.Stat() - if err != nil { - log.Crit("Failed to stat journal", "err", err) - } - return uint64(fileInfo.Size()) -} - -func (kw *JournalKVWriter) Write(b []byte) (int, error) { - return kw.journalBuf.Write(b) -} - -func (kw *JournalKVWriter) Close() { - rawdb.WriteTrieJournal(kw.diskdb, kw.journalBuf.Bytes()) - kw.journalBuf.Reset() -} - -func (kw *JournalKVWriter) Size() uint64 { - return uint64(kw.journalBuf.Len()) -} - -func (fr *JournalFileReader) Read(p []byte) (n int, err error) { - return fr.file.Read(p) -} - -func (fr *JournalFileReader) Close() { - fr.file.Close() -} - -func (kr *JournalKVReader) Read(p []byte) (n int, err error) { - return kr.journalBuf.Read(p) -} - -func (kr *JournalKVReader) Close() { -} - -func newJournalReader(file string, db ethdb.Database, journalType JournalType) (JournalReader, error) { - if journalType == JournalKVType { - log.Info("New journal reader for journal kv") - journal := rawdb.ReadTrieJournal(db) - if len(journal) == 0 { - return nil, errMissJournal - } - return &JournalKVReader{ - journalBuf: bytes.NewBuffer(journal), - }, nil - } else { - log.Info("New journal reader for journal file", "path", file) - fd, err := os.Open(file) - if errors.Is(err, fs.ErrNotExist) { - return nil, errMissJournal - } - if err != nil { - return nil, err - } - return &JournalFileReader{ - file: fd, - }, nil - } -} - // loadJournal tries to parse the layer journal from the disk. func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) { var reader io.Reader @@ -167,6 +64,15 @@ func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) { } defer f.Close() reader = f + } else if path := db.config.JournalFilePath; path != "" && common.FileExist(path) { // TODO(Nathan): delete this branch in v1.8.x + // If a journal file is specified, read it from there + log.Info("Load database journal from file", "path", path) + f, err := os.OpenFile(path, os.O_RDONLY, 0644) + if err != nil { + return nil, fmt.Errorf("failed to read journal file %s: %w", path, err) + } + defer f.Close() + reader = f } else { log.Info("Load database journal from disk") journal := rawdb.ReadTrieJournal(db.diskdb) @@ -341,7 +247,6 @@ func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) { if err := stateSet.decode(r); err != nil { return nil, err } - return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, &nodes, &stateSet), r) } @@ -411,10 +316,6 @@ func (dl *diffLayer) journal(w io.Writer) error { // // The supplied root must be a valid trie hash value. func (db *Database) Journal(root common.Hash) error { - // Run the journaling - db.lock.Lock() - defer db.lock.Unlock() - // Retrieve the head layer to journal from. l := db.tree.get(root) if l == nil { @@ -433,6 +334,10 @@ func (db *Database) Journal(root common.Hash) error { } start := time.Now() + // Run the journaling + db.lock.Lock() + defer db.lock.Unlock() + // Short circuit if the database is in read only mode. 
if db.readOnly { return errDatabaseReadOnly From 22e998fd527d9d7032e87f2674bc9346cd8a822c Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Tue, 6 Jan 2026 11:21:59 +0800 Subject: [PATCH 11/28] cmd/utils: deprecated JournalFileFlag --- cmd/utils/flags.go | 7 ------- cmd/utils/flags_legacy.go | 8 ++++++++ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index ba0c7bdb41..ab1443ad07 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -397,12 +397,6 @@ var ( Value: false, Category: flags.StateCategory, } - JournalFileFlag = &cli.BoolFlag{ - Name: "journalfile", - Usage: "Enable using journal file to store the TrieJournal instead of KVDB in pbss (default = true)", - Value: true, - Category: flags.StateCategory, - } StateSizeTrackingFlag = &cli.BoolFlag{ Name: "state.size-tracking", Usage: "Enable state size tracking, retrieve state size with debug_stateSize.", @@ -3017,7 +3011,6 @@ func MakeTrieDatabase(ctx *cli.Context, stack *node.Node, disk ethdb.Database, p } pathConfig.JournalDirectory = stack.ResolvePath("triedb") config.PathDB = &pathConfig - // TODO(Nathan): delete JournalFilePath ? config.PathDB.JournalFilePath = fmt.Sprintf("%s/%s", stack.ResolvePath("chaindata"), eth.JournalFileName) return triedb.NewDatabase(disk, config) } diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go index b222cac255..2d15fcfa53 100644 --- a/cmd/utils/flags_legacy.go +++ b/cmd/utils/flags_legacy.go @@ -45,6 +45,7 @@ var DeprecatedFlags = []cli.Flag{ MetricsEnabledExpensiveFlag, EnablePersonal, PruneAncientDataFlag, + JournalFileFlag, } var ( @@ -131,6 +132,13 @@ var ( Usage: "Prune ancient data, is an optional config and disabled by default. Only keep the latest 9w blocks' data,the older blocks' data will be permanently pruned. Notice:the geth/chaindata/ancient dir will be removed, if restart without the flag, the ancient data will start with the previous point that the oldest unpruned block number. Recommends to the user who don't care about the ancient data.", Category: flags.DeprecatedCategory, } + JournalFileFlag = &cli.BoolFlag{ + Name: "journalfile", + Hidden: false, // TODO(Nathan): turn it into true in version v1.8.x + Usage: "Enable using journal file to store the TrieJournal instead of KVDB in pbss (default = true, deprecated)", + Value: true, + Category: flags.DeprecatedCategory, + } ) // showDeprecated displays deprecated flags that will be soon removed from the codebase. 
From a1f6e349d2eda71264a6c66806ee6656dc6d353d Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Tue, 6 Jan 2026 13:49:11 +0800 Subject: [PATCH 12/28] triedb/pathdb: remove JournalFile field in config --- cmd/utils/flags.go | 2 -- core/blockchain.go | 2 -- eth/backend.go | 1 - eth/ethconfig/config.go | 2 +- triedb/pathdb/config.go | 1 - 5 files changed, 1 insertion(+), 7 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index ab1443ad07..852280ab10 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -2205,8 +2205,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { cfg.PathSyncFlush = true } - cfg.JournalFileEnabled = ctx.Bool(JournalFileFlag.Name) - if ctx.String(GCModeFlag.Name) == "archive" { if cfg.TransactionHistory != 0 { cfg.TransactionHistory = 0 diff --git a/core/blockchain.go b/core/blockchain.go index 0c6d46cb0e..f99e51640c 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -181,7 +181,6 @@ type BlockChainConfig struct { NoTries bool // Insecure settings. Do not have any tries in databases if enabled. PathSyncFlush bool // Whether sync flush the trienodebuffer of pathdb to disk. JournalFilePath string // The path to store journal file which is used in pathdb - JournalFile bool // Whether to use single file to store journal data in pathdb EnableIncr bool // Flag whether the freezer db stores incremental block and state history IncrHistoryPath string // The path to store incremental block and chain files IncrHistory uint64 // Amount of block and state history stored in incremental freezer db @@ -284,7 +283,6 @@ func (cfg *BlockChainConfig) triedbConfig(isVerkle bool) *triedb.Config { if cfg.StateScheme == rawdb.PathScheme { config.PathDB = &pathdb.Config{ JournalFilePath: cfg.JournalFilePath, - JournalFile: cfg.JournalFile, EnableIncr: cfg.EnableIncr, IncrHistoryPath: cfg.IncrHistoryPath, IncrHistory: cfg.IncrHistory, diff --git a/eth/backend.go b/eth/backend.go index 39b9ba271d..9865ed4a1c 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -353,7 +353,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { StateScheme: config.StateScheme, PathSyncFlush: config.PathSyncFlush, JournalFilePath: journalFilePath, - JournalFile: config.JournalFileEnabled, EnableIncr: config.EnableIncrSnapshots, IncrHistoryPath: config.IncrSnapshotPath, IncrHistory: config.IncrSnapshotBlockInterval, diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 46caba7c3f..1721dd3269 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -134,7 +134,7 @@ type Config struct { // consistent with persistent state. 
StateScheme string `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top PathSyncFlush bool `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top - JournalFileEnabled bool // Whether the TrieJournal is stored using journal file + JournalFileEnabled bool // TODO(Nathan): deprecated, will delete together with JournalFileFlag in v1.8.x DisableTxIndexer bool `toml:",omitempty"` // Whether to enable the transaction indexer diff --git a/triedb/pathdb/config.go b/triedb/pathdb/config.go index 63f3d0bdad..d7cf7a5422 100644 --- a/triedb/pathdb/config.go +++ b/triedb/pathdb/config.go @@ -82,7 +82,6 @@ type Config struct { NoAsyncGeneration bool // Flag whether the background generation is disabled JournalFilePath string // The path of journal file - JournalFile bool // Flag whether store memory diffLayer into file EnableIncr bool // Flag whether the freezer db stores incr block and state history MergeIncr bool // Flag to merge incr snapshots From 4a1f70a3d61cbd62aa9d81e5ca355b36c1ad19aa Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Tue, 6 Jan 2026 15:08:20 +0800 Subject: [PATCH 13/28] eth: fix BroadcastTransactions --- core/state/snapshot/generate_test.go | 2 +- eth/handler.go | 2 +- eth/peerset.go | 28 +++++++--------------------- 3 files changed, 9 insertions(+), 23 deletions(-) diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go index 5e4b4efba0..4488630095 100644 --- a/core/state/snapshot/generate_test.go +++ b/core/state/snapshot/generate_test.go @@ -621,7 +621,7 @@ func testGenerateWithExtraAccounts(t *testing.T, scheme string) { } func enableLogging() { - log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true))) + log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true))) } // Tests that snapshot generation when an extra account with storage exists in the snap state. diff --git a/eth/handler.go b/eth/handler.go index b8108062db..586a4fab61 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -965,7 +965,7 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) { signer = types.LatestSigner(h.chain.Config()) choice = newBroadcastChoice(h.nodeID, h.txBroadcastKey) - peers = h.peers.all() + peers = h.peers.allNonEVNPeers() ) for _, tx := range txs { diff --git a/eth/peerset.go b/eth/peerset.go index 82241087de..3d76d62df4 100644 --- a/eth/peerset.go +++ b/eth/peerset.go @@ -19,9 +19,7 @@ package eth import ( "errors" "fmt" - "maps" "math/big" - "slices" "sync" "time" @@ -447,32 +445,20 @@ func (ps *peerSet) peersWithoutBlock(hash common.Hash) []*ethPeer { return list } -// peersWithoutTransaction retrieves a list of peers that do not have a given -// transaction in their set of known hashes. -func (ps *peerSet) peersWithoutTransaction(hash common.Hash) []*ethPeer { +// allNonEVNPeers returns a slice of all registered peers that do not have +// the EVNPeerFlag set. +func (ps *peerSet) allNonEVNPeers() []*ethPeer { ps.lock.RLock() defer ps.lock.RUnlock() - list := make([]*ethPeer, 0, len(ps.peers)) + nonEVNPeers := make([]*ethPeer, 0, len(ps.peers)) for _, p := range ps.peers { - // it can be optimized in the future, to make it more clear that only when both peers of a connection are EVN nodes, will enable no tx broadcast. 
- if p.EVNPeerFlag.Load() { - log.Debug("skip EVN peer with no tx forwarding feature", "peer", p.ID()) - continue - } - if !p.KnownTransaction(hash) { - list = append(list, p) + if !p.EVNPeerFlag.Load() { + nonEVNPeers = append(nonEVNPeers, p) } } - return list -} - -// all returns all current peers. -func (ps *peerSet) all() []*ethPeer { - ps.lock.RLock() - defer ps.lock.RUnlock() - return slices.Collect(maps.Values(ps.peers)) + return nonEVNPeers } // peersWithoutVote retrieves a list of peers that do not have a given From ef741d26e8a14420165cb8a14987ca7b09400573 Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Tue, 6 Jan 2026 15:52:37 +0800 Subject: [PATCH 14/28] core/txpool/blobpool: only accept BlobSidecarVersion0 in BSC --- core/txpool/blobpool/blobpool.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 668610c801..9b78356509 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -1663,6 +1663,7 @@ func (p *BlobPool) preCheck(tx *types.Transaction) error { var ( head = p.head.Load() isOsaka = p.chain.Config().IsOsaka(head.Number, head.Time) + isMendel = p.chain.Config().IsMendel(head.Number, head.Time) deadline time.Time ) if isOsaka { @@ -1674,7 +1675,7 @@ func (p *BlobPool) preCheck(tx *types.Transaction) error { return err } // Before the Osaka fork, reject the blob txs with cell proofs - if !isOsaka { + if !isOsaka || isMendel { // always true(assuming MendelTime == OsakaTime) if tx.BlobTxSidecar().Version == types.BlobSidecarVersion0 { return nil } else { From b65edcac8ffdabea6d75986220bd1a18f3295cad Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Tue, 6 Jan 2026 16:32:36 +0800 Subject: [PATCH 15/28] core/txpool/blobpool: not convert blobsidecar to V1 in BSC --- cmd/evm/internal/t8ntool/transition.go | 2 +- consensus/misc/eip1559/eip1559.go | 4 ++-- core/blockchain.go | 4 ++-- core/chain_makers.go | 8 ++++---- core/genesis.go | 8 ++++---- core/state_processor.go | 4 ++-- core/state_processor_test.go | 2 +- core/state_transition.go | 2 +- core/txpool/blobpool/blobpool.go | 7 +++---- eth/ethconfig/config.go | 2 +- internal/ethapi/api.go | 2 +- miner/worker.go | 8 ++++---- params/config.go | 14 +++++++++++--- 13 files changed, 37 insertions(+), 30 deletions(-) diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 618b4b7a2e..b30c4666f8 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -212,7 +212,7 @@ func applyShanghaiChecks(env *stEnv, chainConfig *params.ChainConfig) error { if !chainConfig.IsShanghai(big.NewInt(int64(env.Number)), env.Timestamp) { return nil } - if chainConfig.Parlia == nil && env.Withdrawals == nil { + if chainConfig.IsNotInBSC() && env.Withdrawals == nil { return NewError(ErrorConfig, errors.New("Shanghai config but missing 'withdrawals' in env section")) } return nil diff --git a/consensus/misc/eip1559/eip1559.go b/consensus/misc/eip1559/eip1559.go index 9ab4acee4d..97bc61ea95 100644 --- a/consensus/misc/eip1559/eip1559.go +++ b/consensus/misc/eip1559/eip1559.go @@ -31,7 +31,7 @@ import ( // - gas limit check // - basefee check func VerifyEIP1559Header(config *params.ChainConfig, parent, header *types.Header) error { - if config.Parlia == nil { + if config.IsNotInBSC() { // Verify that the gas limit remains within allowed bounds parentGasLimit 
:= parent.GasLimit if !config.IsLondon(parent.Number) { @@ -57,7 +57,7 @@ func VerifyEIP1559Header(config *params.ChainConfig, parent, header *types.Heade // CalcBaseFee calculates the basefee of the header. func CalcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int { - if config.Parlia != nil { + if config.IsInBSC() { return new(big.Int).SetUint64(params.InitialBaseFeeForBSC) } diff --git a/core/blockchain.go b/core/blockchain.go index f99e51640c..40e0b73ea0 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1658,7 +1658,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ } // check DA after cancun lastBlk := blockChain[len(blockChain)-1] - if bc.chainConfig.Parlia != nil && bc.chainConfig.IsCancun(lastBlk.Number(), lastBlk.Time()) { + if bc.chainConfig.IsInBSC() && bc.chainConfig.IsCancun(lastBlk.Number(), lastBlk.Time()) { if _, err := CheckDataAvailableInBatch(bc, blockChain); err != nil { log.Debug("CheckDataAvailableInBatch", "err", err) return 0, err @@ -2204,7 +2204,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness }() // check block data available first - if bc.chainConfig.Parlia != nil { + if bc.chainConfig.IsInBSC() { if index, err := CheckDataAvailableInBatch(bc, chain); err != nil { return nil, index, err } diff --git a/core/chain_makers.go b/core/chain_makers.go index a1eb0d318f..3acd63a5cc 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -262,7 +262,7 @@ func (b *BlockGen) AddUncle(h *types.Header) { h.GasLimit = parent.GasLimit if b.cm.config.IsLondon(h.Number) { h.BaseFee = eip1559.CalcBaseFee(b.cm.config, parent) - if b.cm.config.Parlia == nil && !b.cm.config.IsLondon(parent.Number) { + if b.cm.config.IsNotInBSC() && !b.cm.config.IsLondon(parent.Number) { parentGasLimit := parent.GasLimit * b.cm.config.ElasticityMultiplier() h.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit) } @@ -336,7 +336,7 @@ func (b *BlockGen) collectRequests(readonly bool) (requests [][]byte) { statedb = statedb.Copy() } - if b.cm.config.IsPrague(b.header.Number, b.header.Time) && b.cm.config.Parlia == nil { + if b.cm.config.IsPrague(b.header.Number, b.header.Time) && b.cm.config.IsNotInBSC() { requests = [][]byte{} // EIP-6110 deposits var blockLogs []*types.Log @@ -641,7 +641,7 @@ func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engi if cm.config.IsLondon(header.Number) { header.BaseFee = eip1559.CalcBaseFee(cm.config, parentHeader) - if cm.config.Parlia == nil && !cm.config.IsLondon(parent.Number()) { + if cm.config.IsNotInBSC() && !cm.config.IsLondon(parent.Number()) { parentGasLimit := parent.GasLimit() * cm.config.ElasticityMultiplier() header.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit) } @@ -650,7 +650,7 @@ func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engi excessBlobGas := eip4844.CalcExcessBlobGas(cm.config, parentHeader, time) header.ExcessBlobGas = &excessBlobGas header.BlobGasUsed = new(uint64) - if cm.config.Parlia == nil { + if cm.config.IsNotInBSC() { header.ParentBeaconRoot = new(common.Hash) } else { header.WithdrawalsHash = &types.EmptyWithdrawalsHash diff --git a/core/genesis.go b/core/genesis.go index 9a617e4251..123f04d4b8 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -534,7 +534,7 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block { if g.BaseFee != nil { head.BaseFee = g.BaseFee } else { - if g.Config.Parlia != nil { + if g.Config.IsInBSC() { head.BaseFee = 
new(big.Int).SetUint64(params.InitialBaseFeeForBSC) } else { head.BaseFee = new(big.Int).SetUint64(params.InitialBaseFee) @@ -546,12 +546,12 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block { ) if conf := g.Config; conf != nil { num := big.NewInt(int64(g.Number)) - if conf.Parlia == nil && conf.IsShanghai(num, g.Timestamp) { + if conf.IsNotInBSC() && conf.IsShanghai(num, g.Timestamp) { head.WithdrawalsHash = &types.EmptyWithdrawalsHash withdrawals = make([]*types.Withdrawal, 0) } if conf.IsCancun(num, g.Timestamp) { - if conf.Parlia != nil { + if conf.IsInBSC() { head.WithdrawalsHash = &types.EmptyWithdrawalsHash withdrawals = make([]*types.Withdrawal, 0) } @@ -559,7 +559,7 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block { // EIP-4788: The parentBeaconBlockRoot of the genesis block is always // the zero hash. This is because the genesis block does not have a parent // by definition. - if conf.Parlia == nil || conf.IsBohr(num, g.Timestamp) { + if conf.IsNotInBSC() || conf.IsBohr(num, g.Timestamp) { head.ParentBeaconRoot = new(common.Hash) } diff --git a/core/state_processor.go b/core/state_processor.go index 6e3e2d67d2..4299f37666 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -157,7 +157,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg // Read requests if Prague is enabled. var requests [][]byte - if config.IsPrague(block.Number(), block.Time()) && config.Parlia == nil { + if config.IsPrague(block.Number(), block.Time()) && config.IsNotInBSC() { var allCommonLogs []*types.Log for _, receipt := range receipts { allCommonLogs = append(allCommonLogs, receipt.Logs...) @@ -293,7 +293,7 @@ func ApplyTransaction(evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header * func ProcessBeaconBlockRoot(beaconRoot common.Hash, evm *vm.EVM) { // Return immediately if beaconRoot equals the zero hash when using the Parlia engine. 
if beaconRoot == (common.Hash{}) { - if chainConfig := evm.ChainConfig(); chainConfig != nil && chainConfig.Parlia != nil { + if chainConfig := evm.ChainConfig(); chainConfig != nil && chainConfig.IsInBSC() { return } } diff --git a/core/state_processor_test.go b/core/state_processor_test.go index d7cc96128a..30eb009922 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -421,7 +421,7 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr header.BlobGasUsed = &used beaconRoot := common.HexToHash("0xbeac00") - if config.Parlia == nil { + if config.IsNotInBSC() { header.ParentBeaconRoot = &beaconRoot } } diff --git a/core/state_transition.go b/core/state_transition.go index ad14acaf76..13c6433b75 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -565,7 +565,7 @@ func (st *stateTransition) execute() (*ExecutionResult, error) { fee := new(uint256.Int).SetUint64(st.gasUsed()) fee.Mul(fee, effectiveTipU256) // consensus engine is parlia - if st.evm.ChainConfig().Parlia != nil { + if st.evm.ChainConfig().IsInBSC() { st.state.AddBalance(consensus.SystemAddress, fee, tracing.BalanceIncreaseRewardTransactionFee) // add extra blob fee reward if rules.IsCancun { diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 9b78356509..06f94354d9 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -890,7 +890,7 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) { p.updateStorageMetrics() // Perform the conversion logic at the fork boundary - if !p.chain.Config().IsOsaka(oldHead.Number, oldHead.Time) && p.chain.Config().IsOsaka(newHead.Number, newHead.Time) { + if !p.chain.Config().IsOsaka(oldHead.Number, oldHead.Time) && p.chain.Config().IsOsaka(newHead.Number, newHead.Time) && p.chain.Config().IsNotInBSC() { // Deep copy all indexed transaction metadata. var ( ids = make(map[common.Address]map[uint64]uint64) @@ -1209,7 +1209,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error { // could theoretically halt a Geth node for ~1.2s by reorging per block. However, // this attack is financially inefficient to execute. 
head := p.head.Load() - if p.chain.Config().IsOsaka(head.Number, head.Time) && tx.BlobTxSidecar().Version == types.BlobSidecarVersion0 { + if p.chain.Config().IsOsaka(head.Number, head.Time) && tx.BlobTxSidecar().Version == types.BlobSidecarVersion0 && p.chain.Config().IsNotInBSC() { if err := tx.BlobTxSidecar().ToV1(); err != nil { log.Error("Failed to convert the legacy sidecar", "err", err) return err @@ -1663,7 +1663,6 @@ func (p *BlobPool) preCheck(tx *types.Transaction) error { var ( head = p.head.Load() isOsaka = p.chain.Config().IsOsaka(head.Number, head.Time) - isMendel = p.chain.Config().IsMendel(head.Number, head.Time) deadline time.Time ) if isOsaka { @@ -1675,7 +1674,7 @@ func (p *BlobPool) preCheck(tx *types.Transaction) error { return err } // Before the Osaka fork, reject the blob txs with cell proofs - if !isOsaka || isMendel { // always true(assuming MendelTime == OsakaTime) + if !isOsaka || p.chain.Config().IsInBSC() { if tx.BlobTxSidecar().Version == types.BlobSidecarVersion0 { return nil } else { diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 1721dd3269..fea899e786 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -260,7 +260,7 @@ type Config struct { // Clique is allowed for now to live standalone, but ethash is forbidden and can // only exist on already merged networks. func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database, ee *ethapi.BlockChainAPI, genesisHash common.Hash) (consensus.Engine, error) { - if config.Parlia != nil { + if config.IsInBSC() { return parlia.New(config, db, ee, genesisHash), nil } if config.TerminalTotalDifficulty == nil { diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 2b5c355848..ecd5ab7f89 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -2154,7 +2154,7 @@ func (api *TransactionAPI) FillTransaction(ctx context.Context, args Transaction func (api *TransactionAPI) currentBlobSidecarVersion() byte { h := api.b.CurrentHeader() - if api.b.ChainConfig().IsOsaka(h.Number, h.Time) { + if api.b.ChainConfig().IsOsaka(h.Number, h.Time) && api.b.ChainConfig().IsNotInBSC() { return types.BlobSidecarVersion1 } return types.BlobSidecarVersion0 diff --git a/miner/worker.go b/miner/worker.go index ab62c7c7df..da9429fa8f 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -450,7 +450,7 @@ func (w *worker) newWorkLoop(recommit time.Duration) { // If sealing is running resubmit a new work cycle periodically to pull in // higher priced transactions. Disable this overhead for pending blocks. if w.isRunning() && ((w.chainConfig.Clique != nil && - w.chainConfig.Clique.Period > 0) || (w.chainConfig.Parlia != nil)) { + w.chainConfig.Clique.Period > 0) || (w.chainConfig.IsInBSC())) { // Short circuit if no new transaction arrives. 
commit(commitInterruptResubmit) } @@ -980,7 +980,7 @@ func (w *worker) prepareWork(genParams *generateParams, witness bool) (*environm // Set baseFee and GasLimit if we are on an EIP-1559 chain if w.chainConfig.IsLondon(header.Number) { header.BaseFee = eip1559.CalcBaseFee(w.chainConfig, parent) - if w.chainConfig.Parlia == nil && !w.chainConfig.IsLondon(parent.Number) { + if w.chainConfig.IsNotInBSC() && !w.chainConfig.IsLondon(parent.Number) { parentGasLimit := parent.GasLimit * w.chainConfig.ElasticityMultiplier() header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil) } @@ -999,7 +999,7 @@ func (w *worker) prepareWork(genParams *generateParams, witness bool) (*environm } header.BlobGasUsed = new(uint64) header.ExcessBlobGas = &excessBlobGas - if w.chainConfig.Parlia == nil { + if w.chainConfig.IsNotInBSC() { header.ParentBeaconRoot = genParams.beaconRoot } else { header.WithdrawalsHash = &types.EmptyWithdrawalsHash @@ -1165,7 +1165,7 @@ func (w *worker) generateWork(genParam *generateParams, witness bool) *newPayloa } // Collect consensus-layer requests if Prague is enabled. var requests [][]byte - if w.chainConfig.IsPrague(work.header.Number, work.header.Time) && w.chainConfig.Parlia == nil { + if w.chainConfig.IsPrague(work.header.Number, work.header.Time) && w.chainConfig.IsNotInBSC() { requests = [][]byte{} // EIP-6110 deposits if err := core.ParseDepositLogs(&requests, allLogs, w.chainConfig); err != nil { diff --git a/params/config.go b/params/config.go index 8b99b8bbdd..a952095930 100644 --- a/params/config.go +++ b/params/config.go @@ -799,7 +799,7 @@ func (c *ChainConfig) Description() string { } banner += fmt.Sprintf("Chain ID: %v (%s)\n", c.ChainID, network) switch { - case c.Parlia != nil: + case c.IsInBSC(): banner += "Consensus: Parlia (proof-of-staked--authority)\n" case c.Ethash != nil: banner += "Consensus: Beacon (proof-of-stake), merged from Ethash (proof-of-work)\n" @@ -821,7 +821,7 @@ func (c *ChainConfig) String() string { engine = c.Ethash case c.Clique != nil: engine = c.Clique - case c.Parlia != nil: + case c.IsInBSC(): engine = c.Parlia default: engine = "unknown" @@ -1335,6 +1335,14 @@ func (c *ChainConfig) IsOnPrague(currentBlockNumber *big.Int, lastBlockTime uint return !c.IsPrague(lastBlockNumber, lastBlockTime) && c.IsPrague(currentBlockNumber, currentBlockTime) } +func (c *ChainConfig) IsInBSC() bool { + return c.Parlia != nil +} + +func (c *ChainConfig) IsNotInBSC() bool { + return c.Parlia == nil +} + // IsLorentz returns whether time is either equal to the Lorentz fork time or greater. 
func (c *ChainConfig) IsLorentz(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.LorentzTime, time) @@ -1488,7 +1496,7 @@ func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, time u // to guarantee that forks can be implemented in a different order than on official networks func (c *ChainConfig) CheckConfigForkOrder() error { // skip checking for non-Parlia egine - if c.Parlia == nil { + if c.IsNotInBSC() { return nil } type fork struct { From b5d8de59213a6e32904f170e39006291f420cacb Mon Sep 17 00:00:00 2001 From: flywukong <19421226+flywukong@users.noreply.github.com> Date: Tue, 6 Jan 2026 15:44:06 +0800 Subject: [PATCH 16/28] fix: fix eip-7910 test case --- .../ethapi/testdata/eth_config-current.json | 11 +++++-- .../testdata/eth_config-next-and-last.json | 32 +++++++++++++++---- 2 files changed, 35 insertions(+), 8 deletions(-) diff --git a/internal/ethapi/testdata/eth_config-current.json b/internal/ethapi/testdata/eth_config-current.json index 0597c23e39..c9b3396394 100644 --- a/internal/ethapi/testdata/eth_config-current.json +++ b/internal/ethapi/testdata/eth_config-current.json @@ -7,7 +7,7 @@ "baseFeeUpdateFraction": 5007716 }, "chainId": "0x88bb0", - "forkId": "0x0929e24e", + "forkId": "0x738a7ec5", "precompiles": { "BLAKE2F": "0x0000000000000000000000000000000000000009", "BLS12_G1ADD": "0x000000000000000000000000000000000000000b", @@ -17,15 +17,22 @@ "BLS12_MAP_FP2_TO_G2": "0x0000000000000000000000000000000000000011", "BLS12_MAP_FP_TO_G1": "0x0000000000000000000000000000000000000010", "BLS12_PAIRING_CHECK": "0x000000000000000000000000000000000000000f", + "BLS_SIGNATURE_VERIFY": "0x0000000000000000000000000000000000000066", "BN254_ADD": "0x0000000000000000000000000000000000000006", "BN254_MUL": "0x0000000000000000000000000000000000000007", "BN254_PAIRING": "0x0000000000000000000000000000000000000008", + "COMET_BFT_LIGHT_BLOCK_VALIDATE_HERTZ": "0x0000000000000000000000000000000000000067", "ECREC": "0x0000000000000000000000000000000000000001", + "HEADER_VALIDATE": "0x0000000000000000000000000000000000000064", + "IAVL_MERKLE_PROOF_VALIDATE_PLATO": "0x0000000000000000000000000000000000000065", "ID": "0x0000000000000000000000000000000000000004", "KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a", "MODEXP": "0x0000000000000000000000000000000000000005", + "P256VERIFY": "0x0000000000000000000000000000000000000100", "RIPEMD160": "0x0000000000000000000000000000000000000003", - "SHA256": "0x0000000000000000000000000000000000000002" + "SECP256K1_SIGNATURE_RECOVER": "0x0000000000000000000000000000000000000069", + "SHA256": "0x0000000000000000000000000000000000000002", + "VERIFY_DOUBLE_SIGN_EVIDENCE": "0x0000000000000000000000000000000000000068" }, "systemContracts": { "BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02", diff --git a/internal/ethapi/testdata/eth_config-next-and-last.json b/internal/ethapi/testdata/eth_config-next-and-last.json index 81869ba174..5b2247f296 100644 --- a/internal/ethapi/testdata/eth_config-next-and-last.json +++ b/internal/ethapi/testdata/eth_config-next-and-last.json @@ -7,18 +7,24 @@ "target": 3 }, "chainId": "0x88bb0", - "forkId": "0xbef71d30", + "forkId": "0xcd36a67e", "precompiles": { "BLAKE2F": "0x0000000000000000000000000000000000000009", + "BLS_SIGNATURE_VERIFY": "0x0000000000000000000000000000000000000066", "BN254_ADD": "0x0000000000000000000000000000000000000006", "BN254_MUL": "0x0000000000000000000000000000000000000007", "BN254_PAIRING": 
"0x0000000000000000000000000000000000000008", + "COMET_BFT_LIGHT_BLOCK_VALIDATE_HERTZ": "0x0000000000000000000000000000000000000067", "ECREC": "0x0000000000000000000000000000000000000001", + "HEADER_VALIDATE": "0x0000000000000000000000000000000000000064", + "IAVL_MERKLE_PROOF_VALIDATE_PLATO": "0x0000000000000000000000000000000000000065", "ID": "0x0000000000000000000000000000000000000004", "KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a", "MODEXP": "0x0000000000000000000000000000000000000005", "RIPEMD160": "0x0000000000000000000000000000000000000003", - "SHA256": "0x0000000000000000000000000000000000000002" + "SECP256K1_SIGNATURE_RECOVER": "0x0000000000000000000000000000000000000069", + "SHA256": "0x0000000000000000000000000000000000000002", + "VERIFY_DOUBLE_SIGN_EVIDENCE": "0x0000000000000000000000000000000000000068" }, "systemContracts": { "BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02" @@ -32,7 +38,7 @@ "target": 6 }, "chainId": "0x88bb0", - "forkId": "0x0929e24e", + "forkId": "0x738a7ec5", "precompiles": { "BLAKE2F": "0x0000000000000000000000000000000000000009", "BLS12_G1ADD": "0x000000000000000000000000000000000000000b", @@ -42,15 +48,22 @@ "BLS12_MAP_FP2_TO_G2": "0x0000000000000000000000000000000000000011", "BLS12_MAP_FP_TO_G1": "0x0000000000000000000000000000000000000010", "BLS12_PAIRING_CHECK": "0x000000000000000000000000000000000000000f", + "BLS_SIGNATURE_VERIFY": "0x0000000000000000000000000000000000000066", "BN254_ADD": "0x0000000000000000000000000000000000000006", "BN254_MUL": "0x0000000000000000000000000000000000000007", "BN254_PAIRING": "0x0000000000000000000000000000000000000008", + "COMET_BFT_LIGHT_BLOCK_VALIDATE_HERTZ": "0x0000000000000000000000000000000000000067", "ECREC": "0x0000000000000000000000000000000000000001", + "HEADER_VALIDATE": "0x0000000000000000000000000000000000000064", + "IAVL_MERKLE_PROOF_VALIDATE_PLATO": "0x0000000000000000000000000000000000000065", "ID": "0x0000000000000000000000000000000000000004", "KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a", "MODEXP": "0x0000000000000000000000000000000000000005", + "P256VERIFY": "0x0000000000000000000000000000000000000100", "RIPEMD160": "0x0000000000000000000000000000000000000003", - "SHA256": "0x0000000000000000000000000000000000000002" + "SECP256K1_SIGNATURE_RECOVER": "0x0000000000000000000000000000000000000069", + "SHA256": "0x0000000000000000000000000000000000000002", + "VERIFY_DOUBLE_SIGN_EVIDENCE": "0x0000000000000000000000000000000000000068" }, "systemContracts": { "BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02", @@ -68,7 +81,7 @@ "target": 6 }, "chainId": "0x88bb0", - "forkId": "0x0929e24e", + "forkId": "0x738a7ec5", "precompiles": { "BLAKE2F": "0x0000000000000000000000000000000000000009", "BLS12_G1ADD": "0x000000000000000000000000000000000000000b", @@ -78,15 +91,22 @@ "BLS12_MAP_FP2_TO_G2": "0x0000000000000000000000000000000000000011", "BLS12_MAP_FP_TO_G1": "0x0000000000000000000000000000000000000010", "BLS12_PAIRING_CHECK": "0x000000000000000000000000000000000000000f", + "BLS_SIGNATURE_VERIFY": "0x0000000000000000000000000000000000000066", "BN254_ADD": "0x0000000000000000000000000000000000000006", "BN254_MUL": "0x0000000000000000000000000000000000000007", "BN254_PAIRING": "0x0000000000000000000000000000000000000008", + "COMET_BFT_LIGHT_BLOCK_VALIDATE_HERTZ": "0x0000000000000000000000000000000000000067", "ECREC": "0x0000000000000000000000000000000000000001", + "HEADER_VALIDATE": "0x0000000000000000000000000000000000000064", + 
"IAVL_MERKLE_PROOF_VALIDATE_PLATO": "0x0000000000000000000000000000000000000065", "ID": "0x0000000000000000000000000000000000000004", "KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a", "MODEXP": "0x0000000000000000000000000000000000000005", + "P256VERIFY": "0x0000000000000000000000000000000000000100", "RIPEMD160": "0x0000000000000000000000000000000000000003", - "SHA256": "0x0000000000000000000000000000000000000002" + "SECP256K1_SIGNATURE_RECOVER": "0x0000000000000000000000000000000000000069", + "SHA256": "0x0000000000000000000000000000000000000002", + "VERIFY_DOUBLE_SIGN_EVIDENCE": "0x0000000000000000000000000000000000000068" }, "systemContracts": { "BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02", From 6d79b77d1d37640f87580ffaa267930eee8738a0 Mon Sep 17 00:00:00 2001 From: flywukong <19421226+flywukong@users.noreply.github.com> Date: Tue, 6 Jan 2026 16:39:02 +0800 Subject: [PATCH 17/28] fix: fix api --- .../testdata/eth_getBlockReceipts-tag-pending.json | 12 +++++++----- .../testdata/eth_getHeaderByNumber-tag-pending.json | 2 +- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-tag-pending.json b/internal/ethapi/testdata/eth_getBlockReceipts-tag-pending.json index 75f9f3ad99..ea2f92e024 100644 --- a/internal/ethapi/testdata/eth_getBlockReceipts-tag-pending.json +++ b/internal/ethapi/testdata/eth_getBlockReceipts-tag-pending.json @@ -1,18 +1,20 @@ [ { - "blockHash": "0xc74cf882395ec92eec3673d93a57f9a3bf1a5e696fae3e52f252059af62756c8", + "blobGasPrice": "0x1", + "blobGasUsed": "0x20000", + "blockHash": "0xfd2966f1ceea73371953979b55ec9f2b4285f9284eb6fb81393bf2d09a6ec665", "blockNumber": "0x7", "contractAddress": null, "cumulativeGasUsed": "0x5208", - "effectiveGasPrice": "0x17b07ddf", + "effectiveGasPrice": "0x17b07de0", "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", "gasUsed": "0x5208", "logs": [], "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "status": "0x1", "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e", - "transactionHash": "0xa7eeffe8111539a8f9725eb4d49e341efa1287d33190300adab220929daa5fac", + "transactionHash": "0xe5fe7f5e376f9d9d7cbf727049bad0bdf0a066f169a7269262129e6f0b650b5a", "transactionIndex": "0x0", - "type": "0x0" + "type": "0x3" } -] \ No newline at end of file +] diff --git a/internal/ethapi/testdata/eth_getHeaderByNumber-tag-pending.json b/internal/ethapi/testdata/eth_getHeaderByNumber-tag-pending.json index 86023bddf8..19e724f546 100644 --- a/internal/ethapi/testdata/eth_getHeaderByNumber-tag-pending.json +++ b/internal/ethapi/testdata/eth_getHeaderByNumber-tag-pending.json @@ -16,5 +16,5 @@ "stateRoot": "0xce0e05397e548614a5b93254662174329466f8f4b1b391eb36fec9a7a591e58e", "timestamp": "0x6e", "transactionsRoot": "0x59abb8ec0655f66e66450d1502618bc64022ae2d2950fa471eec6e8da2846264", - "totalDifficulty": null, + "totalDifficulty": null } \ No newline at end of file From 5d1a60a5cd7df188d20ecb4de1ac3171c7bc048b Mon Sep 17 00:00:00 2001 From: qybdyx Date: Tue, 6 Jan 2026 17:54:54 +0800 Subject: 
[PATCH 18/28] legacypool_test.go: fix TestDualHeapEviction --- core/txpool/legacypool/legacypool_test.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index 796419c4b2..ddae5d441b 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -1883,8 +1883,12 @@ func TestDualHeapEviction(t *testing.T) { pool, _ := setupPoolWithConfig(eip1559Config) defer pool.Close() - pool.config.GlobalSlots = 2 - pool.config.GlobalQueue = 2 + // Use larger pool size to ensure floating heap has transactions. + // With pool size = 4, floatingCount = 4 * 1 / 5 = 0 (integer division), + // so no high fee cap transactions would be protected. + // With pool size = 20, floatingCount = 20 * 1 / 5 = 4, which is sufficient. + pool.config.GlobalSlots = 10 + pool.config.GlobalQueue = 10 pool.config.OverflowPoolSlots = 1 var ( @@ -1901,7 +1905,7 @@ func TestDualHeapEviction(t *testing.T) { } add := func(urgent bool) { - for i := 0; i < 4; i++ { + for i := 0; i < 20; i++ { var tx *types.Transaction // Create a test accounts and fund it key, _ := crypto.GenerateKey() @@ -1922,8 +1926,8 @@ func TestDualHeapEviction(t *testing.T) { pool.addRemotesSync([]*types.Transaction{tx}) } pending, queued := pool.Stats() - if pending+queued != 4 { - t.Fatalf("transaction count mismatch: have %d, want %d, pending %d, queued %d, OverflowPool %d", pending+queued, 5, pending, queued, pool.localBufferPool.Size()) + if pending+queued != 20 { + t.Fatalf("transaction count mismatch: have %d, want %d", pending+queued, 20) } } From 4c01a16e2fcf2950c3a0062f6a43f0ae908a6268 Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Wed, 7 Jan 2026 09:32:51 +0800 Subject: [PATCH 19/28] eth/ethconfig: delete field JournalFileEnabled --- cmd/utils/flags_legacy.go | 4 ++-- eth/ethconfig/config.go | 5 ++--- eth/ethconfig/gen_config.go | 18 ++++++------------ 3 files changed, 10 insertions(+), 17 deletions(-) diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go index 2d15fcfa53..a3a50e95b3 100644 --- a/cmd/utils/flags_legacy.go +++ b/cmd/utils/flags_legacy.go @@ -114,7 +114,7 @@ var ( } MetricsEnabledExpensiveFlag = &cli.BoolFlag{ Name: "metrics.expensive", - Hidden: false, // TODO(Nathan): turn it into true in version v1.8.x + Hidden: true, Usage: "Enable expensive metrics collection and reporting (deprecated)", Category: flags.DeprecatedCategory, } @@ -134,7 +134,7 @@ var ( } JournalFileFlag = &cli.BoolFlag{ Name: "journalfile", - Hidden: false, // TODO(Nathan): turn it into true in version v1.8.x + Hidden: true, Usage: "Enable using journal file to store the TrieJournal instead of KVDB in pbss (default = true, deprecated)", Value: true, Category: flags.DeprecatedCategory, diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index fea899e786..499511311f 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -132,9 +132,8 @@ type Config struct { // State scheme represents the scheme used to store ethereum states and trie // nodes on top. It can be 'hash', 'path', or none which means use the scheme // consistent with persistent state. 
- StateScheme string `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top - PathSyncFlush bool `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top - JournalFileEnabled bool // TODO(Nathan): deprecated, will delete together with JournalFileFlag in v1.8.x + StateScheme string `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top + PathSyncFlush bool `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top DisableTxIndexer bool `toml:",omitempty"` // Whether to enable the transaction indexer diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index b4a629749f..57d0457aa7 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -40,10 +40,9 @@ func (c Config) MarshalTOML() (interface{}, error) { LogHistory uint64 `toml:",omitempty"` LogNoHistory bool `toml:",omitempty"` LogExportCheckpoints string - StateHistory uint64 `toml:",omitempty"` - StateScheme string `toml:",omitempty"` - PathSyncFlush bool `toml:",omitempty"` - JournalFileEnabled bool + StateHistory uint64 `toml:",omitempty"` + StateScheme string `toml:",omitempty"` + PathSyncFlush bool `toml:",omitempty"` DisableTxIndexer bool `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` SkipBcVersionCheck bool `toml:"-"` @@ -121,7 +120,6 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.StateHistory = c.StateHistory enc.StateScheme = c.StateScheme enc.PathSyncFlush = c.PathSyncFlush - enc.JournalFileEnabled = c.JournalFileEnabled enc.DisableTxIndexer = c.DisableTxIndexer enc.RequiredBlocks = c.RequiredBlocks enc.SkipBcVersionCheck = c.SkipBcVersionCheck @@ -200,10 +198,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { LogHistory *uint64 `toml:",omitempty"` LogNoHistory *bool `toml:",omitempty"` LogExportCheckpoints *string - StateHistory *uint64 `toml:",omitempty"` - StateScheme *string `toml:",omitempty"` - PathSyncFlush *bool `toml:",omitempty"` - JournalFileEnabled *bool + StateHistory *uint64 `toml:",omitempty"` + StateScheme *string `toml:",omitempty"` + PathSyncFlush *bool `toml:",omitempty"` DisableTxIndexer *bool `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` SkipBcVersionCheck *bool `toml:"-"` @@ -334,9 +331,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.PathSyncFlush != nil { c.PathSyncFlush = *dec.PathSyncFlush } - if dec.JournalFileEnabled != nil { - c.JournalFileEnabled = *dec.JournalFileEnabled - } if dec.DisableTxIndexer != nil { c.DisableTxIndexer = *dec.DisableTxIndexer } From 5e13c69ba51a205eaf7b6feb3ac2073258b1eee6 Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Wed, 7 Jan 2026 09:50:29 +0800 Subject: [PATCH 20/28] core/vm: revert comments --- core/vm/instructions.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 3d818bdca3..b8139b6d75 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -737,7 +737,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { stack := scope.Stack - // Pop gas. The actual gas in evm.callGasTemp. + // Pop gas. The actual gas in interpreter.evm.callGasTemp. 
// We can use this as a temporary value temp := stack.pop() gas := interpreter.evm.callGasTemp @@ -772,7 +772,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt } func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - // Pop gas. The actual gas is in evm.callGasTemp. + // Pop gas. The actual gas is in interpreter.evm.callGasTemp. stack := scope.Stack // We use it as a temporary value temp := stack.pop() @@ -806,7 +806,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { stack := scope.Stack - // Pop gas. The actual gas is in evm.callGasTemp. + // Pop gas. The actual gas is in interpreter.evm.callGasTemp. // We use it as a temporary value temp := stack.pop() gas := interpreter.evm.callGasTemp @@ -834,7 +834,7 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext } func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - // Pop gas. The actual gas is in evm.callGasTemp. + // Pop gas. The actual gas is in interpreter.evm.callGasTemp. stack := scope.Stack // We use it as a temporary value temp := stack.pop() From d1aba50407fdebe322cbbf03951a4c09777d917d Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Wed, 7 Jan 2026 10:04:45 +0800 Subject: [PATCH 21/28] params: fix Rules for ChainConfig --- params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/config.go b/params/config.go index a952095930..9d197968b8 100644 --- a/params/config.go +++ b/params/config.go @@ -2071,7 +2071,7 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules chainID = new(big.Int) } // disallow setting Merge out of order - isMerge = isMerge && c.IsLondon(num) + isMerge = isMerge && c.IsLondon(num) || c.IsInBSC() isVerkle := isMerge && c.IsVerkle(num, timestamp) return Rules{ ChainID: new(big.Int).Set(chainID), From 0e5f7aaae9b5bab9f5a9c948be66aa2b3d0f493c Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Wed, 7 Jan 2026 15:12:21 +0800 Subject: [PATCH 22/28] params: fix Rules for ChainConfig --- params/config.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/params/config.go b/params/config.go index 9d197968b8..ee3414f893 100644 --- a/params/config.go +++ b/params/config.go @@ -2071,7 +2071,7 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules chainID = new(big.Int) } // disallow setting Merge out of order - isMerge = isMerge && c.IsLondon(num) || c.IsInBSC() + isMerge = isMerge && c.IsLondon(num) // always false in BSC isVerkle := isMerge && c.IsVerkle(num, timestamp) return Rules{ ChainID: new(big.Int).Set(chainID), @@ -2094,20 +2094,20 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules IsPlato: c.IsPlato(num), IsHertz: c.IsHertz(num), IsHertzfix: c.IsHertzfix(num), - IsShanghai: isMerge && c.IsShanghai(num, timestamp), + IsShanghai: (isMerge || c.IsInBSC()) && c.IsShanghai(num, timestamp), IsKepler: c.IsKepler(num, timestamp), IsFeynman: c.IsFeynman(num, timestamp), - IsCancun: isMerge && c.IsCancun(num, timestamp), + IsCancun: (isMerge || c.IsInBSC()) && c.IsCancun(num, timestamp), IsHaber: c.IsHaber(num, timestamp), IsBohr: c.IsBohr(num, timestamp), IsPascal: c.IsPascal(num, timestamp), - 
IsPrague: isMerge && c.IsPrague(num, timestamp), + IsPrague: (isMerge || c.IsInBSC()) && c.IsPrague(num, timestamp), IsLorentz: c.IsLorentz(num, timestamp), IsMaxwell: c.IsMaxwell(num, timestamp), IsFermi: c.IsFermi(num, timestamp), - IsOsaka: isMerge && c.IsOsaka(num, timestamp), + IsOsaka: (isMerge || c.IsInBSC()) && c.IsOsaka(num, timestamp), IsMendel: c.IsMendel(num, timestamp), - IsAmsterdam: isMerge && c.IsAmsterdam(num, timestamp), + IsAmsterdam: (isMerge || c.IsInBSC()) && c.IsAmsterdam(num, timestamp), IsVerkle: c.IsVerkle(num, timestamp), IsEIP4762: isVerkle, } From 2fbbb4fc647f80606df6767830dfda8ea1c2e417 Mon Sep 17 00:00:00 2001 From: flywukong <19421226+flywukong@users.noreply.github.com> Date: Wed, 7 Jan 2026 15:12:32 +0800 Subject: [PATCH 23/28] fix: fix transaction rollback --- ethclient/simulated/rollback_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ethclient/simulated/rollback_test.go b/ethclient/simulated/rollback_test.go index 093467d291..a06774fe57 100644 --- a/ethclient/simulated/rollback_test.go +++ b/ethclient/simulated/rollback_test.go @@ -24,6 +24,8 @@ import ( "time" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/node" ) // TestTransactionRollbackBehavior tests that calling Rollback on the simulated backend doesn't prevent subsequent @@ -34,6 +36,11 @@ func TestTransactionRollbackBehavior(t *testing.T) { testAddr: {Balance: big.NewInt(10000000000000000)}, testAddr2: {Balance: big.NewInt(10000000000000000)}, }, + // Disable Osaka to prevent blob v0→v1 conversion in blobpool. + // BSC does not support blob sidecar v1. + func(nodeConf *node.Config, ethConf *ethconfig.Config) { + ethConf.Genesis.Config.OsakaTime = nil + }, ) defer sim.Close() client := sim.Client() From 21eee8c85b8a8f8c2eeb3ab36eb382f731909f86 Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Wed, 7 Jan 2026 16:41:45 +0800 Subject: [PATCH 24/28] internal/ethapi: fix eth_config --- params/config.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/params/config.go b/params/config.go index ee3414f893..f5e22d5c20 100644 --- a/params/config.go +++ b/params/config.go @@ -1847,7 +1847,7 @@ func (c *ChainConfig) BlobConfig(fork forks.Fork) *BlobConfig { return c.BlobScheduleConfig.BPO1 case forks.Osaka: return c.BlobScheduleConfig.Osaka - case forks.Prague: + case forks.Fermi, forks.Maxwell, forks.Lorentz, forks.Prague: return c.BlobScheduleConfig.Prague case forks.Cancun: return c.BlobScheduleConfig.Cancun @@ -1864,6 +1864,12 @@ func (c *ChainConfig) ActiveSystemContracts(time uint64) map[string]common.Addre if fork >= forks.Osaka { // no new system contracts } + if c.IsInBSC() { + if fork >= forks.Prague { + active["HISTORY_STORAGE_ADDRESS"] = HistoryStorageAddress + } + return active + } if fork >= forks.Prague { active["CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS"] = ConsolidationQueueAddress active["DEPOSIT_CONTRACT_ADDRESS"] = c.DepositContractAddress From 970aaf902a6b27fff44b44890bc4003b05a5526e Mon Sep 17 00:00:00 2001 From: qybdyx Date: Wed, 7 Jan 2026 15:01:47 +0800 Subject: [PATCH 25/28] core: fix core ut --- core/rawdb/chain_freezer.go | 6 +++--- core/vm/contracts_test.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go index 699e879ef3..b2e65db2a5 100644 --- a/core/rawdb/chain_freezer.go +++ b/core/rawdb/chain_freezer.go @@ -245,7 +245,7 @@ func (f 
*chainFreezer) freeze(db ethdb.KeyValueStore, continueFreeze bool) { last uint64 // the last block to freeze hash common.Hash - number *uint64 + number uint64 head *types.Header err error ) @@ -442,9 +442,9 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore, continueFreeze bool) { env, _ := f.freezeEnv.Load().(*ethdb.FreezerEnv) // try prune blob data after cancun fork if isCancun(env, head.Number, head.Time) { - f.tryPruneBlobAncientTable(env, *number) + f.tryPruneBlobAncientTable(env, number) } - f.tryPruneHistoryBlock(*number) + f.tryPruneHistoryBlock(number) // TODO(galaio): Temporarily comment that the current BSC is suitable for small-volume writes, // and then the large-volume mode will be enabled after optimizing the freeze performance of ancient. diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go index 5004ba9948..c27464ea77 100644 --- a/core/vm/contracts_test.go +++ b/core/vm/contracts_test.go @@ -59,7 +59,7 @@ var allPrecompiles = map[common.Address]PrecompiledContract{ common.BytesToAddress([]byte{9}): &blake2F{}, common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{}, - common.BytesToAddress([]byte{0x01, 0x00}): &p256Verify{}, + common.BytesToAddress([]byte{0x01, 0x00}): &p256Verify{eip7951: true}, common.BytesToAddress([]byte{0x0f, 0x0a}): &bls12381G1Add{}, common.BytesToAddress([]byte{0x0f, 0x0b}): &bls12381G1MultiExp{}, common.BytesToAddress([]byte{0x0f, 0x0c}): &bls12381G2Add{}, From 6f976ffb399c97714797fb8e8cf8b3d0248d3d46 Mon Sep 17 00:00:00 2001 From: qybdyx Date: Wed, 7 Jan 2026 18:04:59 +0800 Subject: [PATCH 26/28] core: fix TestWaitDeployedCornerCases --- accounts/abi/bind/v2/util_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/accounts/abi/bind/v2/util_test.go b/accounts/abi/bind/v2/util_test.go index 5beb0a4fae..b2514140b2 100644 --- a/accounts/abi/bind/v2/util_test.go +++ b/accounts/abi/bind/v2/util_test.go @@ -107,7 +107,7 @@ func TestWaitDeployedCornerCases(t *testing.T) { }, ) head, _ = backend.Client().HeaderByNumber(t.Context(), nil) // Should be child's, good enough - gasPrice = new(big.Int).Add(head.BaseFee, big.NewInt(1)) + gasPrice = new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei)) signer = types.LatestSigner(params.AllDevChainProtocolChanges) code = common.FromHex("6060604052600a8060106000396000f360606040526008565b00") ctx, cancel = context.WithCancel(t.Context()) From 2d5ecd97f671bb8fd157b7496ccda612ed447713 Mon Sep 17 00:00:00 2001 From: qybdyx Date: Thu, 8 Jan 2026 10:45:07 +0800 Subject: [PATCH 27/28] fix suit_test: only set HistoryStorageAddress nonce on BSC chains during Prague activation --- core/systemcontracts/upgrade.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/systemcontracts/upgrade.go b/core/systemcontracts/upgrade.go index 0751897c27..0ef6eef11c 100644 --- a/core/systemcontracts/upgrade.go +++ b/core/systemcontracts/upgrade.go @@ -1063,7 +1063,7 @@ func TryUpdateBuildInSystemContract(config *params.ChainConfig, blockNumber *big upgradeBuildInSystemContract(config, blockNumber, lastBlockTime, blockTime, statedb) } // HistoryStorageAddress is a special system contract in bsc, which can't be upgraded - if config.IsOnPrague(blockNumber, lastBlockTime, blockTime) { + if config.IsInBSC() && config.IsOnPrague(blockNumber, lastBlockTime, blockTime) { statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode, tracing.CodeChangeSystemContractUpgrade) statedb.SetNonce(params.HistoryStorageAddress, 1, tracing.NonceChangeNewContract) log.Info("Set code 
for HistoryStorageAddress", "blockNumber", blockNumber.Int64(), "blockTime", blockTime) From 1c5f12619f1f0b1657c2842246b030ecd408ed67 Mon Sep 17 00:00:00 2001 From: allformless <213398294+allformless@users.noreply.github.com> Date: Thu, 8 Jan 2026 10:58:43 +0800 Subject: [PATCH 28/28] eth/syncer: disable debug_sync for now --- eth/syncer/syncer.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/eth/syncer/syncer.go b/eth/syncer/syncer.go index 6b33ec54ba..fc7f6f2f27 100644 --- a/eth/syncer/syncer.go +++ b/eth/syncer/syncer.go @@ -194,5 +194,7 @@ func NewAPI(s *Syncer) *API { // Sync initiates a full sync to the target block hash. func (api *API) Sync(target common.Hash) error { - return api.s.Sync(target) + // TODO(Nathan): Re-enable this functionality once supported. + return errors.New("syncing to a specific target is currently not supported by BSC") + // return api.s.Sync(target) }
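
The recurring theme of patches 15, 21 and 22 is that on BSC (any chain whose config carries a Parlia engine section) the timestamp-scheduled Ethereum forks activate without the merge flag, while upstream go-ethereum keeps them gated behind isMerge. The sketch below is a minimal, self-contained illustration of that gating and is not code from the series: ChainConfig here is a stripped-down stand-in with only the fields the example needs, IsLondon/IsCancun are simplified re-implementations, and isCancunRule is a hypothetical helper that mirrors the `(isMerge || c.IsInBSC()) && c.IsCancun(...)` expression introduced in patch 22.

```go
package main

import (
	"fmt"
	"math/big"
)

// Minimal stand-in for params.ChainConfig; only the fields this sketch needs.
type ChainConfig struct {
	Parlia      *struct{} // non-nil means the chain runs the Parlia engine (BSC)
	LondonBlock *big.Int
	CancunTime  *uint64
}

func (c *ChainConfig) IsInBSC() bool    { return c.Parlia != nil }
func (c *ChainConfig) IsNotInBSC() bool { return c.Parlia == nil }

// Simplified block-number fork check.
func (c *ChainConfig) IsLondon(num *big.Int) bool {
	return c.LondonBlock != nil && c.LondonBlock.Cmp(num) <= 0
}

// Simplified timestamp fork check (Cancun requires London plus the fork time).
func (c *ChainConfig) IsCancun(num *big.Int, time uint64) bool {
	return c.IsLondon(num) && c.CancunTime != nil && *c.CancunTime <= time
}

// Hypothetical helper mirroring the Rules gating from patch 22: on Ethereum the
// timestamp forks only count post-merge, on BSC they key purely off fork times.
func isCancunRule(c *ChainConfig, isMerge bool, num *big.Int, time uint64) bool {
	isMerge = isMerge && c.IsLondon(num) // always false in BSC
	return (isMerge || c.IsInBSC()) && c.IsCancun(num, time)
}

func main() {
	cancun := uint64(100)
	bsc := &ChainConfig{Parlia: &struct{}{}, LondonBlock: big.NewInt(0), CancunTime: &cancun}
	eth := &ChainConfig{LondonBlock: big.NewInt(0), CancunTime: &cancun}

	// BSC: Cancun rules activate from the fork time even though isMerge is false.
	fmt.Println(isCancunRule(bsc, false, big.NewInt(1), 200)) // true
	// Ethereum: the same fork time only counts once the chain has merged.
	fmt.Println(isCancunRule(eth, false, big.NewInt(1), 200)) // false
	fmt.Println(isCancunRule(eth, true, big.NewInt(1), 200))  // true
}
```

Under this reading, isMerge can stay strictly `isMerge && c.IsLondon(num)` (always false on BSC, per the comment in patch 22) because every fork check that BSC needs adds the `c.IsInBSC()` escape hatch explicitly rather than pretending the chain has merged.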