diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 8ada24829cf4d..0dcb38358f687 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -81,6 +81,9 @@ var (
utils.TxPoolAccountQueueFlag,
utils.TxPoolGlobalQueueFlag,
utils.TxPoolLifetimeFlag,
+ utils.BlobPoolDataDirFlag,
+ utils.BlobPoolDataCapFlag,
+ utils.BlobPoolPriceBumpFlag,
utils.SyncModeFlag,
utils.SyncTargetFlag,
utils.ExitWhenSyncedFlag,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 8961e350ae5a8..e0c7a42670e15 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -368,6 +368,25 @@ var (
Value: ethconfig.Defaults.TxPool.Lifetime,
Category: flags.TxPoolCategory,
}
+ // Blob transaction pool settings
+ BlobPoolDataDirFlag = &cli.StringFlag{
+ Name: "blobpool.datadir",
+ Usage: "Data directory to store blob transactions in",
+ Value: ethconfig.Defaults.BlobPool.Datadir,
+ Category: flags.BlobPoolCategory,
+ }
+ BlobPoolDataCapFlag = &cli.Uint64Flag{
+ Name: "blobpool.datacap",
+ Usage: "Disk space to allocate for pending blob transactions (soft limit)",
+ Value: ethconfig.Defaults.BlobPool.Datacap,
+ Category: flags.BlobPoolCategory,
+ }
+ BlobPoolPriceBumpFlag = &cli.Uint64Flag{
+ Name: "blobpool.pricebump",
+ Usage: "Price bump percentage to replace an already existing blob transaction",
+ Value: ethconfig.Defaults.BlobPool.PriceBump,
+ Category: flags.BlobPoolCategory,
+ }
// Performance tuning settings
CacheFlag = &cli.IntFlag{
Name: "cache",
diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go
new file mode 100644
index 0000000000000..baf5e54667e36
--- /dev/null
+++ b/core/txpool/blobpool/blobpool.go
@@ -0,0 +1,1533 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package blobpool implements the EIP-4844 blob transaction pool.
+package blobpool
+
+import (
+ "container/heap"
+ "fmt"
+ "math"
+ "math/big"
+ "os"
+ "path/filepath"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/misc"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/txpool"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/billy"
+ "github.com/holiman/uint256"
+)
+
+const (
+ // blobSize is the protocol constrained byte size of a single blob in a
+ // transaction. There can be multiple of these embedded into a single tx.
+ blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement
+
+ // maxBlobsPerTransaction is the maximum number of blobs a single transaction
+ // is allowed to contain. Whilst the spec states it's unlimited, the block
+ // data slots are protocol bound, which implicitly also limit this.
+ maxBlobsPerTransaction = params.BlobTxMaxDataGasPerBlock / params.BlobTxDataGasPerBlob
+
+ // txAvgSize is an approximate byte size of a transaction metadata to avoid
+ // tiny overflows causing all txs to move a shelf higher, wasting disk space.
+ txAvgSize = 4 * 1024
+
+ // txMaxSize is the maximum size a single transaction can have, outside
+ // the included blobs. Since blob transactions are pulled instead of pushed,
+ // and only a small metadata is kept in ram, the rest is on disk, there is
+ // no critical limit that should be enforced. Still, capping it to some sane
+ // limit can never hurt.
+ txMaxSize = 1024 * 1024
+
+ // maxTxsPerAccount is the maximum number of blob transactions admitted from
+ // a single account. The limit is enforced to minimize the DoS potential of
+ // a private tx cancelling publicly propagated blobs.
+ //
+ // Note, transactions resurrected by a reorg are also subject to this limit,
+ // so pushing it down too aggressively might make resurrections non-functional.
+ maxTxsPerAccount = 16
+
+ // pendingTransactionStore is the subfolder containing the currently queued
+ // blob transactions.
+ pendingTransactionStore = "queue"
+
+ // limboedTransactionStore is the subfolder containing the currently included
+ // but not yet finalized transaction blobs.
+ limboedTransactionStore = "limbo"
+)
+
+// blobTx is a wrapper around types.BlobTx which also contains the literal blob
+// data along with all the transaction metadata.
+type blobTx struct {
+ Tx *types.Transaction
+
+ Blobs []kzg4844.Blob
+ Commits []kzg4844.Commitment
+ Proofs []kzg4844.Proof
+}
+
+// blobTxMeta is the minimal subset of types.BlobTx necessary to validate and
+// schedule the blob transactions into the following blocks. Only ever add the
+// bare minimum needed fields to keep the size down (and thus number of entries
+// larger with the same memory consumption).
+type blobTxMeta struct {
+ hash common.Hash // Transaction hash to maintain the lookup table
+ id uint64 // Storage ID in the pool's persistent store
+ size uint32 // Byte size in the pool's persistent store
+
+ nonce uint64 // Needed to prioritize inclusion order within an account
+ costCap *uint256.Int // Needed to validate cumulative balance sufficiency
+ execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump
+ execFeeCap *uint256.Int // Needed to validate replacement price bump
+ blobFeeCap *uint256.Int // Needed to validate replacement price bump
+
+ basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap
+ blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap
+
+ evictionExecTip *uint256.Int // Worst gas tip across all previous nonces
+ evictionExecFeeJumps float64 // Worst base fee (converted to fee jumps) across all previous nonces
+ evictionBlobFeeJumps float64 // Worst blob fee (converted to fee jumps) across all previous nonces
+}
+
+// newBlobTxMeta retrieves the indexed metadata fields from a blob transaction
+// and assembles a helper struct to track in memory.
+func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
+ meta := &blobTxMeta{
+ hash: tx.Hash(),
+ id: id,
+ size: size,
+ nonce: tx.Nonce(),
+ costCap: uint256.MustFromBig(tx.Cost()),
+ execTipCap: uint256.MustFromBig(tx.GasTipCap()),
+ execFeeCap: uint256.MustFromBig(tx.GasFeeCap()),
+ blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()),
+ }
+ meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap)
+ meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap)
+
+ return meta
+}
+
+// BlobPool is the transaction pool dedicated to EIP-4844 blob transactions.
+//
+// Blob transactions are special snowflakes that are designed for a very specific
+// purpose (rollups) and are expected to adhere to that specific use case. These
+// behavioural expectations allow us to design a transaction pool that is more robust
+// (i.e. resending issues) and more resilient to DoS attacks (e.g. replace-flush
+// attacks) than the generic tx pool. These improvements will also mean, however,
+// that we enforce a significantly more aggressive strategy on entering and exiting
+// the pool:
+//
+// - Blob transactions are large. With the initial design aiming for 128KB blobs,
+// we must ensure that these only traverse the network the absolute minimum
+// number of times. Broadcasting to sqrt(peers) is out of the question, rather
+// these should only ever be announced and the remote side should request it if
+// it wants to.
+//
+// - Block blob-space is limited. With blocks being capped to a few blob txs, we
+// can make use of the very low expected churn rate within the pool. Notably,
+// we should be able to use a persistent disk backend for the pool, solving
+// the tx resend issue that plagues the generic tx pool, as long as there's no
+// artificial churn (i.e. pool wars).
+//
+// - Purpose of blobs are layer-2s. Layer-2s are meant to use blob transactions to
+// commit to their own current state, which is independent of Ethereum mainnet
+// (state, txs). This means that there's no reason for blob tx cancellation or
+// replacement, apart from a potential basefee / miner tip adjustment.
+//
+// - Replacements are expensive. Given their size, propagating a replacement
+// blob transaction to an existing one should be aggressively discouraged.
+// Whilst generic transactions can start at 1 Wei gas cost and require a 10%
+// fee bump to replace, we suggest requiring a higher min cost (e.g. 1 gwei)
+// and a more aggressive bump (100%).
+//
+// - Cancellation is prohibitive. Evicting an already propagated blob tx is a huge
+// DoS vector. As such, a) replacement (higher-fee) blob txs mustn't invalidate
+// already propagated (future) blob txs (cumulative fee); b) nonce-gapped blob
+// txs are disallowed; c) the presence of blob transactions exclude non-blob
+// transactions.
+//
+// - Malicious cancellations are possible. Although the pool might prevent txs
+// that cancel blobs, blocks might contain such transaction (malicious miner
+// or flashbotter). The pool should cap the total number of blob transactions
+// per account as to prevent propagating too much data before cancelling it
+// via a normal transaction. It should nonetheless be high enough to support
+// resurrecting reorged transactions. Perhaps 4-16.
+//
+// - Local txs are meaningless. Mining pools historically used local transactions
+// for payouts or for backdoor deals. With 1559 in place, the basefee usually
+// dominates the final price, so 0 or non-0 tip doesn't change much. Blob txs
+// retain the 1559 2D gas pricing (and introduce on top a dynamic data gas fee),
+// so locality is moot. With a disk backed blob pool avoiding the resend issue,
+// there's also no need to save own transactions for later.
+//
+// - No-blob blob-txs are bad. Theoretically there's no strong reason to disallow
+// blob txs containing 0 blobs. In practice, admitting such txs into the pool
+// breaks the low-churn invariant as blob constraints don't apply anymore. Even
+// though we could accept blocks containing such txs, a reorg would require moving
+// them back into the blob pool, which can break invariants.
+//
+// - Dropping blobs needs delay. When normal transactions are included, they
+// are immediately evicted from the pool since they are contained in the
+// including block. Blobs however are not included in the execution chain,
+// so a mini reorg cannot re-pool "lost" blob transactions. To support reorgs,
+// blobs are retained on disk until they are finalised.
+//
+// - Blobs can arrive via flashbots. Blocks might contain blob transactions we
+// have never seen on the network. Since we cannot recover them from blocks
+// either, the engine_newPayload needs to give them to us, and we cache them
+// until finality to support reorgs without tx losses.
+//
+// Whilst some constraints above might sound overly aggressive, the general idea is
+// that the blob pool should work robustly for its intended use case and whilst
+// anyone is free to use blob transactions for arbitrary non-rollup use cases,
+// they should not be allowed to run amok the network.
+//
+// Implementation wise there are a few interesting design choices:
+//
+// - Adding a transaction to the pool blocks until persisted to disk. This is
+// viable because TPS is low (2-4 blobs per block initially, maybe 8-16 at
+// peak), so natural churn is a couple MB per block. Replacements doing O(n)
+// updates are forbidden and transaction propagation is pull based (i.e. no
+// pileup of pending data).
+//
+// - When transactions are chosen for inclusion, the primary criteria is the
+// signer tip (and having a basefee/data fee high enough of course). However,
+// same-tip transactions will be split by their basefee/datafee, preferring
+// those that are closer to the current network limits. The idea being that
+// very relaxed ones can be included even if the fees go up, when the closer
+// ones could already be invalid.
+//
+// When the pool eventually reaches saturation, some old transactions - that may
+// never execute - will need to be evicted in favor of newer ones. The eviction
+// strategy is quite complex:
+//
+// - Exceeding capacity evicts the highest-nonce of the account with the lowest
+// paying blob transaction anywhere in the pooled nonce-sequence, as that tx
+// would be executed the furthest in the future and is thus blocking anything
+// after it. The smallest is deliberately not evicted to avoid a nonce-gap.
+//
+// - Analogously, if the pool is full, the consideration price of a new tx for
+// evicting an old one is the smallest price in the entire nonce-sequence of
+// the account. This avoids malicious users DoSing the pool with seemingly
+// high paying transactions hidden behind a low-paying blocked one.
+//
+// - Since blob transactions have 3 price parameters: execution tip, execution
+// fee cap and data fee cap, there's no singular parameter to create a total
+// price ordering on. What's more, since the base fee and blob fee can move
+// independently of one another, there's no pre-defined way to combine them
+// into a stable order either. This leads to a multi-dimensional problem to
+// solve after every block.
+//
+// - The first observation is that comparing 1559 base fees or 4844 blob fees
+// needs to happen in the context of their dynamism. Since these fees jump
+// up or down in ~1.125 multipliers (at max) across blocks, comparing fees
+// in two transactions should be based on log1.125(fee) to eliminate noise.
+//
+// - The second observation is that the basefee and blobfee move independently,
+// so there's no way to split mixed txs on their own (A has higher base fee,
+// B has higher blob fee). Rather than look at the absolute fees, the useful
+// metric is the max time it can take to exceed the transaction's fee caps.
+// Specifically, we're interested in the number of jumps needed to go from
+// the current fee to the transaction's cap:
+//
+// jumps = log1.125(txfee) - log1.125(basefee)
+//
+// - The third observation is that the base fee tends to hover around rather
+// than swing wildly. The number of jumps needed from the current fee starts
+// to get less relevant the higher it is. To remove the noise here too, the
+// pool will use log(jumps) as the delta for comparing transactions.
+//
+// delta = sign(jumps) * log(abs(jumps))
+//
+// - To establish a total order, we need to reduce the dimensionality of the
+// two base fees (log jumps) to a single value. The interesting aspect from
+// the pool's perspective is how fast will a tx get executable (fees going
+// down, crossing the smaller negative jump counter) or non-executable (fees
+// going up, crossing the smaller positive jump counter). As such, the pool
+// cares only about the min of the two delta values for eviction priority.
+//
+// priority = min(delta-basefee, delta-blobfee)
+//
+// - The above very aggressive dimensionality and noise reduction should result
+// in transaction being grouped into a small number of buckets, the further
+// the fees the larger the buckets. This is good because it allows us to use
+// the miner tip meaningfully as a splitter.
+//
+// - For the scenario where the pool does not contain non-executable blob txs
+// anymore, it does not make sense to grant a later eviction priority to txs
+// with high fee caps since it could enable pool wars. As such, any positive
+// priority will be grouped together.
+//
+// priority = min(delta-basefee, delta-blobfee, 0)
+//
+// Optimisation tradeoffs:
+//
+// - Eviction relies on 3 fee minimums per account (exec tip, exec cap and blob
+// cap). Maintaining these values across all transactions from the account is
+// problematic as each transaction replacement or inclusion would require a
+// rescan of all other transactions to recalculate the minimum. Instead, the
+// pool maintains a rolling minimum across the nonce range. Updating all the
+// minimums will need to be done only starting at the swapped in/out nonce
+// and leading up to the first no-change.
+type BlobPool struct {
+ config Config // Pool configuration
+ reserve txpool.AddressReserver // Address reserver to ensure exclusivity across subpools
+
+ store billy.Database // Persistent data store for the tx metadata and blobs
+ stored uint64 // Useful data size of all transactions on disk
+ limbo *limbo // Persistent data store for the non-finalized blobs
+
+ signer types.Signer // Transaction signer to use for sender recovery
+ chain BlockChain // Chain object to access the state through
+
+ head *types.Header // Current head of the chain
+ state *state.StateDB // Current state at the head of the chain
+ gasTip *uint256.Int // Currently accepted minimum gas tip
+
+ lookup map[common.Hash]uint64 // Lookup table mapping hashes to tx billy entries
+ index map[common.Address][]*blobTxMeta // Blob transactions grouped by accounts, sorted by nonce
+ spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts
+ evict *evictHeap // Heap of cheapest accounts for eviction when full
+
+ eventFeed event.Feed // Event feed to send out new tx events on pool inclusion
+ eventScope event.SubscriptionScope // Event scope to track and mass unsubscribe on termination
+
+ lock sync.RWMutex // Mutex protecting the pool during reorg handling
+}
+
+// New creates a new blob transaction pool to gather, sort and filter inbound
+// blob transactions from the network.
+func New(config Config, chain BlockChain) *BlobPool {
+ // Sanitize the input to ensure no vulnerable gas prices are set
+ config = (&config).sanitize()
+
+ // Create the transaction pool with its initial settings
+ return &BlobPool{
+ config: config,
+ signer: types.LatestSigner(chain.Config()),
+ chain: chain,
+ lookup: make(map[common.Hash]uint64),
+ index: make(map[common.Address][]*blobTxMeta),
+ spent: make(map[common.Address]*uint256.Int),
+ }
+}
+
+// Filter returns whether the given transaction can be consumed by the blob pool.
+func (p *BlobPool) Filter(tx *types.Transaction) bool {
+ return tx.Type() == types.BlobTxType
+}
+
+// Init sets the gas price needed to keep a transaction in the pool and the chain
+// head to allow balance / nonce checks. The transaction journal will be loaded
+// from disk and filtered based on the provided starting settings.
+func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error {
+ p.reserve = reserve
+
+ var (
+ queuedir string
+ limbodir string
+ )
+ if p.config.Datadir != "" {
+ queuedir = filepath.Join(p.config.Datadir, pendingTransactionStore)
+ if err := os.MkdirAll(queuedir, 0700); err != nil {
+ return err
+ }
+ limbodir = filepath.Join(p.config.Datadir, limboedTransactionStore)
+ if err := os.MkdirAll(limbodir, 0700); err != nil {
+ return err
+ }
+ }
+ state, err := p.chain.StateAt(head.Root)
+ if err != nil {
+ return err
+ }
+ p.head, p.state = head, state
+
+ // Index all transactions on disk and delete anything unprocessable
+ var fails []uint64
+ index := func(id uint64, size uint32, blob []byte) {
+ if p.parseTransaction(id, size, blob) != nil {
+ fails = append(fails, id)
+ }
+ }
+ store, err := billy.Open(billy.Options{Path: queuedir}, newSlotter(), index)
+ if err != nil {
+ return err
+ }
+ p.store = store
+
+ if len(fails) > 0 {
+ log.Warn("Dropping invalidated blob transactions", "ids", fails)
+ for _, id := range fails {
+ if err := p.store.Delete(id); err != nil {
+ p.Close()
+ return err
+ }
+ }
+ }
+ // Sort the indexed transactions by nonce and delete anything gapped, create
+ // the eviction heap of anyone still standing
+ for addr := range p.index {
+ p.recheck(addr, nil)
+ }
+ var (
+ basefee = uint256.MustFromBig(misc.CalcBaseFee(p.chain.Config(), p.head))
+ blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinDataGasprice))
+ )
+ if p.head.ExcessDataGas != nil {
+ blobfee = uint256.MustFromBig(misc.CalcBlobFee(*p.head.ExcessDataGas))
+ }
+ p.evict = newPriceHeap(basefee, blobfee, &p.index)
+
+ // Pool initialized, attach the blob limbo to it to track blobs included
+ // recently but not yet finalized
+ p.limbo, err = newLimbo(limbodir)
+ if err != nil {
+ p.Close()
+ return err
+ }
+ // Set the configured gas tip, triggering a filtering of anything just loaded
+ basefeeGauge.Update(int64(basefee.Uint64()))
+ blobfeeGauge.Update(int64(blobfee.Uint64()))
+
+ p.SetGasTip(gasTip)
+
+ // Since the user might have modified their pool's capacity, evict anything
+ // above the current allowance
+ for p.stored > p.config.Datacap {
+ p.drop()
+ }
+ // Update the metrics and return the constructed pool
+ datacapGauge.Update(int64(p.config.Datacap))
+ p.updateStorageMetrics()
+ return nil
+}
+
+// Close closes down the underlying persistent store.
+func (p *BlobPool) Close() error {
+ var errs []error
+ if err := p.limbo.Close(); err != nil { // NOTE(review): p.limbo is nil when Init fails before newLimbo (see the p.Close() calls in Init) — needs a nil guard before closing
+ errs = append(errs, err)
+ }
+ if err := p.store.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ p.eventScope.Close()
+
+ switch {
+ case errs == nil:
+ return nil
+ case len(errs) == 1:
+ return errs[0]
+ default:
+ return fmt.Errorf("%v", errs)
+ }
+}
+
+// parseTransaction is a callback method on pool creation that gets called for
+// each transaction on disk to create the in-memory metadata index.
+func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
+ item := new(blobTx)
+ if err := rlp.DecodeBytes(blob, item); err != nil {
+ // This path is impossible unless the disk data representation changes
+ // across restarts. For that ever improbable case, recover gracefully
+ // by ignoring this data entry.
+ log.Error("Failed to decode blob pool entry", "id", id, "err", err)
+ return err
+ }
+ meta := newBlobTxMeta(id, size, item.Tx)
+
+ sender, err := p.signer.Sender(item.Tx)
+ if err != nil {
+ // This path is impossible unless the signature validity changes across
+ // restarts. For that ever unprobable case, recover gracefully by ignoring
+ // this data entry.
+ log.Error("Failed to recover blob tx sender", "id", id, "hash", item.Tx.Hash(), "err", err)
+ return err
+ }
+ if _, ok := p.index[sender]; !ok {
+ if err := p.reserve(sender, true); err != nil {
+ return err
+ }
+ p.index[sender] = []*blobTxMeta{}
+ p.spent[sender] = new(uint256.Int)
+ }
+ p.index[sender] = append(p.index[sender], meta)
+ p.spent[sender] = new(uint256.Int).Add(p.spent[sender], meta.costCap)
+
+ p.lookup[meta.hash] = meta.id
+ p.stored += uint64(meta.size)
+
+ return nil
+}
+
+// recheck verifies the pool's content for a specific account and drops anything
+// that does not fit anymore (dangling or filled nonce, overdraft).
+func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint64) {
+ // Sort the transactions belonging to the account so reinjects can be simpler
+ txs := p.index[addr]
+ if inclusions != nil && txs == nil { // during reorgs, we might find new accounts
+ return
+ }
+ sort.Slice(txs, func(i, j int) bool {
+ return txs[i].nonce < txs[j].nonce
+ })
+ // If there is a gap between the chain state and the blob pool, drop
+ // all the transactions as they are non-executable. Similarly, if the
+ // entire tx range was included, drop all.
+ var (
+ next = p.state.GetNonce(addr)
+ gapped = txs[0].nonce > next
+ filled = txs[len(txs)-1].nonce < next
+ )
+ if gapped || filled {
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for i := 0; i < len(txs); i++ {
+ ids = append(ids, txs[i].id)
+ nonces = append(nonces, txs[i].nonce)
+
+ p.stored -= uint64(txs[i].size)
+ delete(p.lookup, txs[i].hash)
+
+ // Included transactions blobs need to be moved to the limbo
+ if filled && inclusions != nil {
+ p.offload(addr, txs[i].nonce, txs[i].id, inclusions)
+ }
+ }
+ delete(p.index, addr)
+ delete(p.spent, addr)
+ if inclusions != nil { // only during reorgs will the heap be initialized
+ heap.Remove(p.evict, p.evict.index[addr])
+ }
+ p.reserve(addr, false)
+
+ if gapped {
+ log.Warn("Dropping dangling blob transactions", "from", addr, "missing", next, "drop", nonces, "ids", ids)
+ } else {
+ log.Trace("Dropping filled blob transactions", "from", addr, "filled", nonces, "ids", ids)
+ }
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ return
+ }
+ // If there is overlap between the chain state and the blob pool, drop
+ // anything below the current state
+ if txs[0].nonce < next {
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for txs[0].nonce < next {
+ ids = append(ids, txs[0].id)
+ nonces = append(nonces, txs[0].nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[0].costCap)
+ p.stored -= uint64(txs[0].size)
+ delete(p.lookup, txs[0].hash)
+
+ // Included transactions blobs need to be moved to the limbo
+ if inclusions != nil {
+ p.offload(addr, txs[0].nonce, txs[0].id, inclusions)
+ }
+ txs = txs[1:]
+ }
+ log.Trace("Dropping overlapped blob transactions", "from", addr, "overlapped", nonces, "ids", ids, "left", len(txs))
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ p.index[addr] = txs
+ }
+ // Iterate over the transactions to initialize their eviction thresholds
+ // and to detect any nonce gaps
+ txs[0].evictionExecTip = txs[0].execTipCap
+ txs[0].evictionExecFeeJumps = txs[0].basefeeJumps
+ txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
+
+ for i := 1; i < len(txs); i++ {
+ // If there's no nonce gap, initialize the eviction thresholds as the
+ // minimum between the cumulative thresholds and the current tx fees
+ if txs[i].nonce == txs[i-1].nonce+1 {
+ txs[i].evictionExecTip = txs[i-1].evictionExecTip
+ if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
+ txs[i].evictionExecTip = txs[i].execTipCap
+ }
+ txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
+ if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
+ txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
+ }
+ txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
+ if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
+ txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
+ }
+ continue
+ }
+ // Sanity check that there's no double nonce. This case would be a coding
+ // error, but better know about it
+ if txs[i].nonce == txs[i-1].nonce {
+ log.Error("Duplicate nonce blob transaction", "from", addr, "nonce", txs[i].nonce)
+ }
+ // Otherwise if there's a nonce gap evict all later transactions
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for j := i; j < len(txs); j++ {
+ ids = append(ids, txs[j].id)
+ nonces = append(nonces, txs[j].nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[j].costCap)
+ p.stored -= uint64(txs[j].size)
+ delete(p.lookup, txs[j].hash)
+ }
+ txs = txs[:i]
+
+ log.Error("Dropping gapped blob transactions", "from", addr, "missing", txs[i-1].nonce+1, "drop", nonces, "ids", ids)
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ p.index[addr] = txs
+ break
+ }
+ // Ensure that there's no over-draft, this is expected to happen when some
+ // transactions get included without publishing on the network
+ var (
+ balance = uint256.MustFromBig(p.state.GetBalance(addr))
+ spent = p.spent[addr]
+ )
+ if spent.Cmp(balance) > 0 {
+ // Evict the highest nonce transactions until the pending set falls under
+ // the account's available balance
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for p.spent[addr].Cmp(balance) > 0 {
+ last := txs[len(txs)-1]
+ txs[len(txs)-1] = nil
+ txs = txs[:len(txs)-1]
+
+ ids = append(ids, last.id)
+ nonces = append(nonces, last.nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
+ p.stored -= uint64(last.size)
+ delete(p.lookup, last.hash)
+ }
+ if len(txs) == 0 {
+ delete(p.index, addr)
+ delete(p.spent, addr)
+ if inclusions != nil { // only during reorgs will the heap be initialized
+ heap.Remove(p.evict, p.evict.index[addr])
+ }
+ p.reserve(addr, false)
+ } else {
+ p.index[addr] = txs
+ }
+ log.Warn("Dropping overdrafted blob transactions", "from", addr, "balance", balance, "spent", spent, "drop", nonces, "ids", ids)
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ }
+ // Sanity check that no account can have more queued transactions than the
+ // DoS protection threshold.
+ if len(txs) > maxTxsPerAccount {
+ // Evict the highest nonce transactions until the pending set falls under
+ // the account's transaction cap
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for len(txs) > maxTxsPerAccount {
+ last := txs[len(txs)-1]
+ txs[len(txs)-1] = nil
+ txs = txs[:len(txs)-1]
+
+ ids = append(ids, last.id)
+ nonces = append(nonces, last.nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
+ p.stored -= uint64(last.size)
+ delete(p.lookup, last.hash)
+ }
+ p.index[addr] = txs
+
+ log.Warn("Dropping overcapped blob transactions", "from", addr, "kept", len(txs), "drop", nonces, "ids", ids)
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ }
+ // Included cheap transactions might have left the remaining ones better from
+ // an eviction point, fix any potential issues in the heap.
+ if _, ok := p.index[addr]; ok && inclusions != nil {
+ heap.Fix(p.evict, p.evict.index[addr])
+ }
+}
+
+// offload removes a tracked blob transaction from the pool and moves it into the
+// limbo for tracking until finality.
+//
+// The method may log errors for various unexpected scenarios but will not return
+// any of it since there's no clear error case. Some errors may be due to coding
+// issues, others caused by signers mining MEV stuff or swapping transactions. In
+// all cases, the pool needs to continue operating.
+func (p *BlobPool) offload(addr common.Address, nonce uint64, id uint64, inclusions map[common.Hash]uint64) {
+ data, err := p.store.Get(id)
+ if err != nil {
+ log.Error("Blobs missing for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
+ return
+ }
+ item := new(blobTx)
+ if err = rlp.DecodeBytes(data, item); err != nil {
+ log.Error("Blobs corrupted for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
+ return
+ }
+ block, ok := inclusions[item.Tx.Hash()]
+ if !ok {
+ log.Warn("Blob transaction swapped out by signer", "from", addr, "nonce", nonce, "id", id)
+ return
+ }
+ if err := p.limbo.push(item.Tx.Hash(), block, item.Blobs, item.Commits, item.Proofs); err != nil {
+ log.Warn("Failed to offload blob tx into limbo", "err", err)
+ return
+ }
+}
+
+// Reset implements txpool.SubPool, allowing the blob pool's internal state to be
+// kept in sync with the main transaction pool's internal state.
+func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
+ waitStart := time.Now()
+ p.lock.Lock()
+ resetwaitHist.Update(time.Since(waitStart).Nanoseconds())
+ defer p.lock.Unlock()
+
+ defer func(start time.Time) {
+ resettimeHist.Update(time.Since(start).Nanoseconds())
+ }(time.Now())
+
+ statedb, err := p.chain.StateAt(newHead.Root)
+ if err != nil {
+ log.Error("Failed to reset blobpool state", "err", err)
+ return
+ }
+ p.head = newHead
+ p.state = statedb
+
+ // Run the reorg between the old and new head and figure out which accounts
+ // need to be rechecked and which transactions need to be readded
+ if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil {
+ for addr, txs := range reinject {
+ // Blindly push all the lost transactions back into the pool
+ for _, tx := range txs {
+ p.reinject(addr, tx)
+ }
+ // Recheck the account's pooled transactions to drop included and
+ // invalidated one
+ p.recheck(addr, inclusions)
+ }
+ }
+ // Flush out any blobs from limbo that are older than the latest finality
+ p.limbo.finalize(p.chain.CurrentFinalBlock())
+
+ // Reset the price heap for the new set of basefee/blobfee pairs
+ var (
+ basefee = uint256.MustFromBig(misc.CalcBaseFee(p.chain.Config(), newHead))
+ blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinDataGasprice))
+ )
+ if newHead.ExcessDataGas != nil {
+ blobfee = uint256.MustFromBig(misc.CalcBlobFee(*newHead.ExcessDataGas))
+ }
+ p.evict.reinit(basefee, blobfee, false)
+
+ basefeeGauge.Update(int64(basefee.Uint64()))
+ blobfeeGauge.Update(int64(blobfee.Uint64()))
+ p.updateStorageMetrics()
+}
+
+// reorg assembles all the transactors and missing transactions between an old
+// and new head to figure out which account's tx set needs to be rechecked and
+// which transactions need to be requeued.
+//
+// The transaction-block inclusion infos are also returned to allow tracking any
+// just-included blocks by block number in the limbo.
+func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]*types.Transaction, map[common.Hash]uint64) {
+	// If the pool was not yet initialized, don't do anything
+	if oldHead == nil {
+		return nil, nil
+	}
+	// If the reorg is too deep, avoid doing it (will happen during snap sync)
+	oldNum := oldHead.Number.Uint64()
+	newNum := newHead.Number.Uint64()
+
+	if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
+		return nil, nil
+	}
+	// Reorg seems shallow enough to pull in all transactions into memory
+	var (
+		transactors = make(map[common.Address]struct{})
+		discarded   = make(map[common.Address][]*types.Transaction)
+		included    = make(map[common.Address][]*types.Transaction)
+		inclusions  = make(map[common.Hash]uint64)
+
+		rem = p.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
+		add = p.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
+	)
+	if add == nil {
+		// if the new head is nil, it means that something happened between
+		// the firing of newhead-event and _now_: most likely a
+		// reorg caused by sync-reversion or explicit sethead back to an
+		// earlier block.
+		log.Warn("Blobpool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash())
+		return nil, nil
+	}
+	if rem == nil {
+		// This can happen if a setHead is performed, where we simply discard
+		// the old head from the chain. If that is the case, we don't have the
+		// lost transactions anymore, and there's nothing to add.
+		if newNum >= oldNum {
+			// If we reorged to a same or higher number, then it's not a case
+			// of setHead
+			log.Warn("Blobpool reset with missing old head",
+				"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
+			return nil, nil
+		}
+		// If the reorg ended up on a lower number, it's indicative of setHead
+		// being the cause
+		log.Debug("Skipping blobpool reset caused by setHead",
+			"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
+		return nil, nil
+	}
+	// Both old and new blocks exist, traverse through the progression chain
+	// and accumulate the transactors and transactions
+	//
+	// First rewind the longer side down to the height of the shorter one...
+	for rem.NumberU64() > add.NumberU64() {
+		for _, tx := range rem.Transactions() {
+			from, _ := p.signer.Sender(tx)
+
+			discarded[from] = append(discarded[from], tx)
+			transactors[from] = struct{}{}
+		}
+		if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
+			log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash())
+			return nil, nil
+		}
+	}
+	for add.NumberU64() > rem.NumberU64() {
+		for _, tx := range add.Transactions() {
+			from, _ := p.signer.Sender(tx)
+
+			included[from] = append(included[from], tx)
+			inclusions[tx.Hash()] = add.NumberU64()
+			transactors[from] = struct{}{}
+		}
+		if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
+			log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash())
+			return nil, nil
+		}
+	}
+	// ...then walk both sides back in lockstep until the common ancestor
+	for rem.Hash() != add.Hash() {
+		for _, tx := range rem.Transactions() {
+			from, _ := p.signer.Sender(tx)
+
+			discarded[from] = append(discarded[from], tx)
+			transactors[from] = struct{}{}
+		}
+		if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
+			log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash())
+			return nil, nil
+		}
+		for _, tx := range add.Transactions() {
+			from, _ := p.signer.Sender(tx)
+
+			included[from] = append(included[from], tx)
+			inclusions[tx.Hash()] = add.NumberU64()
+			transactors[from] = struct{}{}
+		}
+		if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
+			log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash())
+			return nil, nil
+		}
+	}
+	// Generate the set of transactions per address to pull back into the pool,
+	// also updating the rest along the way
+	reinject := make(map[common.Address][]*types.Transaction)
+	for addr := range transactors {
+		// Generate the set that was lost to reinject into the pool
+		lost := make([]*types.Transaction, 0, len(discarded[addr]))
+		for _, tx := range types.TxDifference(discarded[addr], included[addr]) {
+			if p.Filter(tx) {
+				lost = append(lost, tx)
+			}
+		}
+		reinject[addr] = lost
+
+		// Update the set that was already reincluded to track the blocks in limbo
+		for _, tx := range types.TxDifference(included[addr], discarded[addr]) {
+			if p.Filter(tx) {
+				p.limbo.update(tx.Hash(), inclusions[tx.Hash()])
+			}
+		}
+	}
+	return reinject, inclusions
+}
+
+// reinject blindly pushes a transaction previously included in the chain - and
+// just reorged out - into the pool. The transaction is assumed valid (having
+// been in the chain), thus the only validation needed is nonce sorting and over-
+// draft checks after injection.
+//
+// Note, the method will not initialize the eviction cache values as those will
+// be done once for all transactions belonging to an account after all individual
+// transactions are injected back into the pool.
+func (p *BlobPool) reinject(addr common.Address, tx *types.Transaction) {
+	// Retrieve the associated blob from the limbo. Without the blobs, we cannot
+	// add the transaction back into the pool as it is not mineable.
+	blobs, commits, proofs, err := p.limbo.pull(tx.Hash())
+	if err != nil {
+		log.Error("Blobs unavailable, dropping reorged tx", "err", err)
+		return
+	}
+	// Serialize the transaction back into the primary datastore
+	blob, err := rlp.EncodeToBytes(&blobTx{Tx: tx, Blobs: blobs, Commits: commits, Proofs: proofs})
+	if err != nil {
+		log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
+		return
+	}
+	id, err := p.store.Put(blob)
+	if err != nil {
+		log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err)
+		return
+	}
+	// Update the indices and metrics
+	meta := newBlobTxMeta(id, p.store.Size(id), tx)
+
+	if _, ok := p.index[addr]; !ok {
+		// First tracked transaction from this account: request exclusive
+		// tracking of the address by this subpool.
+		if err := p.reserve(addr, true); err != nil {
+			log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err)
+			return
+		}
+		p.index[addr] = []*blobTxMeta{meta}
+		p.spent[addr] = meta.costCap
+		p.evict.Push(addr)
+	} else {
+		p.index[addr] = append(p.index[addr], meta)
+		p.spent[addr] = new(uint256.Int).Add(p.spent[addr], meta.costCap)
+	}
+	p.lookup[meta.hash] = meta.id
+	p.stored += uint64(meta.size)
+}
+
+// SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements
+// to be kept in sync with the main transaction pool's gas requirements.
+func (p *BlobPool) SetGasTip(tip *big.Int) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	// Store the new minimum gas tip
+	old := p.gasTip
+	p.gasTip = uint256.MustFromBig(tip)
+
+	// If the min miner fee increased, remove transactions below the new threshold
+	if old == nil || p.gasTip.Cmp(old) > 0 {
+		for addr, txs := range p.index {
+			for i, tx := range txs {
+				if tx.execTipCap.Cmp(p.gasTip) < 0 {
+					// Drop the offending transaction
+					var (
+						ids    = []uint64{tx.id}
+						nonces = []uint64{tx.nonce}
+					)
+					p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
+					p.stored -= uint64(tx.size)
+					delete(p.lookup, tx.hash)
+					txs[i] = nil
+
+					// Drop everything afterwards, no gaps allowed
+					// (tx below shadows the outer loop variable on purpose)
+					for j, tx := range txs[i+1:] {
+						ids = append(ids, tx.id)
+						nonces = append(nonces, tx.nonce)
+
+						p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], tx.costCap)
+						p.stored -= uint64(tx.size)
+						delete(p.lookup, tx.hash)
+						txs[i+1+j] = nil
+					}
+					// Clear out the dropped transactions from the index
+					if i > 0 {
+						// Some transactions survived, truncate and re-heapify
+						p.index[addr] = txs[:i]
+						heap.Fix(p.evict, p.evict.index[addr])
+					} else {
+						// Entire account dropped, release the reservation too
+						delete(p.index, addr)
+						delete(p.spent, addr)
+
+						heap.Remove(p.evict, p.evict.index[addr])
+						p.reserve(addr, false)
+					}
+					// Clear out the transactions from the data store
+					log.Warn("Dropping underpriced blob transaction", "from", addr, "rejected", tx.nonce, "tip", tx.execTipCap, "want", tip, "drop", nonces, "ids", ids)
+					for _, id := range ids {
+						if err := p.store.Delete(id); err != nil {
+							log.Error("Failed to delete dropped transaction", "id", id, "err", err)
+						}
+					}
+					break
+				}
+			}
+		}
+	}
+	log.Debug("Blobpool tip threshold updated", "tip", tip)
+	pooltipGauge.Update(tip.Int64())
+	p.updateStorageMetrics()
+}
+
+// validateTx checks whether a transaction is valid according to the consensus
+// rules and adheres to some heuristic limits of the local node (price and size).
+func (p *BlobPool) validateTx(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error {
+	// Ensure the transaction adheres to basic pool filters (type, size, tip) and
+	// consensus rules
+	baseOpts := &txpool.ValidationOptions{
+		Config:  p.chain.Config(),
+		Accept:  1 << types.BlobTxType,
+		MaxSize: txMaxSize,
+		MinTip:  p.gasTip.ToBig(),
+	}
+	if err := txpool.ValidateTransaction(tx, blobs, commits, proofs, p.head, p.signer, baseOpts); err != nil {
+		return err
+	}
+	// Ensure the transaction adheres to the stateful pool filters (nonce, balance)
+	stateOpts := &txpool.ValidationOptionsWithState{
+		State: p.state,
+
+		FirstNonceGap: func(addr common.Address) uint64 {
+			// Nonce gaps are not permitted in the blob pool, the first gap will
+			// be the next nonce shifted by however many transactions we already
+			// have pooled.
+			return p.state.GetNonce(addr) + uint64(len(p.index[addr]))
+		},
+		UsedAndLeftSlots: func(addr common.Address) (int, int) {
+			have := len(p.index[addr])
+			if have >= maxTxsPerAccount {
+				return have, 0
+			}
+			return have, maxTxsPerAccount - have
+		},
+		ExistingExpenditure: func(addr common.Address) *big.Int {
+			if spent := p.spent[addr]; spent != nil {
+				return spent.ToBig()
+			}
+			return new(big.Int)
+		},
+		ExistingCost: func(addr common.Address, nonce uint64) *big.Int {
+			// Index strictly by the requested nonce (not tx.Nonce()): the guard
+			// and the lookup must agree, otherwise a callback invoked with a
+			// different nonce could read the wrong slot or run out of bounds.
+			next := p.state.GetNonce(addr)
+			if uint64(len(p.index[addr])) > nonce-next {
+				return p.index[addr][int(nonce-next)].costCap.ToBig()
+			}
+			return nil
+		},
+	}
+	if err := txpool.ValidateTransactionWithState(tx, p.signer, stateOpts); err != nil {
+		return err
+	}
+	// If the transaction replaces an existing one, ensure that price bumps are
+	// adhered to.
+	var (
+		from, _ = p.signer.Sender(tx) // already validated above
+		next    = p.state.GetNonce(from)
+	)
+	if uint64(len(p.index[from])) > tx.Nonce()-next {
+		// Account can support the replacement, but the price bump must also be met
+		prev := p.index[from][int(tx.Nonce()-next)]
+		switch {
+		case tx.GasFeeCapIntCmp(prev.execFeeCap.ToBig()) <= 0:
+			return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap)
+		case tx.GasTipCapIntCmp(prev.execTipCap.ToBig()) <= 0:
+			return fmt.Errorf("%w: new tx gas tip cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap)
+		case tx.BlobGasFeeCapIntCmp(prev.blobFeeCap.ToBig()) <= 0:
+			return fmt.Errorf("%w: new tx blob gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap)
+		}
+		// All caps strictly increased, also enforce the configured percentage bump
+		var (
+			multiplier = uint256.NewInt(100 + p.config.PriceBump)
+			onehundred = uint256.NewInt(100)
+
+			minGasFeeCap     = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execFeeCap), onehundred)
+			minGasTipCap     = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execTipCap), onehundred)
+			minBlobGasFeeCap = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.blobFeeCap), onehundred)
+		)
+		switch {
+		case tx.GasFeeCapIntCmp(minGasFeeCap.ToBig()) < 0:
+			return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap, p.config.PriceBump)
+		case tx.GasTipCapIntCmp(minGasTipCap.ToBig()) < 0:
+			return fmt.Errorf("%w: new tx gas tip cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap, p.config.PriceBump)
+		case tx.BlobGasFeeCapIntCmp(minBlobGasFeeCap.ToBig()) < 0:
+			return fmt.Errorf("%w: new tx blob gas fee cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap, p.config.PriceBump)
+		}
+	}
+	return nil
+}
+
+// Has reports whether the subpool is currently tracking a transaction with the
+// given hash.
+func (p *BlobPool) Has(hash common.Hash) bool {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	if _, tracked := p.lookup[hash]; tracked {
+		return true
+	}
+	return false
+}
+
+// Get returns a transaction if it is contained in the pool, or nil otherwise.
+func (p *BlobPool) Get(hash common.Hash) *txpool.Transaction {
+	// Track the amount of time waiting to retrieve a fully resolved blob tx from
+	// the pool and the amount of time actually spent on pulling the data from disk.
+	getStart := time.Now()
+	p.lock.RLock()
+	getwaitHist.Update(time.Since(getStart).Nanoseconds())
+	defer p.lock.RUnlock()
+
+	defer func(start time.Time) {
+		gettimeHist.Update(time.Since(start).Nanoseconds())
+	}(time.Now())
+
+	// Pull the blob from disk and return an assembled response
+	id, ok := p.lookup[hash]
+	if !ok {
+		// Unknown hash, not an error - the caller probes for existence
+		return nil
+	}
+	data, err := p.store.Get(id)
+	if err != nil {
+		log.Error("Tracked blob transaction missing from store", "hash", hash, "id", id, "err", err)
+		return nil
+	}
+	item := new(blobTx)
+	if err = rlp.DecodeBytes(data, item); err != nil {
+		log.Error("Blobs corrupted for traced transaction", "hash", hash, "id", id, "err", err)
+		return nil
+	}
+	return &txpool.Transaction{
+		Tx:            item.Tx,
+		BlobTxBlobs:   item.Blobs,
+		BlobTxCommits: item.Commits,
+		BlobTxProofs:  item.Proofs,
+	}
+}
+
+// Add inserts a set of blob transactions into the pool if they pass validation
+// (both consensus validity and pool restrictions). One error slot is returned
+// per input transaction.
+func (p *BlobPool) Add(txs []*txpool.Transaction, local bool, sync bool) []error {
+	errs := make([]error, len(txs))
+	for i := range txs {
+		errs[i] = p.add(txs[i].Tx, txs[i].BlobTxBlobs, txs[i].BlobTxCommits, txs[i].BlobTxProofs)
+	}
+	return errs
+}
+
+// add inserts a new blob transaction into the pool if it passes validation (both
+// consensus validity and pool restrictions).
+func (p *BlobPool) add(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) (err error) {
+	// The blob pool blocks on adding a transaction. This is because blob txs are
+	// only ever pulled from the network, so this method will act as the overload
+	// protection for fetches.
+	waitStart := time.Now()
+	p.lock.Lock()
+	addwaitHist.Update(time.Since(waitStart).Nanoseconds())
+	defer p.lock.Unlock()
+
+	defer func(start time.Time) {
+		addtimeHist.Update(time.Since(start).Nanoseconds())
+	}(time.Now())
+
+	// Ensure the transaction is valid from all perspectives
+	if err := p.validateTx(tx, blobs, commits, proofs); err != nil {
+		log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
+		return err
+	}
+	// If the address is not yet known, request exclusivity to track the account
+	// only by this subpool until all transactions are evicted
+	from, _ := types.Sender(p.signer, tx) // already validated above
+	if _, ok := p.index[from]; !ok {
+		if err := p.reserve(from, true); err != nil {
+			return err
+		}
+		defer func() {
+			// If the transaction is rejected by some post-validation check, remove
+			// the lock on the reservation set.
+			//
+			// Note, `err` here is the named error return, which will be initialized
+			// by a return statement before running deferred methods. Take care with
+			// removing or subscoping err as it will break this clause.
+			if err != nil {
+				p.reserve(from, false)
+			}
+		}()
+	}
+	// Transaction permitted into the pool from a nonce and cost perspective,
+	// insert it into the database and update the indices
+	blob, err := rlp.EncodeToBytes(&blobTx{Tx: tx, Blobs: blobs, Commits: commits, Proofs: proofs})
+	if err != nil {
+		log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
+		return err
+	}
+	id, err := p.store.Put(blob)
+	if err != nil {
+		return err
+	}
+	meta := newBlobTxMeta(id, p.store.Size(id), tx)
+
+	var (
+		next   = p.state.GetNonce(from)
+		offset = int(tx.Nonce() - next)
+		newacc = false
+	)
+	// Snapshot the account's current tail eviction caps to later decide whether
+	// the heap needs fixing up.
+	var oldEvictionExecFeeJumps, oldEvictionBlobFeeJumps float64
+	if txs, ok := p.index[from]; ok {
+		oldEvictionExecFeeJumps = txs[len(txs)-1].evictionExecFeeJumps
+		oldEvictionBlobFeeJumps = txs[len(txs)-1].evictionBlobFeeJumps
+	}
+	if len(p.index[from]) > offset {
+		// Transaction replaces a previously queued one
+		prev := p.index[from][offset]
+		if err := p.store.Delete(prev.id); err != nil {
+			// Shitty situation, but try to recover gracefully instead of going boom
+			log.Error("Failed to delete replaced transaction", "id", prev.id, "err", err)
+		}
+		// Update the transaction index
+		p.index[from][offset] = meta
+		p.spent[from] = new(uint256.Int).Sub(p.spent[from], prev.costCap)
+		p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
+
+		delete(p.lookup, prev.hash)
+		p.lookup[meta.hash] = meta.id
+		p.stored += uint64(meta.size) - uint64(prev.size)
+	} else {
+		// Transaction extends previously scheduled ones
+		p.index[from] = append(p.index[from], meta)
+		if _, ok := p.spent[from]; !ok {
+			p.spent[from] = new(uint256.Int)
+			newacc = true
+		}
+		p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
+		p.lookup[meta.hash] = meta.id
+		p.stored += uint64(meta.size)
+	}
+	// Recompute the rolling eviction fields. In case of a replacement, this will
+	// recompute all subsequent fields. In case of an append, this will only do
+	// the fresh calculation.
+	txs := p.index[from]
+
+	for i := offset; i < len(txs); i++ {
+		// The first transaction will always use itself
+		if i == 0 {
+			txs[0].evictionExecTip = txs[0].execTipCap
+			txs[0].evictionExecFeeJumps = txs[0].basefeeJumps
+			txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
+
+			continue
+		}
+		// Subsequent transactions will use a rolling calculation
+		// (the running minimum of each cap across the account's nonce sequence)
+		txs[i].evictionExecTip = txs[i-1].evictionExecTip
+		if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
+			txs[i].evictionExecTip = txs[i].execTipCap
+		}
+		txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
+		if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
+			txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
+		}
+		txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
+		if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
+			txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
+		}
+	}
+	// Update the eviction heap with the new information:
+	//   - If the transaction is from a new account, add it to the heap
+	//   - If the account had a singleton tx replaced, update the heap (new price caps)
+	//   - If the account has a transaction replaced or appended, update the heap if significantly changed
+	switch {
+	case newacc:
+		heap.Push(p.evict, from)
+
+	case len(txs) == 1: // 1 tx and not a new acc, must be replacement
+		heap.Fix(p.evict, p.evict.index[from])
+
+	default: // replacement or new append
+		evictionExecFeeDiff := oldEvictionExecFeeJumps - txs[len(txs)-1].evictionExecFeeJumps
+		evictionBlobFeeDiff := oldEvictionBlobFeeJumps - txs[len(txs)-1].evictionBlobFeeJumps
+
+		if math.Abs(evictionExecFeeDiff) > 0.001 || math.Abs(evictionBlobFeeDiff) > 0.001 { // need math.Abs, can go up and down
+			heap.Fix(p.evict, p.evict.index[from])
+		}
+	}
+	// If the pool went over the allowed data limit, evict transactions until
+	// we're again below the threshold
+	for p.stored > p.config.Datacap {
+		p.drop()
+	}
+	p.updateStorageMetrics()
+
+	return nil
+}
+
+// drop removes the worst transaction from the pool. It is primarily used when a
+// freshly added transaction overflows the pool and needs to evict something. The
+// method is also called on startup if the user resizes their storage, might be an
+// expensive run but it should be fine-ish.
+func (p *BlobPool) drop() {
+	// Peek at the account with the worst transaction set to evict from (Go's heap
+	// stores the minimum at index zero of the heap slice) and retrieve its last
+	// transaction.
+	var (
+		from = p.evict.addrs[0] // cannot call drop on empty pool
+
+		txs  = p.index[from]
+		drop = txs[len(txs)-1]
+		last = len(txs) == 1
+	)
+	// Remove the transaction from the pool's index
+	if last {
+		delete(p.index, from)
+		delete(p.spent, from)
+		p.reserve(from, false)
+	} else {
+		txs[len(txs)-1] = nil
+		txs = txs[:len(txs)-1]
+
+		p.index[from] = txs
+		p.spent[from] = new(uint256.Int).Sub(p.spent[from], drop.costCap)
+	}
+	p.stored -= uint64(drop.size)
+	delete(p.lookup, drop.hash)
+
+	// Remove the transaction from the pool's eviction heap:
+	//   - If the entire account was dropped, pop off the address
+	//   - Otherwise, if the new tail has better eviction caps, fix the heap
+	if last {
+		heap.Pop(p.evict)
+	} else {
+		tail := txs[len(txs)-1] // new tail, surely exists
+
+		evictionExecFeeDiff := tail.evictionExecFeeJumps - drop.evictionExecFeeJumps
+		evictionBlobFeeDiff := tail.evictionBlobFeeJumps - drop.evictionBlobFeeJumps
+
+		if evictionExecFeeDiff > 0.001 || evictionBlobFeeDiff > 0.001 { // no need for math.Abs, monotonic decreasing
+			heap.Fix(p.evict, 0)
+		}
+	}
+	// Remove the transaction from the data store
+	log.Warn("Evicting overflown blob transaction", "from", from, "evicted", drop.nonce, "id", drop.id)
+	if err := p.store.Delete(drop.id); err != nil {
+		log.Error("Failed to drop evicted transaction", "id", drop.id, "err", err)
+	}
+}
+
+// Pending retrieves all currently processable transactions, grouped by origin
+// account and sorted by nonce.
+func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
+ // Track the amount of time waiting to retrieve the list of pending blob txs
+ // from the pool and the amount of time actually spent on assembling the data.
+ // The latter will be pretty much moot, but we've kept it to have symmetric
+ // across all user operations.
+ pendStart := time.Now()
+ p.lock.RLock()
+ pendwaitHist.Update(time.Since(pendStart).Nanoseconds())
+ defer p.lock.RUnlock()
+
+ defer func(start time.Time) {
+ pendtimeHist.Update(time.Since(start).Nanoseconds())
+ }(time.Now())
+
+ pending := make(map[common.Address][]*txpool.LazyTransaction)
+ for addr, txs := range p.index {
+ var lazies []*txpool.LazyTransaction
+ for _, tx := range txs {
+ lazies = append(lazies, &txpool.LazyTransaction{
+ Pool: p,
+ Hash: tx.hash,
+ Time: time.Now(), // TODO(karalabe): Maybe save these and use that?
+ GasFeeCap: tx.execFeeCap.ToBig(),
+ GasTipCap: tx.execTipCap.ToBig(),
+ })
+ }
+ if len(lazies) > 0 {
+ pending[addr] = lazies
+ }
+ }
+ return pending
+}
+
+// updateStorageMetrics retrieves a bunch of stats from the data store and pushes
+// them out as metrics.
+func (p *BlobPool) updateStorageMetrics() {
+	stats := p.store.Infos()
+
+	var (
+		dataused uint64
+		datareal uint64
+		slotused uint64
+
+		oversizedDataused uint64
+		oversizedDatagaps uint64
+		oversizedSlotused uint64
+		oversizedSlotgaps uint64
+	)
+	for _, shelf := range stats.Shelves {
+		slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize)
+		slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize)
+
+		dataused += slotDataused
+		datareal += slotDataused + slotDatagaps
+		slotused += shelf.FilledSlots
+
+		// Per-shelf gauges are registered lazily, keyed by blob capacity
+		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused))
+		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps))
+		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots))
+		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots))
+
+		// Separately aggregate the shelves larger than any valid transaction
+		if shelf.SlotSize/blobSize > maxBlobsPerTransaction {
+			oversizedDataused += slotDataused
+			oversizedDatagaps += slotDatagaps
+			oversizedSlotused += shelf.FilledSlots
+			oversizedSlotgaps += shelf.GappedSlots
+		}
+	}
+	datausedGauge.Update(int64(dataused))
+	datarealGauge.Update(int64(datareal))
+	slotusedGauge.Update(int64(slotused))
+
+	oversizedDatausedGauge.Update(int64(oversizedDataused))
+	oversizedDatagapsGauge.Update(int64(oversizedDatagaps))
+	oversizedSlotusedGauge.Update(int64(oversizedSlotused))
+	oversizedSlotgapsGauge.Update(int64(oversizedSlotgaps))
+
+	p.updateLimboMetrics()
+}
+
+// updateLimboMetrics retrieves a bunch of stats from the limbo store and pushes
+// them out as metrics.
+func (p *BlobPool) updateLimboMetrics() {
+	stats := p.limbo.store.Infos()
+
+	var (
+		dataused uint64
+		datareal uint64
+		slotused uint64
+	)
+	for _, shelf := range stats.Shelves {
+		slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize)
+		slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize)
+
+		dataused += slotDataused
+		datareal += slotDataused + slotDatagaps
+		slotused += shelf.FilledSlots
+
+		// Per-shelf gauges are registered lazily, keyed by blob capacity
+		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused))
+		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps))
+		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots))
+		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots))
+	}
+	limboDatausedGauge.Update(int64(dataused))
+	limboDatarealGauge.Update(int64(datareal))
+	limboSlotusedGauge.Update(int64(slotused))
+}
+
+// SubscribeTransactions registers a subscription for NewTxsEvent and starts
+// delivering events to the given channel.
+func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription {
+	sub := p.eventFeed.Subscribe(ch)
+	return p.eventScope.Track(sub)
+}
+
+// Nonce returns the next nonce of an account, with all transactions executable
+// by the pool already applied on top.
+func (p *BlobPool) Nonce(addr common.Address) uint64 {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	txs, live := p.index[addr]
+	if !live {
+		// Nothing pooled, fall back to the state's nonce
+		return p.state.GetNonce(addr)
+	}
+	return txs[len(txs)-1].nonce + 1
+}
+
+// Stats retrieves the current pool stats, namely the number of pending and the
+// number of queued (non-executable) transactions.
+func (p *BlobPool) Stats() (int, int) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ var pending int
+ for _, txs := range p.index {
+ pending += len(txs)
+ }
+ return pending, 0 // No non-executable txs in the blob pool
+}
+
+// Content retrieves the data content of the transaction pool, returning all the
+// pending as well as queued transactions, grouped by account and sorted by nonce.
+//
+// For the blob pool, this method will return nothing for now.
+// TODO(karalabe): Abstract out the returned metadata.
+func (p *BlobPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
+	pending := make(map[common.Address][]*types.Transaction)
+	queued := make(map[common.Address][]*types.Transaction)
+	return pending, queued
+}
+
+// ContentFrom retrieves the data content of the transaction pool, returning the
+// pending as well as queued transactions of this address, grouped by nonce.
+//
+// For the blob pool, this method will return nothing for now.
+// TODO(karalabe): Abstract out the returned metadata.
+func (p *BlobPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
+ return []*types.Transaction{}, []*types.Transaction{}
+}
+
+// Locals retrieves the accounts currently considered local by the pool.
+//
+// There is no notion of local accounts in the blob pool.
+func (p *BlobPool) Locals() []common.Address {
+	return make([]common.Address, 0)
+}
+
+// Status returns the known status (unknown/pending/queued) of a transaction
+// identified by its hash.
+func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus {
+	if !p.Has(hash) {
+		return txpool.TxStatusUnknown
+	}
+	// Everything tracked by the blob pool is executable
+	return txpool.TxStatusPending
+}
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
new file mode 100644
index 0000000000000..a657dd339080e
--- /dev/null
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -0,0 +1,1247 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package blobpool
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/sha256"
+ "errors"
+ "math"
+ "math/big"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/misc"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/txpool"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/billy"
+ "github.com/holiman/uint256"
+)
+
+// Pre-computed empty-blob artifacts (commitment, proof and versioned hash)
+// shared by the tests below to avoid recomputing KZG material per test.
+var (
+ emptyBlob = kzg4844.Blob{}
+ emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob)
+ emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
+ emptyBlobVHash = blobHash(emptyBlobCommit)
+)
+
+// blobHash computes the versioned hash of a KZG commitment: the SHA-256 of
+// the commitment with the first byte replaced by the blob tx hash version.
+func blobHash(commit kzg4844.Commitment) common.Hash {
+ hashed := sha256.Sum256(commit[:])
+
+ var vhash common.Hash
+ vhash[0] = params.BlobTxHashVersion
+ copy(vhash[1:], hashed[1:])
+ return vhash
+}
+
+// Chain configuration with Cancun enabled.
+//
+// TODO(karalabe): replace with params.MainnetChainConfig after Cancun.
+var testChainConfig *params.ChainConfig
+
+func init() {
+ // Clone the mainnet config and flip Cancun on as of "now" so blob txs
+ // validate against the latest fork rules in the tests.
+ config := *params.MainnetChainConfig
+
+ cancun := uint64(time.Now().Unix())
+ config.CancunTime = &cancun
+
+ testChainConfig = &config
+}
+
+// testBlockChain is a mock of the live chain for testing the pool.
+type testBlockChain struct {
+ config *params.ChainConfig // chain rules handed to the pool
+ basefee *uint256.Int // basefee that CurrentBlock's header will reproduce
+ blobfee *uint256.Int // blobfee that CurrentBlock's header will reproduce
+ statedb *state.StateDB // single state returned for every StateAt request
+}
+
+// Config implements the pool's chain interface, returning the configuration
+// the mock was seeded with.
+func (bc *testBlockChain) Config() *params.ChainConfig {
+ return bc.config
+}
+
+// CurrentBlock implements the pool's chain interface, synthesizing a head
+// header whose BaseFee and ExcessDataGas fields reproduce (via binary search)
+// the basefee and blobfee the mock was seeded with.
+func (bc *testBlockChain) CurrentBlock() *types.Header {
+ // Yolo, life is too short to invert misc.CalcBaseFee and misc.CalcBlobFee,
+ // just binary search them.
+
+ // The base fee at 5714 ETH translates into the 21000 base gas higher than
+ // mainnet ether existence, use that as a cap for the tests.
+ var (
+ blockNumber = new(big.Int).Add(bc.config.LondonBlock, big.NewInt(1))
+ blockTime = *bc.config.CancunTime + 1
+ gasLimit = uint64(30_000_000)
+ )
+ lo := new(big.Int)
+ hi := new(big.Int).Mul(big.NewInt(5714), new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil))
+
+ for new(big.Int).Add(lo, big.NewInt(1)).Cmp(hi) != 0 {
+ mid := new(big.Int).Add(lo, hi)
+ mid.Div(mid, big.NewInt(2))
+
+ if misc.CalcBaseFee(bc.config, &types.Header{
+ Number: blockNumber,
+ GasLimit: gasLimit,
+ GasUsed: 0,
+ BaseFee: mid,
+ }).Cmp(bc.basefee.ToBig()) > 0 {
+ hi = mid
+ } else {
+ lo = mid
+ }
+ }
+ baseFee := lo
+
+ // The excess data gas at 2^27 translates into a blob fee higher than mainnet
+ // ether existence, use that as a cap for the tests.
+ lo = new(big.Int)
+ hi = new(big.Int).Exp(big.NewInt(2), big.NewInt(27), nil)
+
+ for new(big.Int).Add(lo, big.NewInt(1)).Cmp(hi) != 0 {
+ mid := new(big.Int).Add(lo, hi)
+ mid.Div(mid, big.NewInt(2))
+
+ if misc.CalcBlobFee(mid.Uint64()).Cmp(bc.blobfee.ToBig()) > 0 {
+ hi = mid
+ } else {
+ lo = mid
+ }
+ }
+ excessDataGas := lo.Uint64()
+
+ return &types.Header{
+ Number: blockNumber,
+ Time: blockTime,
+ GasLimit: gasLimit,
+ BaseFee: baseFee,
+ ExcessDataGas: &excessDataGas,
+ }
+}
+
+// CurrentFinalBlock implements the pool's chain interface, pinning finality
+// at the genesis block for the tests.
+func (bc *testBlockChain) CurrentFinalBlock() *types.Header {
+ header := new(types.Header)
+ header.Number = big.NewInt(0)
+ return header
+}
+
+// GetBlock implements the pool's chain interface; the mock chain tracks no
+// blocks, so every lookup resolves to nil.
+func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
+ return nil
+}
+
+// StateAt implements the pool's chain interface, returning the single statedb
+// the mock was seeded with, irrespective of the requested root.
+func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
+ return bc.statedb, nil
+}
+
+// makeAddressReserver is a utility method to sanity check that accounts are
+// properly reserved by the blobpool (no duplicate reserves or unreserves).
+func makeAddressReserver() txpool.AddressReserver {
+ var (
+ reserved = make(map[common.Address]struct{})
+ lock sync.Mutex
+ )
+ return func(addr common.Address, reserve bool) error {
+ lock.Lock()
+ defer lock.Unlock()
+
+ _, exists := reserved[addr]
+ switch {
+ case reserve && exists:
+ panic("already reserved")
+ case reserve:
+ reserved[addr] = struct{}{}
+ case !exists:
+ panic("not reserved")
+ default:
+ delete(reserved, addr)
+ }
+ return nil
+ }
+}
+
+// makeTx is a utility method to construct a random blob transaction and sign it
+// with a valid key, only setting the interesting fields from the perspective of
+// the blob pool.
+func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, key *ecdsa.PrivateKey) *types.Transaction {
+ unsigned := makeUnsignedTx(nonce, gasTipCap, gasFeeCap, blobFeeCap)
+ signed, _ := types.SignNewTx(key, types.LatestSigner(testChainConfig), unsigned)
+ return signed
+}
+
+// makeUnsignedTx is a utility method to construct a random blob transaction
+// without signing it.
+func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64) *types.BlobTx {
+ return &types.BlobTx{
+ ChainID: uint256.MustFromBig(testChainConfig.ChainID),
+ Nonce: nonce,
+ GasTipCap: uint256.NewInt(gasTipCap),
+ GasFeeCap: uint256.NewInt(gasFeeCap),
+ Gas: 21000,
+ BlobFeeCap: uint256.NewInt(blobFeeCap),
+ BlobHashes: []common.Hash{emptyBlobVHash}, // single empty blob per tx
+ Value: uint256.NewInt(100),
+ }
+}
+
+// verifyPoolInternals iterates over all the transactions in the pool and checks
+// that sort orders, calculated fields, cumulated fields are correct.
+func verifyPoolInternals(t *testing.T, pool *BlobPool) {
+ // Mark this method as a helper to remove from stack traces
+ t.Helper()
+
+ // Verify that all items in the index are present in the lookup and nothing more
+ seen := make(map[common.Hash]struct{})
+ for addr, txs := range pool.index {
+ for _, tx := range txs {
+ if _, ok := seen[tx.hash]; ok {
+ t.Errorf("duplicate hash #%x in transaction index: address %s, nonce %d", tx.hash, addr, tx.nonce)
+ }
+ seen[tx.hash] = struct{}{}
+ }
+ }
+ for hash, id := range pool.lookup {
+ if _, ok := seen[hash]; !ok {
+ t.Errorf("lookup entry missing from transaction index: hash #%x, id %d", hash, id)
+ }
+ delete(seen, hash)
+ }
+ for hash := range seen {
+ t.Errorf("indexed transaction hash #%x missing from lookup table", hash)
+ }
+ // Verify that transactions are sorted per account and contain no nonce gaps
+ for addr, txs := range pool.index {
+ for i := 1; i < len(txs); i++ {
+ if txs[i].nonce != txs[i-1].nonce+1 {
+ t.Errorf("addr %v, tx %d nonce mismatch: have %d, want %d", addr, i, txs[i].nonce, txs[i-1].nonce+1)
+ }
+ }
+ }
+ // Verify that calculated eviction thresholds are correct: each tx's eviction
+ // fields must be the cumulative minimum of its own caps and all predecessors'
+ for addr, txs := range pool.index {
+ if !txs[0].evictionExecTip.Eq(txs[0].execTipCap) {
+ t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, 0, txs[0].evictionExecTip, txs[0].execTipCap)
+ }
+ if math.Abs(txs[0].evictionExecFeeJumps-txs[0].basefeeJumps) > 0.001 {
+ t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, 0, txs[0].evictionExecFeeJumps, txs[0].basefeeJumps)
+ }
+ if math.Abs(txs[0].evictionBlobFeeJumps-txs[0].blobfeeJumps) > 0.001 {
+ t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, 0, txs[0].evictionBlobFeeJumps, txs[0].blobfeeJumps)
+ }
+ for i := 1; i < len(txs); i++ {
+ wantExecTip := txs[i-1].evictionExecTip
+ if wantExecTip.Gt(txs[i].execTipCap) {
+ wantExecTip = txs[i].execTipCap
+ }
+ if !txs[i].evictionExecTip.Eq(wantExecTip) {
+ t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, i, txs[i].evictionExecTip, wantExecTip)
+ }
+
+ wantExecFeeJumps := txs[i-1].evictionExecFeeJumps
+ if wantExecFeeJumps > txs[i].basefeeJumps {
+ wantExecFeeJumps = txs[i].basefeeJumps
+ }
+ if math.Abs(txs[i].evictionExecFeeJumps-wantExecFeeJumps) > 0.001 {
+ t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionExecFeeJumps, wantExecFeeJumps)
+ }
+
+ wantBlobFeeJumps := txs[i-1].evictionBlobFeeJumps
+ if wantBlobFeeJumps > txs[i].blobfeeJumps {
+ wantBlobFeeJumps = txs[i].blobfeeJumps
+ }
+ if math.Abs(txs[i].evictionBlobFeeJumps-wantBlobFeeJumps) > 0.001 {
+ t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionBlobFeeJumps, wantBlobFeeJumps)
+ }
+ }
+ }
+ // Verify that account balance accumulations are correct
+ for addr, txs := range pool.index {
+ spent := new(uint256.Int)
+ for _, tx := range txs {
+ spent.Add(spent, tx.costCap)
+ }
+ if !pool.spent[addr].Eq(spent) {
+ t.Errorf("addr %v expenditure mismatch: have %d, want %d", addr, pool.spent[addr], spent)
+ }
+ }
+ // Verify that pool storage size is correct
+ var stored uint64
+ for _, txs := range pool.index {
+ for _, tx := range txs {
+ stored += uint64(tx.size)
+ }
+ }
+ if pool.stored != stored {
+ t.Errorf("pool storage mismatch: have %d, want %d", pool.stored, stored)
+ }
+ // Verify the price heap internals
+ verifyHeapInternals(t, pool.evict)
+}
+
+// Tests that transactions can be loaded from disk on startup and that they are
+// correctly discarded if invalid.
+//
+// - 1. A transaction that cannot be decoded must be dropped
+// - 2. A transaction that cannot be recovered (bad signature) must be dropped
+// - 3. All transactions after a nonce gap must be dropped
+// - 4. All transactions after an underpriced one (including it) must be dropped
+//
+// The test additionally covers dangling (gapped-start) sets, already-included
+// nonces, overdrafted accounts and per-account transaction caps.
+func TestOpenDrops(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a malformed transaction to verify that decoding errors (or format
+ // changes) are handled gracefully (case 1)
+ malformed, _ := store.Put([]byte("this is a badly encoded transaction"))
+
+ // Insert a transaction with a bad signature to verify that stale junk after
+ // potential hard-forks can get evicted (case 2)
+ tx := types.NewTx(&types.BlobTx{
+ ChainID: uint256.MustFromBig(testChainConfig.ChainID),
+ GasTipCap: new(uint256.Int),
+ GasFeeCap: new(uint256.Int),
+ Gas: 0,
+ Value: new(uint256.Int),
+ Data: nil,
+ BlobFeeCap: new(uint256.Int),
+ V: new(uint256.Int),
+ R: new(uint256.Int),
+ S: new(uint256.Int),
+ })
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ badsig, _ := store.Put(blob)
+
+ // Insert a sequence of transactions with a nonce gap in between to verify
+ // that anything gapped will get evicted (case 3)
+ var (
+ gapper, _ = crypto.GenerateKey()
+
+ valids = make(map[uint64]struct{})
+ gapped = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 3, 4, 6, 7} { // first gap at #2, another at #5
+ tx := makeTx(nonce, 1, 1, 1, gapper)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ if nonce < 2 {
+ valids[id] = struct{}{}
+ } else {
+ gapped[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions with a gapped starting nonce to verify
+ // that the entire set will get dropped.
+ var (
+ dangler, _ = crypto.GenerateKey()
+ dangling = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{1, 2, 3} { // first gap at #0, all set dangling
+ tx := makeTx(nonce, 1, 1, 1, dangler)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ dangling[id] = struct{}{}
+ }
+ // Insert a sequence of transactions with already passed nonces to verify
+ // that the entire set will get dropped.
+ var (
+ filler, _ = crypto.GenerateKey()
+ filled = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} { // account nonce at 3, all set filled
+ tx := makeTx(nonce, 1, 1, 1, filler)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ filled[id] = struct{}{}
+ }
+ // Insert a sequence of transactions with partially passed nonces to verify
+ // that the included part of the set will get dropped
+ var (
+ overlapper, _ = crypto.GenerateKey()
+ overlapped = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2, 3} { // account nonce at 2, half filled
+ tx := makeTx(nonce, 1, 1, 1, overlapper)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ if nonce >= 2 {
+ valids[id] = struct{}{}
+ } else {
+ overlapped[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions with an underpriced first to verify that
+ // the entire set will get dropped (case 4).
+ var (
+ underpayer, _ = crypto.GenerateKey()
+ underpaid = make(map[uint64]struct{})
+ )
+ for i := 0; i < 5; i++ { // make #0 underpriced
+ var tx *types.Transaction
+ if i == 0 {
+ tx = makeTx(uint64(i), 0, 0, 0, underpayer)
+ } else {
+ tx = makeTx(uint64(i), 1, 1, 1, underpayer)
+ }
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ underpaid[id] = struct{}{}
+ }
+
+ // Insert a sequence of transactions with an underpriced in between to verify
+ // that it and anything newly gapped will get evicted (case 4).
+ var (
+ outpricer, _ = crypto.GenerateKey()
+ outpriced = make(map[uint64]struct{})
+ )
+ for i := 0; i < 5; i++ { // make #2 underpriced
+ var tx *types.Transaction
+ if i == 2 {
+ tx = makeTx(uint64(i), 0, 0, 0, outpricer)
+ } else {
+ tx = makeTx(uint64(i), 1, 1, 1, outpricer)
+ }
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ if i < 2 {
+ valids[id] = struct{}{}
+ } else {
+ outpriced[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions fully overdrafted to verify that the
+ // entire set will get invalidated.
+ var (
+ exceeder, _ = crypto.GenerateKey()
+ exceeded = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} { // nonce 0 overdrafts the account
+ var tx *types.Transaction
+ if nonce == 0 {
+ tx = makeTx(nonce, 1, 100, 1, exceeder)
+ } else {
+ tx = makeTx(nonce, 1, 1, 1, exceeder)
+ }
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ exceeded[id] = struct{}{}
+ }
+ // Insert a sequence of transactions partially overdrafted to verify that part
+ // of the set will get invalidated.
+ var (
+ overdrafter, _ = crypto.GenerateKey()
+ overdrafted = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} { // nonce 1 overdrafts the account
+ var tx *types.Transaction
+ if nonce == 1 {
+ tx = makeTx(nonce, 1, 100, 1, overdrafter)
+ } else {
+ tx = makeTx(nonce, 1, 1, 1, overdrafter)
+ }
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ if nonce < 1 {
+ valids[id] = struct{}{}
+ } else {
+ overdrafted[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions overflowing the account cap to verify
+ // that part of the set will get invalidated.
+ var (
+ overcapper, _ = crypto.GenerateKey()
+ overcapped = make(map[uint64]struct{})
+ )
+ for nonce := uint64(0); nonce < maxTxsPerAccount+3; nonce++ {
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: makeTx(nonce, 1, 1, 1, overcapper)})
+
+ id, _ := store.Put(blob)
+ if nonce < maxTxsPerAccount {
+ valids[id] = struct{}{}
+ } else {
+ overcapped[id] = struct{}{}
+ }
+ }
+ store.Close()
+
+ // Create a blob pool out of the pre-seeded data
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb.AddBalance(crypto.PubkeyToAddress(gapper.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(dangler.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(filler.PublicKey), big.NewInt(1000000))
+ statedb.SetNonce(crypto.PubkeyToAddress(filler.PublicKey), 3)
+ statedb.AddBalance(crypto.PubkeyToAddress(overlapper.PublicKey), big.NewInt(1000000))
+ statedb.SetNonce(crypto.PubkeyToAddress(overlapper.PublicKey), 2)
+ statedb.AddBalance(crypto.PubkeyToAddress(underpayer.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(outpricer.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(exceeder.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(overdrafter.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), big.NewInt(10000000))
+ statedb.Commit(0, true)
+
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(params.InitialBaseFee),
+ blobfee: uint256.NewInt(params.BlobTxMinDataGasprice),
+ statedb: statedb,
+ }
+ pool := New(Config{Datadir: storage}, chain)
+ if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+ defer pool.Close()
+
+ // Verify that the malformed (case 1), badly signed (case 2) and gapped (case
+ // 3) txs have been deleted from the pool
+ alive := make(map[uint64]struct{})
+ for _, txs := range pool.index {
+ for _, tx := range txs {
+ switch tx.id {
+ case malformed:
+ t.Errorf("malformed RLP transaction remained in storage")
+ case badsig:
+ t.Errorf("invalidly signed transaction remained in storage")
+ default:
+ if _, ok := dangling[tx.id]; ok {
+ t.Errorf("dangling transaction remained in storage: %d", tx.id)
+ } else if _, ok := filled[tx.id]; ok {
+ t.Errorf("filled transaction remained in storage: %d", tx.id)
+ } else if _, ok := overlapped[tx.id]; ok {
+ t.Errorf("overlapped transaction remained in storage: %d", tx.id)
+ } else if _, ok := gapped[tx.id]; ok {
+ t.Errorf("gapped transaction remained in storage: %d", tx.id)
+ } else if _, ok := underpaid[tx.id]; ok {
+ t.Errorf("underpaid transaction remained in storage: %d", tx.id)
+ } else if _, ok := outpriced[tx.id]; ok {
+ t.Errorf("outpriced transaction remained in storage: %d", tx.id)
+ } else if _, ok := exceeded[tx.id]; ok {
+ t.Errorf("fully overdrafted transaction remained in storage: %d", tx.id)
+ } else if _, ok := overdrafted[tx.id]; ok {
+ t.Errorf("partially overdrafted transaction remained in storage: %d", tx.id)
+ } else if _, ok := overcapped[tx.id]; ok {
+ t.Errorf("overcapped transaction remained in storage: %d", tx.id)
+ } else {
+ alive[tx.id] = struct{}{}
+ }
+ }
+ }
+ }
+ // Verify that the rest of the transactions remained alive
+ if len(alive) != len(valids) {
+ t.Errorf("valid transaction count mismatch: have %d, want %d", len(alive), len(valids))
+ }
+ for id := range alive {
+ if _, ok := valids[id]; !ok {
+ t.Errorf("extra transaction %d", id)
+ }
+ }
+ for id := range valids {
+ if _, ok := alive[id]; !ok {
+ t.Errorf("missing transaction %d", id)
+ }
+ }
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks, this actually validates the verifier
+ // using the above already hard coded checks.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+}
+
+// Tests that transactions loaded from disk are indexed correctly.
+//
+// - 1. Transactions must be grouped by sender, sorted by nonce
+// - 2. Eviction thresholds are calculated correctly for the sequences
+// - 3. Balance usage of an account is totaled across all transactions
+func TestOpenIndex(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a sequence of transactions with varying price points to check that
+ // the cumulative minimum will be maintained.
+ var (
+ key, _ = crypto.GenerateKey()
+ addr = crypto.PubkeyToAddress(key.PublicKey)
+
+ txExecTipCaps = []uint64{10, 25, 5, 7, 1, 100}
+ txExecFeeCaps = []uint64{100, 90, 200, 10, 80, 300}
+ txBlobFeeCaps = []uint64{55, 66, 77, 33, 22, 11}
+
+ //basefeeJumps = []float64{39.098, 38.204, 44.983, 19.549, 37.204, 48.426} // log 1.125 (exec fee cap)
+ //blobfeeJumps = []float64{34.023, 35.570, 36.879, 29.686, 26.243, 20.358} // log 1.125 (blob fee cap)
+
+ evictExecTipCaps = []uint64{10, 10, 5, 5, 1, 1}
+ evictExecFeeJumps = []float64{39.098, 38.204, 38.204, 19.549, 19.549, 19.549} // min(log 1.125 (exec fee cap))
+ evictBlobFeeJumps = []float64{34.023, 34.023, 34.023, 29.686, 26.243, 20.358} // min(log 1.125 (blob fee cap))
+
+ totalSpent = uint256.NewInt(21000*(100+90+200+10+80+300) + blobSize*(55+66+77+33+22+11) + 100*6) // 21000 gas x price + 128KB x blobprice + value
+ )
+ for _, i := range []int{5, 3, 4, 2, 0, 1} { // Randomize the tx insertion order to force sorting on load
+ tx := makeTx(uint64(i), txExecTipCaps[i], txExecFeeCaps[i], txBlobFeeCaps[i], key)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ store.Put(blob)
+ }
+ store.Close()
+
+ // Create a blob pool out of the pre-seeded data
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb.AddBalance(addr, big.NewInt(1_000_000_000))
+ statedb.Commit(0, true)
+
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(params.InitialBaseFee),
+ blobfee: uint256.NewInt(params.BlobTxMinDataGasprice),
+ statedb: statedb,
+ }
+ pool := New(Config{Datadir: storage}, chain)
+ if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+ defer pool.Close()
+
+ // Verify that the transactions have been sorted by nonce (case 1)
+ for i := 0; i < len(pool.index[addr]); i++ {
+ if pool.index[addr][i].nonce != uint64(i) {
+ t.Errorf("tx %d nonce mismatch: have %d, want %d", i, pool.index[addr][i].nonce, uint64(i))
+ }
+ }
+ // Verify that the cumulative fee minimums have been correctly calculated (case 2)
+ for i, cap := range evictExecTipCaps {
+ if !pool.index[addr][i].evictionExecTip.Eq(uint256.NewInt(cap)) {
+ t.Errorf("eviction tip cap %d mismatch: have %d, want %d", i, pool.index[addr][i].evictionExecTip, cap)
+ }
+ }
+ for i, jumps := range evictExecFeeJumps {
+ if math.Abs(pool.index[addr][i].evictionExecFeeJumps-jumps) > 0.001 {
+ t.Errorf("eviction fee cap jumps %d mismatch: have %f, want %f", i, pool.index[addr][i].evictionExecFeeJumps, jumps)
+ }
+ }
+ for i, jumps := range evictBlobFeeJumps {
+ if math.Abs(pool.index[addr][i].evictionBlobFeeJumps-jumps) > 0.001 {
+ t.Errorf("eviction blob fee cap jumps %d mismatch: have %f, want %f", i, pool.index[addr][i].evictionBlobFeeJumps, jumps)
+ }
+ }
+ // Verify that the balance usage has been correctly calculated (case 3)
+ if !pool.spent[addr].Eq(totalSpent) {
+ t.Errorf("expenditure mismatch: have %d, want %d", pool.spent[addr], totalSpent)
+ }
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks, this actually validates the verifier
+ // using the above already hard coded checks.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+}
+
+// Tests that after indexing all the loaded transactions from disk, a price heap
+// is correctly constructed based on the head basefee and blobfee.
+func TestOpenHeap(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a few transactions from a few accounts. To remove randomness from
+ // the heap initialization, use a deterministic account/tx/priority ordering.
+ var (
+ key1, _ = crypto.GenerateKey()
+ key2, _ = crypto.GenerateKey()
+ key3, _ = crypto.GenerateKey()
+
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ addr2 = crypto.PubkeyToAddress(key2.PublicKey)
+ addr3 = crypto.PubkeyToAddress(key3.PublicKey)
+ )
+ // Bubble-sort the three random addresses (and their keys) into ascending
+ // order so the expected heap layout below is deterministic
+ if bytes.Compare(addr1[:], addr2[:]) > 0 {
+ key1, addr1, key2, addr2 = key2, addr2, key1, addr1
+ }
+ if bytes.Compare(addr1[:], addr3[:]) > 0 {
+ key1, addr1, key3, addr3 = key3, addr3, key1, addr1
+ }
+ if bytes.Compare(addr2[:], addr3[:]) > 0 {
+ key2, addr2, key3, addr3 = key3, addr3, key2, addr2
+ }
+ var (
+ tx1 = makeTx(0, 1, 1000, 90, key1)
+ tx2 = makeTx(0, 1, 800, 70, key2)
+ tx3 = makeTx(0, 1, 1500, 110, key3)
+
+ blob1, _ = rlp.EncodeToBytes(&blobTx{Tx: tx1})
+ blob2, _ = rlp.EncodeToBytes(&blobTx{Tx: tx2})
+ blob3, _ = rlp.EncodeToBytes(&blobTx{Tx: tx3})
+
+ heapOrder = []common.Address{addr2, addr1, addr3}
+ heapIndex = map[common.Address]int{addr2: 0, addr1: 1, addr3: 2}
+ )
+ store.Put(blob1)
+ store.Put(blob2)
+ store.Put(blob3)
+ store.Close()
+
+ // Create a blob pool out of the pre-seeded data
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb.AddBalance(addr1, big.NewInt(1_000_000_000))
+ statedb.AddBalance(addr2, big.NewInt(1_000_000_000))
+ statedb.AddBalance(addr3, big.NewInt(1_000_000_000))
+ statedb.Commit(0, true)
+
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(1050),
+ blobfee: uint256.NewInt(105),
+ statedb: statedb,
+ }
+ pool := New(Config{Datadir: storage}, chain)
+ if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+ defer pool.Close()
+
+ // Verify that the heap's internal state matches the expectations
+ for i, addr := range pool.evict.addrs {
+ if addr != heapOrder[i] {
+ t.Errorf("slot %d mismatch: have %v, want %v", i, addr, heapOrder[i])
+ }
+ }
+ for addr, i := range pool.evict.index {
+ if i != heapIndex[addr] {
+ t.Errorf("index for %v mismatch: have %d, want %d", addr, i, heapIndex[addr])
+ }
+ }
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks, this actually validates the verifier
+ // using the above already hard coded checks.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+}
+
+// Tests that after the pool's previous state is loaded back, any transactions
+// over the new storage cap will get dropped.
+func TestOpenCap(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a few transactions from a few accounts
+ var (
+ key1, _ = crypto.GenerateKey()
+ key2, _ = crypto.GenerateKey()
+ key3, _ = crypto.GenerateKey()
+
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ addr2 = crypto.PubkeyToAddress(key2.PublicKey)
+ addr3 = crypto.PubkeyToAddress(key3.PublicKey)
+
+ tx1 = makeTx(0, 1, 1000, 100, key1)
+ tx2 = makeTx(0, 1, 800, 70, key2)
+ tx3 = makeTx(0, 1, 1500, 110, key3)
+
+ blob1, _ = rlp.EncodeToBytes(&blobTx{Tx: tx1, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
+ blob2, _ = rlp.EncodeToBytes(&blobTx{Tx: tx2, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
+ blob3, _ = rlp.EncodeToBytes(&blobTx{Tx: tx3, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
+
+ keep = []common.Address{addr1, addr3}
+ drop = []common.Address{addr2}
+ size = uint64(2 * (txAvgSize + blobSize))
+ )
+ store.Put(blob1)
+ store.Put(blob2)
+ store.Put(blob3)
+ store.Close()
+
+ // Verify pool capping twice: first by reducing the data cap, then restarting
+ // with a high cap to ensure everything was persisted previously
+ for _, datacap := range []uint64{2 * (txAvgSize + blobSize), 100 * (txAvgSize + blobSize)} {
+ // Create a blob pool out of the pre-seeded data, but cap it to 2 blob transactions
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb.AddBalance(addr1, big.NewInt(1_000_000_000))
+ statedb.AddBalance(addr2, big.NewInt(1_000_000_000))
+ statedb.AddBalance(addr3, big.NewInt(1_000_000_000))
+ statedb.Commit(0, true)
+
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(1050),
+ blobfee: uint256.NewInt(105),
+ statedb: statedb,
+ }
+ pool := New(Config{Datadir: storage, Datacap: datacap}, chain)
+ if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+ // Verify that enough transactions have been dropped to get the pool's size
+ // under the requested limit
+ if len(pool.index) != len(keep) {
+ t.Errorf("tracked account count mismatch: have %d, want %d", len(pool.index), len(keep))
+ }
+ for _, addr := range keep {
+ if _, ok := pool.index[addr]; !ok {
+ t.Errorf("expected account %v missing from pool", addr)
+ }
+ }
+ for _, addr := range drop {
+ if _, ok := pool.index[addr]; ok {
+ t.Errorf("unexpected account %v present in pool", addr)
+ }
+ }
+ if pool.stored != size {
+ t.Errorf("pool stored size mismatch: have %v, want %v", pool.stored, size)
+ }
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks, this actually validates the verifier
+ // using the above already hard coded checks.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+
+ pool.Close()
+ }
+}
+
+// Tests that adding transaction will correctly store it in the persistent store
+// and update all the indices.
+//
+// Note, this test mostly checks the pool transaction shuffling logic or things
+// specific to the blob pool. It does not do an exhaustive transaction validity
+// check.
+func TestAdd(t *testing.T) {
+	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+	// seed is a helper tuple to seed an initial state db and pool
+	type seed struct {
+		balance uint64
+		nonce   uint64
+		txs     []*types.BlobTx
+	}
+
+	// addtx is a helper sender/tx tuple to represent a new tx addition
+	type addtx struct {
+		from string
+		tx   *types.BlobTx
+		err  error
+	}
+
+	tests := []struct {
+		seeds map[string]seed
+		adds  []addtx
+	}{
+		// Transactions from new accounts should be accepted if their initial
+		// nonce matches the expected one from the statedb. Higher or lower must
+		// be rejected.
+		{
+			seeds: map[string]seed{
+				"alice":  {balance: 21100 + blobSize},
+				"bob":    {balance: 21100 + blobSize, nonce: 1},
+				"claire": {balance: 21100 + blobSize},
+				"dave":   {balance: 21100 + blobSize, nonce: 1},
+			},
+			adds: []addtx{
+				{ // New account, no previous txs: accept nonce 0
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 1, 1),
+					err:  nil,
+				},
+				{ // Old account, 1 tx in chain, 0 pending: accept nonce 1
+					from: "bob",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, no previous txs: reject nonce 1
+					from: "claire",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  core.ErrNonceTooHigh,
+				},
+				{ // Old account, 1 tx in chain, 0 pending: reject nonce 0
+					from: "dave",
+					tx:   makeUnsignedTx(0, 1, 1, 1),
+					err:  core.ErrNonceTooLow,
+				},
+				{ // Old account, 1 tx in chain, 0 pending: reject nonce 2
+					from: "dave",
+					tx:   makeUnsignedTx(2, 1, 1, 1),
+					err:  core.ErrNonceTooHigh,
+				},
+			},
+		},
+		// Transactions from already pooled accounts should only be accepted if
+		// the nonces are contiguous (ignore prices for now, will check later)
+		{
+			seeds: map[string]seed{
+				"alice": {
+					balance: 1000000,
+					txs: []*types.BlobTx{
+						makeUnsignedTx(0, 1, 1, 1),
+					},
+				},
+				"bob": {
+					balance: 1000000,
+					nonce:   1,
+					txs: []*types.BlobTx{
+						makeUnsignedTx(1, 1, 1, 1),
+					},
+				},
+			},
+			adds: []addtx{
+				{ // New account, 1 tx pending: reject replacement nonce 0 (ignore price for now)
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 1, 1),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 1 tx pending: accept nonce 1
+					from: "alice",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 2 txs pending: reject nonce 3
+					from: "alice",
+					tx:   makeUnsignedTx(3, 1, 1, 1),
+					err:  core.ErrNonceTooHigh,
+				},
+				{ // New account, 2 txs pending: accept nonce 2
+					from: "alice",
+					tx:   makeUnsignedTx(2, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 3 txs pending: accept nonce 3 now
+					from: "alice",
+					tx:   makeUnsignedTx(3, 1, 1, 1),
+					err:  nil,
+				},
+				{ // Old account, 1 tx in chain, 1 tx pending: reject replacement nonce 1 (ignore price for now)
+					from: "bob",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // Old account, 1 tx in chain, 1 tx pending: accept nonce 2 (ignore price for now)
+					from: "bob",
+					tx:   makeUnsignedTx(2, 1, 1, 1),
+					err:  nil,
+				},
+			},
+		},
+		// Transactions should only be accepted into the pool if the cumulative
+		// expenditure doesn't overflow the account balance
+		{
+			seeds: map[string]seed{
+				"alice": {balance: 63299 + 3*blobSize}, // 3 tx - 1 wei
+			},
+			adds: []addtx{
+				{ // New account, no previous txs: accept nonce 0 with 21100 wei spend
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 1 pooled tx with 21100 wei spent: accept nonce 1 with 21100 wei spend
+					from: "alice",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 2 pooled tx with 42200 wei spent: reject nonce 2 with 21100 wei spend (1 wei overflow)
+					from: "alice",
+					tx:   makeUnsignedTx(2, 1, 1, 1),
+					err:  core.ErrInsufficientFunds,
+				},
+			},
+		},
+		// Transactions should only be accepted into the pool if the total count
+		// from the same account doesn't overflow the pool limits
+		{
+			seeds: map[string]seed{
+				"alice": {balance: 10000000},
+			},
+			adds: []addtx{
+				{ // New account, no previous txs, 16 slots left: accept nonce 0
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 1 pooled tx, 15 slots left: accept nonce 1
+					from: "alice",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 2 pooled tx, 14 slots left: accept nonce 2
+					from: "alice",
+					tx:   makeUnsignedTx(2, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 3 pooled tx, 13 slots left: accept nonce 3
+					from: "alice",
+					tx:   makeUnsignedTx(3, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 4 pooled tx, 12 slots left: accept nonce 4
+					from: "alice",
+					tx:   makeUnsignedTx(4, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 5 pooled tx, 11 slots left: accept nonce 5
+					from: "alice",
+					tx:   makeUnsignedTx(5, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 6 pooled tx, 10 slots left: accept nonce 6
+					from: "alice",
+					tx:   makeUnsignedTx(6, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 7 pooled tx, 9 slots left: accept nonce 7
+					from: "alice",
+					tx:   makeUnsignedTx(7, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 8 pooled tx, 8 slots left: accept nonce 8
+					from: "alice",
+					tx:   makeUnsignedTx(8, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 9 pooled tx, 7 slots left: accept nonce 9
+					from: "alice",
+					tx:   makeUnsignedTx(9, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 10 pooled tx, 6 slots left: accept nonce 10
+					from: "alice",
+					tx:   makeUnsignedTx(10, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 11 pooled tx, 5 slots left: accept nonce 11
+					from: "alice",
+					tx:   makeUnsignedTx(11, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 12 pooled tx, 4 slots left: accept nonce 12
+					from: "alice",
+					tx:   makeUnsignedTx(12, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 13 pooled tx, 3 slots left: accept nonce 13
+					from: "alice",
+					tx:   makeUnsignedTx(13, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 14 pooled tx, 2 slots left: accept nonce 14
+					from: "alice",
+					tx:   makeUnsignedTx(14, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 15 pooled tx, 1 slots left: accept nonce 15
+					from: "alice",
+					tx:   makeUnsignedTx(15, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 16 pooled tx, 0 slots left: accept nonce 15 replacement
+					from: "alice",
+					tx:   makeUnsignedTx(15, 10, 10, 10),
+					err:  nil,
+				},
+				{ // New account, 16 pooled tx, 0 slots left: reject nonce 16 with overcap
+					from: "alice",
+					tx:   makeUnsignedTx(16, 1, 1, 1),
+					err:  txpool.ErrAccountLimitExceeded,
+				},
+			},
+		},
+		// Previously existing transactions should be allowed to be replaced iff
+		// the new cumulative expenditure can be covered by the account and the
+		// prices are bumped all around (no percentage check here).
+		{
+			seeds: map[string]seed{
+				"alice": {balance: 2*100 + 5*21000 + 3*blobSize},
+			},
+			adds: []addtx{
+				{ // New account, no previous txs: reject nonce 0 with 341172 wei spend
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 20, 1),
+					err:  core.ErrInsufficientFunds,
+				},
+				{ // New account, no previous txs: accept nonce 0 with 173172 wei spend
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 2, 1),
+					err:  nil,
+				},
+				{ // New account, 1 pooled tx with 173172 wei spent: accept nonce 1 with 152172 wei spend
+					from: "alice",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with 599684 wei spend (173072 extra) (would overflow balance at nonce 1)
+					from: "alice",
+					tx:   makeUnsignedTx(0, 2, 5, 2),
+					err:  core.ErrInsufficientFunds,
+				},
+				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-gastip-bump
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 3, 2),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-gascap-bump
+					from: "alice",
+					tx:   makeUnsignedTx(0, 2, 2, 2),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-blobcap-bump
+					from: "alice",
+					tx:   makeUnsignedTx(0, 2, 4, 1),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 2 pooled tx with 325344 wei spent: accept nonce 0 with 84100 wei spend (42000 extra)
+					from: "alice",
+					tx:   makeUnsignedTx(0, 2, 4, 2),
+					err:  nil,
+				},
+			},
+		},
+		// Previously existing transactions should be allowed to be replaced iff
+		// the new prices are bumped by a sufficient amount.
+		{
+			seeds: map[string]seed{
+				"alice": {balance: 100 + 8*21000 + 4*blobSize},
+			},
+			adds: []addtx{
+				{ // New account, no previous txs: accept nonce 0
+					from: "alice",
+					tx:   makeUnsignedTx(0, 2, 4, 2),
+					err:  nil,
+				},
+				{ // New account, 1 pooled tx: reject nonce 0 with low-gastip-bump
+					from: "alice",
+					tx:   makeUnsignedTx(0, 3, 8, 4),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 1 pooled tx: reject nonce 0 with low-gascap-bump
+					from: "alice",
+					tx:   makeUnsignedTx(0, 4, 6, 4),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 1 pooled tx: reject nonce 0 with low-blobcap-bump
+					from: "alice",
+					tx:   makeUnsignedTx(0, 4, 8, 3),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 1 pooled tx: accept nonce 0 with all-bumps
+					from: "alice",
+					tx:   makeUnsignedTx(0, 4, 8, 4),
+					err:  nil,
+				},
+			},
+		},
+	}
+	for i, tt := range tests {
+		// Create a temporary folder for the persistent backend
+		storage, _ := os.MkdirTemp("", "blobpool-")
+		defer os.RemoveAll(storage) // late defer, still ok
+
+		os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+		store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+		// Insert the seed transactions for the pool startup
+		var (
+			keys  = make(map[string]*ecdsa.PrivateKey)
+			addrs = make(map[string]common.Address)
+		)
+		statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+		for acc, seed := range tt.seeds {
+			// Generate a new random key/address for the seed account
+			keys[acc], _ = crypto.GenerateKey()
+			addrs[acc] = crypto.PubkeyToAddress(keys[acc].PublicKey)
+
+			// Seed the state database with this account
+			statedb.AddBalance(addrs[acc], new(big.Int).SetUint64(seed.balance))
+			statedb.SetNonce(addrs[acc], seed.nonce)
+
+			// Sign the seed transactions and store them in the data store
+			for _, tx := range seed.txs {
+				var (
+					signed, _ = types.SignNewTx(keys[acc], types.LatestSigner(testChainConfig), tx)
+					blob, _   = rlp.EncodeToBytes(&blobTx{Tx: signed, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
+				)
+				store.Put(blob)
+			}
+		}
+		statedb.Commit(0, true)
+		store.Close()
+
+		// Create a blob pool out of the pre-seeded data
+		chain := &testBlockChain{
+			config:  testChainConfig,
+			basefee: uint256.NewInt(1050),
+			blobfee: uint256.NewInt(105),
+			statedb: statedb,
+		}
+		pool := New(Config{Datadir: storage}, chain)
+		if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+			t.Fatalf("test %d: failed to create blob pool: %v", i, err)
+		}
+		verifyPoolInternals(t, pool)
+
+		// Add each transaction one by one, verifying the pool internals in between
+		for j, add := range tt.adds {
+			signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(testChainConfig), add.tx)
+			if err := pool.add(signed, []kzg4844.Blob{emptyBlob}, []kzg4844.Commitment{emptyBlobCommit}, []kzg4844.Proof{emptyBlobProof}); !errors.Is(err, add.err) {
+				t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, err, add.err)
+			}
+			verifyPoolInternals(t, pool)
+		}
+		// Verify the pool internals and close down the test
+		verifyPoolInternals(t, pool)
+		pool.Close()
+	}
+}
diff --git a/core/txpool/blobpool/config.go b/core/txpool/blobpool/config.go
new file mode 100644
index 0000000000000..99a2002a303f8
--- /dev/null
+++ b/core/txpool/blobpool/config.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// Config are the configuration parameters of the blob transaction pool.
+//
+// Zero values are replaced with the corresponding DefaultConfig fields at pool
+// construction time (see sanitize).
+type Config struct {
+	Datadir   string // Data directory containing the currently executable blobs
+	Datacap   uint64 // Soft-cap of database storage (hard cap is larger due to overhead)
+	PriceBump uint64 // Minimum price bump percentage to replace an already existing nonce
+}
+
+// DefaultConfig contains the default configurations for the transaction pool.
+var DefaultConfig = Config{
+	Datadir: "blobpool",
+	Datacap: 10 * 1024 * 1024 * 1024, // soft cap of 10GB on disk
+	PriceBump: 100, // either have patience or be aggressive, no mushy ground
+}
+
+// sanitize checks the provided user configurations and changes anything that's
+// unreasonable or unworkable.
+func (config *Config) sanitize() Config {
+	sanitized := *config
+
+	// A zero storage cap would make the pool unable to hold anything, fall back
+	// to the default allowance
+	if sanitized.Datacap == 0 {
+		log.Warn("Sanitizing invalid blobpool storage cap", "provided", sanitized.Datacap, "updated", DefaultConfig.Datacap)
+		sanitized.Datacap = DefaultConfig.Datacap
+	}
+	// A zero price bump would permit free replacement spam, fall back to the
+	// default percentage
+	if sanitized.PriceBump == 0 {
+		log.Warn("Sanitizing invalid blobpool price bump", "provided", sanitized.PriceBump, "updated", DefaultConfig.PriceBump)
+		sanitized.PriceBump = DefaultConfig.PriceBump
+	}
+	return sanitized
+}
diff --git a/core/txpool/blobpool/evictheap.go b/core/txpool/blobpool/evictheap.go
new file mode 100644
index 0000000000000..7607a911c15bf
--- /dev/null
+++ b/core/txpool/blobpool/evictheap.go
@@ -0,0 +1,146 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "bytes"
+ "container/heap"
+ "math"
+ "sort"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/holiman/uint256"
+)
+
+// evictHeap is a helper data structure to keep track of the cheapest bottleneck
+// transaction from each account to determine which account to evict from.
+//
+// The heap internally tracks a slice of cheapest transactions from each account
+// and a mapping from addresses to indices for direct removals/updates.
+//
+// The goal of the heap is to decide which account has the worst bottleneck to
+// evict transactions from.
+type evictHeap struct {
+	metas *map[common.Address][]*blobTxMeta // Pointer to the blob pool's index for price retrievals
+
+	basefeeJumps float64 // Pre-calculated absolute dynamic fee jumps for the base fee
+	blobfeeJumps float64 // Pre-calculated absolute dynamic fee jumps for the blob fee
+
+	addrs []common.Address       // Heap of addresses to retrieve the cheapest out of
+	index map[common.Address]int // Indices into the heap for replacements
+}
+
+// newPriceHeap creates a new heap of the cheapest accounts in the blob pool to
+// evict from in case of over saturation.
+func newPriceHeap(basefee *uint256.Int, blobfee *uint256.Int, index *map[common.Address][]*blobTxMeta) *evictHeap {
+	h := &evictHeap{
+		metas: index,
+		index: make(map[common.Address]int, len(*index)),
+	}
+	// Insert the accounts in sorted order. Not really needed in practice, but it
+	// makes the heap initialization deterministic and less annoying to test in
+	// unit tests.
+	h.addrs = make([]common.Address, 0, len(*index))
+	for addr := range *index {
+		h.addrs = append(h.addrs, addr)
+	}
+	sort.Slice(h.addrs, func(i, j int) bool { return bytes.Compare(h.addrs[i][:], h.addrs[j][:]) < 0 })
+	for i, addr := range h.addrs {
+		h.index[addr] = i
+	}
+	// Seed the fee jumps and establish the initial heap invariant
+	h.reinit(basefee, blobfee, true)
+	return h
+}
+
+// reinit updates the pre-calculated dynamic fee jumps in the price heap and runs
+// the sorting algorithm from scratch on the entire heap.
+func (h *evictHeap) reinit(basefee *uint256.Int, blobfee *uint256.Int, force bool) {
+ // If the update is mostly the same as the old, don't sort pointlessly
+ basefeeJumps := dynamicFeeJumps(basefee)
+ blobfeeJumps := dynamicFeeJumps(blobfee)
+
+ if !force && math.Abs(h.basefeeJumps-basefeeJumps) < 0.01 && math.Abs(h.blobfeeJumps-blobfeeJumps) < 0.01 { // TODO(karalabe): 0.01 enough, maybe should be smaller? Maybe this optimization is moot?
+ return
+ }
+ // One or both of the dynamic fees jumped, resort the pool
+ h.basefeeJumps = basefeeJumps
+ h.blobfeeJumps = blobfeeJumps
+
+ heap.Init(h)
+}
+
+// Len implements sort.Interface as part of heap.Interface, returning the number
+// of accounts in the pool which can be considered for eviction.
+func (h *evictHeap) Len() int {
+	return len(h.addrs)
+}
+
+// Less implements sort.Interface as part of heap.Interface, returning which of
+// the two requested accounts has a cheaper bottleneck.
+func (h *evictHeap) Less(i, j int) bool {
+ txsI := (*(h.metas))[h.addrs[i]]
+ txsJ := (*(h.metas))[h.addrs[j]]
+
+ lastI := txsI[len(txsI)-1]
+ lastJ := txsJ[len(txsJ)-1]
+
+ prioI := evictionPriority(h.basefeeJumps, lastI.evictionExecFeeJumps, h.blobfeeJumps, lastI.evictionBlobFeeJumps)
+ if prioI > 0 {
+ prioI = 0
+ }
+ prioJ := evictionPriority(h.basefeeJumps, lastJ.evictionExecFeeJumps, h.blobfeeJumps, lastJ.evictionBlobFeeJumps)
+ if prioJ > 0 {
+ prioJ = 0
+ }
+ if prioI == prioJ {
+ return lastI.evictionExecTip.Lt(lastJ.evictionExecTip)
+ }
+ return prioI < prioJ
+}
+
+// Swap implements sort.Interface as part of heap.Interface, maintaining both the
+// order of the accounts according to the heap, and the account->item slot mapping
+// for replacements.
+func (h *evictHeap) Swap(i, j int) {
+	// Exchange the index entries first (while the addresses are still in their
+	// original slots), then the heap slots themselves
+	addrI, addrJ := h.addrs[i], h.addrs[j]
+	h.index[addrI], h.index[addrJ] = h.index[addrJ], h.index[addrI]
+	h.addrs[i], h.addrs[j] = addrJ, addrI
+}
+
+// Push implements heap.Interface, appending an item to the end of the account
+// ordering as well as the address to item slot mapping.
+func (h *evictHeap) Push(x any) {
+	addr := x.(common.Address)
+	h.index[addr] = len(h.addrs)
+	h.addrs = append(h.addrs, addr)
+}
+
+// Pop implements heap.Interface, removing and returning the last element of the
+// heap.
+//
+// Note, use `heap.Pop`, not `evictHeap.Pop`. This method is used by Go's heap,
+// to provide the functionality, it does not embed it.
+func (h *evictHeap) Pop() any {
+	// Detach the trailing element from the heap ordering
+	last := len(h.addrs) - 1
+	addr := h.addrs[last]
+	h.addrs = h.addrs[:last]
+
+	// Drop the slot mapping too and hand the address back
+	delete(h.index, addr)
+	return addr
+}
diff --git a/core/txpool/blobpool/evictheap_test.go b/core/txpool/blobpool/evictheap_test.go
new file mode 100644
index 0000000000000..01b136551cf2a
--- /dev/null
+++ b/core/txpool/blobpool/evictheap_test.go
@@ -0,0 +1,320 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "container/heap"
+ mrand "math/rand"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/holiman/uint256"
+)
+
+// rand is a deterministically seeded source of randomness shared by the tests
+// and benchmarks in this file, keeping the generated fixtures reproducible.
+var rand = mrand.New(mrand.NewSource(1))
+
+// verifyHeapInternals verifies that all accounts present in the index are also
+// present in the heap and internals are consistent across various indices.
+func verifyHeapInternals(t *testing.T, evict *evictHeap) {
+	t.Helper()
+
+	// Ensure that all accounts are present in the heap and no extras
+	seen := make(map[common.Address]struct{})
+	for i, addr := range evict.addrs {
+		seen[addr] = struct{}{}
+		if _, ok := (*evict.metas)[addr]; !ok {
+			t.Errorf("heap contains unexpected address at slot %d: %v", i, addr)
+		}
+	}
+	for addr := range *evict.metas {
+		if _, ok := seen[addr]; !ok {
+			t.Errorf("heap is missing required address %v", addr)
+		}
+	}
+	if len(evict.addrs) != len(*evict.metas) {
+		t.Errorf("heap size %d mismatches metadata size %d", len(evict.addrs), len(*evict.metas))
+	}
+	// Ensure that all accounts are present in the heap order index and no extras
+	have := make([]common.Address, len(evict.index))
+	for addr, i := range evict.index {
+		have[i] = addr
+	}
+	if len(have) != len(evict.addrs) {
+		t.Errorf("heap index size %d mismatches heap size %d", len(have), len(evict.addrs))
+	}
+	// Compare only the overlapping prefix in case the sizes already mismatched
+	for i := 0; i < len(have) && i < len(evict.addrs); i++ {
+		if have[i] != evict.addrs[i] {
+			t.Errorf("heap index for slot %d mismatches: have %v, want %v", i, have[i], evict.addrs[i])
+		}
+	}
+}
+
+// Tests that the price heap can correctly sort its set of transactions based on
+// an input base- and blob fee.
+func TestPriceHeapSorting(t *testing.T) {
+	tests := []struct {
+		execTips []uint64
+		execFees []uint64
+		blobFees []uint64
+
+		basefee uint64
+		blobfee uint64
+
+		order []int
+	}{
+		// If everything is above the basefee and blobfee, order by miner tip
+		{
+			execTips: []uint64{1, 0, 2},
+			execFees: []uint64{1, 2, 3},
+			blobFees: []uint64{3, 2, 1},
+			basefee:  0,
+			blobfee:  0,
+			order:    []int{1, 0, 2},
+		},
+		// If only basefees are used (blob fee matches with network), return the
+		// ones the furthest below the current basefee, splitting same ones with
+		// the tip. Anything above the basefee should be split by tip.
+		{
+			execTips: []uint64{100, 50, 100, 50, 1, 2, 3},
+			execFees: []uint64{1000, 1000, 500, 500, 2000, 2000, 2000},
+			blobFees: []uint64{0, 0, 0, 0, 0, 0, 0},
+			basefee:  1999,
+			blobfee:  0,
+			order:    []int{3, 2, 1, 0, 4, 5, 6},
+		},
+		// If only blobfees are used (base fee matches with network), return the
+		// ones the furthest below the current blobfee, splitting same ones with
+		// the tip. Anything above the blobfee should be split by tip.
+		{
+			execTips: []uint64{100, 50, 100, 50, 1, 2, 3},
+			execFees: []uint64{0, 0, 0, 0, 0, 0, 0},
+			blobFees: []uint64{1000, 1000, 500, 500, 2000, 2000, 2000},
+			basefee:  0,
+			blobfee:  1999,
+			order:    []int{3, 2, 1, 0, 4, 5, 6},
+		},
+		// If both basefee and blobfee is specified, sort by the larger distance
+		// of the two from the current network conditions, splitting same (loglog)
+		// ones via the tip.
+		//
+		// Basefee: 1000
+		// Blobfee: 100
+		//
+		// Tx #0: (800, 80) - 2 jumps below both => priority -1
+		// Tx #1: (630, 63) - 4 jumps below both => priority -2
+		// Tx #2: (800, 63) - 2 jumps below basefee, 4 jumps below blobfee => priority -2 (blob penalty dominates)
+		// Tx #3: (630, 80) - 4 jumps below basefee, 2 jumps below blobfee => priority -2 (base penalty dominates)
+		//
+		// Txs 1, 2, 3 share the same priority, split via tip, prefer 0 as the best
+		{
+			execTips: []uint64{1, 2, 3, 4},
+			execFees: []uint64{800, 630, 800, 630},
+			blobFees: []uint64{80, 63, 63, 80},
+			basefee:  1000,
+			blobfee:  100,
+			order:    []int{1, 2, 3, 0},
+		},
+	}
+	for i, tt := range tests {
+		// Create an index of the transactions
+		index := make(map[common.Address][]*blobTxMeta)
+		for j := byte(0); j < byte(len(tt.execTips)); j++ {
+			addr := common.Address{j}
+
+			var (
+				execTip = uint256.NewInt(tt.execTips[j])
+				execFee = uint256.NewInt(tt.execFees[j])
+				blobFee = uint256.NewInt(tt.blobFees[j])
+
+				basefeeJumps = dynamicFeeJumps(execFee)
+				blobfeeJumps = dynamicFeeJumps(blobFee)
+			)
+			index[addr] = []*blobTxMeta{{
+				id:                   uint64(j),
+				size:                 128 * 1024,
+				nonce:                0,
+				execTipCap:           execTip,
+				execFeeCap:           execFee,
+				blobFeeCap:           blobFee,
+				basefeeJumps:         basefeeJumps,
+				blobfeeJumps:         blobfeeJumps,
+				evictionExecTip:      execTip,
+				evictionExecFeeJumps: basefeeJumps,
+				evictionBlobFeeJumps: blobfeeJumps,
+			}}
+		}
+		// Create a price heap and check the pop order
+		priceheap := newPriceHeap(uint256.NewInt(tt.basefee), uint256.NewInt(tt.blobfee), &index)
+		verifyHeapInternals(t, priceheap)
+
+		for j := 0; j < len(tt.order); j++ {
+			if next := heap.Pop(priceheap); int(next.(common.Address)[0]) != tt.order[j] {
+				t.Errorf("test %d, item %d: order mismatch: have %d, want %d", i, j, next.(common.Address)[0], tt.order[j])
+			} else {
+				delete(index, next.(common.Address)) // remove to simulate a correct pool for the test
+			}
+			verifyHeapInternals(t, priceheap)
+		}
+	}
+}
+
+// Benchmarks reheaping the entire set of accounts in the blob pool, sized for
+// increasingly large storage data caps.
+func BenchmarkPriceHeapReinit1MB(b *testing.B)   { benchmarkPriceHeapReinit(b, 1024*1024) }
+func BenchmarkPriceHeapReinit10MB(b *testing.B)  { benchmarkPriceHeapReinit(b, 10*1024*1024) }
+func BenchmarkPriceHeapReinit100MB(b *testing.B) { benchmarkPriceHeapReinit(b, 100*1024*1024) }
+func BenchmarkPriceHeapReinit1GB(b *testing.B)   { benchmarkPriceHeapReinit(b, 1024*1024*1024) }
+func BenchmarkPriceHeapReinit10GB(b *testing.B)  { benchmarkPriceHeapReinit(b, 10*1024*1024*1024) }
+func BenchmarkPriceHeapReinit25GB(b *testing.B)  { benchmarkPriceHeapReinit(b, 25*1024*1024*1024) }
+func BenchmarkPriceHeapReinit50GB(b *testing.B)  { benchmarkPriceHeapReinit(b, 50*1024*1024*1024) }
+func BenchmarkPriceHeapReinit100GB(b *testing.B) { benchmarkPriceHeapReinit(b, 100*1024*1024*1024) }
+
+func benchmarkPriceHeapReinit(b *testing.B, datacap uint64) {
+	// Calculate how many unique transactions we can fit into the provided disk
+	// data cap
+	blobs := datacap / (params.BlobTxBytesPerFieldElement * params.BlobTxFieldElementsPerBlob)
+
+	// Create a random set of transactions with random fees. Use a separate account
+	// for each transaction to make it worst case.
+	index := make(map[common.Address][]*blobTxMeta)
+	for i := 0; i < int(blobs); i++ {
+		var addr common.Address
+		rand.Read(addr[:])
+
+		var (
+			execTip = uint256.NewInt(rand.Uint64())
+			execFee = uint256.NewInt(rand.Uint64())
+			blobFee = uint256.NewInt(rand.Uint64())
+
+			basefeeJumps = dynamicFeeJumps(execFee)
+			blobfeeJumps = dynamicFeeJumps(blobFee)
+		)
+		index[addr] = []*blobTxMeta{{
+			id:                   uint64(i),
+			size:                 128 * 1024,
+			nonce:                0,
+			execTipCap:           execTip,
+			execFeeCap:           execFee,
+			blobFeeCap:           blobFee,
+			basefeeJumps:         basefeeJumps,
+			blobfeeJumps:         blobfeeJumps,
+			evictionExecTip:      execTip,
+			evictionExecFeeJumps: basefeeJumps,
+			evictionBlobFeeJumps: blobfeeJumps,
+		}}
+	}
+	// Create a price heap and reinit it over and over
+	heap := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), &index)
+
+	// Pre-generate the fee sequences so the timed loop only measures reinit
+	basefees := make([]*uint256.Int, b.N)
+	blobfees := make([]*uint256.Int, b.N)
+	for i := 0; i < b.N; i++ {
+		basefees[i] = uint256.NewInt(rand.Uint64())
+		blobfees[i] = uint256.NewInt(rand.Uint64())
+	}
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		heap.reinit(basefees[i], blobfees[i], true)
+	}
+}
+
+// Benchmarks overflowing the heap over and over (add and then drop), sized for
+// increasingly large storage data caps.
+func BenchmarkPriceHeapOverflow1MB(b *testing.B)   { benchmarkPriceHeapOverflow(b, 1024*1024) }
+func BenchmarkPriceHeapOverflow10MB(b *testing.B)  { benchmarkPriceHeapOverflow(b, 10*1024*1024) }
+func BenchmarkPriceHeapOverflow100MB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024) }
+func BenchmarkPriceHeapOverflow1GB(b *testing.B)   { benchmarkPriceHeapOverflow(b, 1024*1024*1024) }
+func BenchmarkPriceHeapOverflow10GB(b *testing.B)  { benchmarkPriceHeapOverflow(b, 10*1024*1024*1024) }
+func BenchmarkPriceHeapOverflow25GB(b *testing.B)  { benchmarkPriceHeapOverflow(b, 25*1024*1024*1024) }
+func BenchmarkPriceHeapOverflow50GB(b *testing.B)  { benchmarkPriceHeapOverflow(b, 50*1024*1024*1024) }
+func BenchmarkPriceHeapOverflow100GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024*1024) }
+
+func benchmarkPriceHeapOverflow(b *testing.B, datacap uint64) {
+	// Calculate how many unique transactions we can fit into the provided disk
+	// data cap
+	blobs := datacap / (params.BlobTxBytesPerFieldElement * params.BlobTxFieldElementsPerBlob)
+
+	// Create a random set of transactions with random fees. Use a separate account
+	// for each transaction to make it worst case.
+	index := make(map[common.Address][]*blobTxMeta)
+	for i := 0; i < int(blobs); i++ {
+		var addr common.Address
+		rand.Read(addr[:])
+
+		var (
+			execTip = uint256.NewInt(rand.Uint64())
+			execFee = uint256.NewInt(rand.Uint64())
+			blobFee = uint256.NewInt(rand.Uint64())
+
+			basefeeJumps = dynamicFeeJumps(execFee)
+			blobfeeJumps = dynamicFeeJumps(blobFee)
+		)
+		index[addr] = []*blobTxMeta{{
+			id:                   uint64(i),
+			size:                 128 * 1024,
+			nonce:                0,
+			execTipCap:           execTip,
+			execFeeCap:           execFee,
+			blobFeeCap:           blobFee,
+			basefeeJumps:         basefeeJumps,
+			blobfeeJumps:         blobfeeJumps,
+			evictionExecTip:      execTip,
+			evictionExecFeeJumps: basefeeJumps,
+			evictionBlobFeeJumps: blobfeeJumps,
+		}}
+	}
+	// Create a price heap and overflow it over and over
+	evict := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), &index)
+
+	// Pre-generate the overflow transactions so the timed loop only measures
+	// the heap push/pop cycle
+	var (
+		addrs = make([]common.Address, b.N)
+		metas = make([]*blobTxMeta, b.N)
+	)
+	for i := 0; i < b.N; i++ {
+		rand.Read(addrs[i][:])
+
+		var (
+			execTip = uint256.NewInt(rand.Uint64())
+			execFee = uint256.NewInt(rand.Uint64())
+			blobFee = uint256.NewInt(rand.Uint64())
+
+			basefeeJumps = dynamicFeeJumps(execFee)
+			blobfeeJumps = dynamicFeeJumps(blobFee)
+		)
+		metas[i] = &blobTxMeta{
+			id:                   uint64(int(blobs) + i),
+			size:                 128 * 1024,
+			nonce:                0,
+			execTipCap:           execTip,
+			execFeeCap:           execFee,
+			blobFeeCap:           blobFee,
+			basefeeJumps:         basefeeJumps,
+			blobfeeJumps:         blobfeeJumps,
+			evictionExecTip:      execTip,
+			evictionExecFeeJumps: basefeeJumps,
+			evictionBlobFeeJumps: blobfeeJumps,
+		}
+	}
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		index[addrs[i]] = []*blobTxMeta{metas[i]}
+		heap.Push(evict, addrs[i])
+
+		drop := heap.Pop(evict)
+		delete(index, drop.(common.Address))
+	}
+}
diff --git a/core/txpool/blobpool/interface.go b/core/txpool/blobpool/interface.go
new file mode 100644
index 0000000000000..6f296a54bd63d
--- /dev/null
+++ b/core/txpool/blobpool/interface.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/params"
+)
+
+// BlockChain defines the minimal set of methods needed to back a blob pool with
+// a chain. Exists to allow mocking the live chain out of tests.
+type BlockChain interface {
+	// Config retrieves the chain's fork configuration.
+	Config() *params.ChainConfig
+
+	// CurrentBlock returns the current head of the chain.
+	CurrentBlock() *types.Header
+
+	// CurrentFinalBlock returns the current block below which blobs should not
+	// be maintained anymore for reorg purposes.
+	CurrentFinalBlock() *types.Header
+
+	// GetBlock retrieves a specific block, used during pool resets.
+	GetBlock(hash common.Hash, number uint64) *types.Block
+
+	// StateAt returns a state database for a given root hash (generally the head).
+	StateAt(root common.Hash) (*state.StateDB, error)
+}
diff --git a/core/txpool/blobpool/limbo.go b/core/txpool/blobpool/limbo.go
new file mode 100644
index 0000000000000..4cb5042c2bb5a
--- /dev/null
+++ b/core/txpool/blobpool/limbo.go
@@ -0,0 +1,258 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "errors"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/billy"
+)
+
+// limboBlob is a wrapper around an opaque blob set that also contains the tx hash
+// to which it belongs as well as the block number in which it was included for
+// finality eviction.
+type limboBlob struct {
+ Owner common.Hash // Owner transaction's hash to support resurrecting reorged txs
+ Block uint64 // Block in which the blob transaction was included
+
+ Blobs []kzg4844.Blob // The opaque blobs originally part of the transaction
+ Commits []kzg4844.Commitment // The commitments for the original blobs
+ Proofs []kzg4844.Proof // The proofs verifying the commitments
+}
+
+// limbo is a light, indexed database to temporarily store recently included
+// blobs until they are finalized. The purpose is to support small reorgs, which
+// would require pulling back up old blobs (which aren't part of the chain).
+//
+// TODO(karalabe): Currently updating the inclusion block of a blob needs a full db rewrite. Can we do without?
+type limbo struct {
+ store billy.Database // Persistent data store for limboed blobs
+
+ index map[common.Hash]uint64 // Mappings from tx hashes to datastore ids
+ groups map[uint64]map[uint64]common.Hash // Datastore ids and tx hashes of blobs, grouped by inclusion block
+}
+
+// newLimbo opens and indexes a set of limboed blob transactions.
+func newLimbo(datadir string) (*limbo, error) {
+ l := &limbo{
+  index: make(map[common.Hash]uint64),
+  groups: make(map[uint64]map[uint64]common.Hash),
+ }
+ // Index all limboed blobs on disk and delete anything unprocessable
+ var fails []uint64
+ index := func(id uint64, size uint32, data []byte) {
+  if l.parseBlob(id, data) != nil {
+   fails = append(fails, id)
+  }
+ }
+ store, err := billy.Open(billy.Options{Path: datadir}, newSlotter(), index)
+ if err != nil {
+  return nil, err
+ }
+ l.store = store
+
+ if len(fails) > 0 {
+  log.Warn("Dropping invalidated limboed blobs", "ids", fails)
+  for _, id := range fails {
+   if err := l.store.Delete(id); err != nil {
+    l.Close()
+    return nil, err
+   }
+  }
+ }
+ return l, nil
+}
+
+// Close closes the underlying persistent store.
+func (l *limbo) Close() error {
+ return l.store.Close()
+}
+
+// parseBlob is a callback method on limbo creation that gets called for each
+// limboed blob on disk to create the in-memory metadata index.
+func (l *limbo) parseBlob(id uint64, data []byte) error {
+ item := new(limboBlob)
+ if err := rlp.DecodeBytes(data, item); err != nil {
+  // This path is impossible unless the disk data representation changes
+  // across restarts. For that ever improbable case, recover gracefully
+  // by ignoring this data entry.
+  log.Error("Failed to decode blob limbo entry", "id", id, "err", err)
+  return err
+ }
+ if _, ok := l.index[item.Owner]; ok {
+  // This path is impossible, unless due to a programming error a blob gets
+  // inserted into the limbo which was already part of it. Recover gracefully
+  // by ignoring this data entry.
+  log.Error("Dropping duplicate blob limbo entry", "owner", item.Owner, "id", id)
+  return errors.New("duplicate blob")
+ }
+ l.index[item.Owner] = id
+
+ if _, ok := l.groups[item.Block]; !ok {
+  l.groups[item.Block] = make(map[uint64]common.Hash)
+ }
+ l.groups[item.Block][id] = item.Owner
+
+ return nil
+}
+
+// finalize evicts all blobs belonging to a recently finalized block or any
+// older one.
+func (l *limbo) finalize(final *types.Header) {
+ // Just in case there's no final block yet (network not yet merged, weird
+ // restart, sethead, etc), fail gracefully.
+ if final == nil {
+  log.Error("Nil finalized block cannot evict old blobs")
+  return
+ }
+ for block, ids := range l.groups {
+  if block > final.Number.Uint64() {
+   continue
+  }
+  for id, owner := range ids {
+   if err := l.store.Delete(id); err != nil {
+    log.Error("Failed to drop finalized blob", "block", block, "id", id, "err", err)
+   }
+   delete(l.index, owner)
+  }
+  delete(l.groups, block)
+ }
+}
+
+// push stores a new blob transaction into the limbo, waiting until finality for
+// it to be automatically evicted.
+func (l *limbo) push(tx common.Hash, block uint64, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error {
+ // If the blobs are already tracked by the limbo, consider it a programming
+ // error. There's not much to do against it, but be loud.
+ if _, ok := l.index[tx]; ok {
+  log.Error("Limbo cannot push already tracked blobs", "tx", tx)
+  return errors.New("already tracked blob transaction")
+ }
+ if err := l.setAndIndex(tx, block, blobs, commits, proofs); err != nil {
+  log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err)
+  return err
+ }
+ return nil
+}
+
+// pull retrieves a previously pushed set of blobs back from the limbo, removing
+// it at the same time. It is intended for use when a previously included blob
+// transaction gets reorged out.
+func (l *limbo) pull(tx common.Hash) ([]kzg4844.Blob, []kzg4844.Commitment, []kzg4844.Proof, error) {
+ // If the blobs are not tracked by the limbo, there's not much to do. This
+ // can happen for example if a blob transaction is mined without pushing it
+ // into the network first.
+ id, ok := l.index[tx]
+ if !ok {
+  log.Trace("Limbo cannot pull non-tracked blobs", "tx", tx)
+  return nil, nil, nil, errors.New("unseen blob transaction")
+ }
+ item, err := l.getAndDrop(id)
+ if err != nil {
+  log.Error("Failed to get and drop limboed blobs", "tx", tx, "id", id, "err", err)
+  return nil, nil, nil, err
+ }
+ return item.Blobs, item.Commits, item.Proofs, nil
+}
+
+// update changes the block number under which a blob transaction is tracked. This
+// method should be used when a reorg changes a transaction's inclusion block.
+//
+// The method may log errors for various unexpected scenarios but will not return
+// any of them since there's no clear error case. Some errors may be due to coding
+// issues, others caused by signers mining MEV stuff or swapping transactions. In
+// all cases, the pool needs to continue operating.
+func (l *limbo) update(tx common.Hash, block uint64) {
+ // If the blobs are not tracked by the limbo, there's not much to do. This
+ // can happen for example if a blob transaction is mined without pushing it
+ // into the network first.
+ id, ok := l.index[tx]
+ if !ok {
+  log.Trace("Limbo cannot update non-tracked blobs", "tx", tx)
+  return
+ }
+ // If there was no change in the blob's inclusion block, don't mess around
+ // with heavy database operations.
+ if _, ok := l.groups[block][id]; ok {
+  log.Trace("Blob transaction unchanged in limbo", "tx", tx, "block", block)
+  return
+ }
+ // Retrieve the old blobs from the data store and write them back with a new
+ // block number. If anything fails, there's not much to do, go on.
+ item, err := l.getAndDrop(id)
+ if err != nil {
+  log.Error("Failed to get and drop limboed blobs", "tx", tx, "id", id, "err", err)
+  return
+ }
+ if err := l.setAndIndex(tx, block, item.Blobs, item.Commits, item.Proofs); err != nil {
+  log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err)
+  return
+ }
+ log.Trace("Blob transaction updated in limbo", "tx", tx, "old-block", item.Block, "new-block", block)
+}
+
+// getAndDrop retrieves a blob item from the limbo store and deletes it from both
+// the store and the in-memory indices.
+func (l *limbo) getAndDrop(id uint64) (*limboBlob, error) {
+ data, err := l.store.Get(id)
+ if err != nil {
+  return nil, err
+ }
+ item := new(limboBlob)
+ if err = rlp.DecodeBytes(data, item); err != nil {
+  return nil, err
+ }
+ delete(l.index, item.Owner)
+ delete(l.groups[item.Block], id)
+ if len(l.groups[item.Block]) == 0 {
+  delete(l.groups, item.Block)
+ }
+ if err := l.store.Delete(id); err != nil {
+  return nil, err
+ }
+ return item, nil
+}
+
+// setAndIndex assembles a limbo blob database entry and stores it, also updating
+// the in-memory indices.
+func (l *limbo) setAndIndex(tx common.Hash, block uint64, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error {
+ item := &limboBlob{
+  Owner: tx,
+  Block: block,
+  Blobs: blobs,
+  Commits: commits,
+  Proofs: proofs,
+ }
+ data, err := rlp.EncodeToBytes(item)
+ if err != nil {
+  panic(err) // cannot happen at runtime, dev error
+ }
+ id, err := l.store.Put(data)
+ if err != nil {
+  return err
+ }
+ l.index[tx] = id
+ if _, ok := l.groups[block]; !ok {
+  l.groups[block] = make(map[uint64]common.Hash)
+ }
+ l.groups[block][id] = tx
+ return nil
+}
diff --git a/core/txpool/blobpool/metrics.go b/core/txpool/blobpool/metrics.go
new file mode 100644
index 0000000000000..280913b3a9166
--- /dev/null
+++ b/core/txpool/blobpool/metrics.go
@@ -0,0 +1,78 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import "github.com/ethereum/go-ethereum/metrics"
+
+var (
+ // datacapGauge tracks the user's configured capacity for the blob pool. It
+ // is mostly a way to expose/debug issues.
+ datacapGauge = metrics.NewRegisteredGauge("blobpool/datacap", nil)
+
+ // The below metrics track the per-datastore metrics for the primary blob
+ // store and the temporary limbo store.
+ datausedGauge = metrics.NewRegisteredGauge("blobpool/dataused", nil)
+ datarealGauge = metrics.NewRegisteredGauge("blobpool/datareal", nil)
+ slotusedGauge = metrics.NewRegisteredGauge("blobpool/slotused", nil)
+
+ limboDatausedGauge = metrics.NewRegisteredGauge("blobpool/limbo/dataused", nil)
+ limboDatarealGauge = metrics.NewRegisteredGauge("blobpool/limbo/datareal", nil)
+ limboSlotusedGauge = metrics.NewRegisteredGauge("blobpool/limbo/slotused", nil)
+
+ // The below metrics track the per-shelf metrics for the primary blob store
+ // and the temporary limbo store.
+ shelfDatausedGaugeName = "blobpool/shelf-%d/dataused"
+ shelfDatagapsGaugeName = "blobpool/shelf-%d/datagaps"
+ shelfSlotusedGaugeName = "blobpool/shelf-%d/slotused"
+ shelfSlotgapsGaugeName = "blobpool/shelf-%d/slotgaps"
+
+ limboShelfDatausedGaugeName = "blobpool/limbo/shelf-%d/dataused"
+ limboShelfDatagapsGaugeName = "blobpool/limbo/shelf-%d/datagaps"
+ limboShelfSlotusedGaugeName = "blobpool/limbo/shelf-%d/slotused"
+ limboShelfSlotgapsGaugeName = "blobpool/limbo/shelf-%d/slotgaps"
+
+ // The oversized metrics aggregate the shelf stats above the max blob count
+ // limits to track transactions that are just huge, but don't contain blobs.
+ //
+ // There are no oversized data in the limbo, it only contains blobs and some
+ // constant metadata.
+ oversizedDatausedGauge = metrics.NewRegisteredGauge("blobpool/oversized/dataused", nil)
+ oversizedDatagapsGauge = metrics.NewRegisteredGauge("blobpool/oversized/datagaps", nil)
+ oversizedSlotusedGauge = metrics.NewRegisteredGauge("blobpool/oversized/slotused", nil)
+ oversizedSlotgapsGauge = metrics.NewRegisteredGauge("blobpool/oversized/slotgaps", nil)
+
+ // basefeeGauge and blobfeeGauge track the current network 1559 base fee and
+ // 4844 blob fee respectively.
+ basefeeGauge = metrics.NewRegisteredGauge("blobpool/basefee", nil)
+ blobfeeGauge = metrics.NewRegisteredGauge("blobpool/blobfee", nil)
+
+ // pooltipGauge is the configurable miner tip to permit a transaction into
+ // the pool.
+ pooltipGauge = metrics.NewRegisteredGauge("blobpool/pooltip", nil)
+
+ // addwait/time, resetwait/time and getwait/time track the rough health of
+ // the pool and whether or not it's capable of keeping up with the load from
+ // the network.
+ addwaitHist = metrics.NewRegisteredHistogram("blobpool/addwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+ addtimeHist = metrics.NewRegisteredHistogram("blobpool/addtime", nil, metrics.NewExpDecaySample(1028, 0.015))
+ getwaitHist = metrics.NewRegisteredHistogram("blobpool/getwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+ gettimeHist = metrics.NewRegisteredHistogram("blobpool/gettime", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pendwaitHist = metrics.NewRegisteredHistogram("blobpool/pendwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pendtimeHist = metrics.NewRegisteredHistogram("blobpool/pendtime", nil, metrics.NewExpDecaySample(1028, 0.015))
+ resetwaitHist = metrics.NewRegisteredHistogram("blobpool/resetwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+ resettimeHist = metrics.NewRegisteredHistogram("blobpool/resettime", nil, metrics.NewExpDecaySample(1028, 0.015))
+)
diff --git a/core/txpool/blobpool/priority.go b/core/txpool/blobpool/priority.go
new file mode 100644
index 0000000000000..18e545c2a8761
--- /dev/null
+++ b/core/txpool/blobpool/priority.go
@@ -0,0 +1,90 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "math"
+ "math/bits"
+
+ "github.com/holiman/uint256"
+)
+
+// log2_1_125 is the cached value of log2(1.125), used in the eviction priority calculation.
+var log2_1_125 = math.Log2(1.125)
+
+// evictionPriority calculates the eviction priority based on the algorithm
+// described in the BlobPool docs for both fee components.
+//
+// This method takes about 8ns on a very recent laptop CPU, recalculating about
+// 125 million transaction priority values per second.
+func evictionPriority(basefeeJumps float64, txBasefeeJumps, blobfeeJumps, txBlobfeeJumps float64) int {
+ var (
+  basefeePriority = evictionPriority1D(basefeeJumps, txBasefeeJumps)
+  blobfeePriority = evictionPriority1D(blobfeeJumps, txBlobfeeJumps)
+ )
+ if basefeePriority < blobfeePriority {
+  return basefeePriority
+ }
+ return blobfeePriority
+}
+
+// evictionPriority1D calculates the eviction priority based on the algorithm
+// described in the BlobPool docs for a single fee component.
+func evictionPriority1D(basefeeJumps float64, txfeeJumps float64) int {
+ jumps := txfeeJumps - basefeeJumps
+ if int(jumps) == 0 {
+  return 0 // can't log2 0; jump deltas truncating to 0 all map here
+ }
+ if jumps < 0 {
+  return -intLog2(uint(-math.Floor(jumps)))
+ }
+ return intLog2(uint(math.Ceil(jumps)))
+}
+
+// dynamicFeeJumps calculates the log1.125(fee), namely the number of fee jumps
+// needed to reach the requested one. We only use it when calculating the jumps
+// between 2 fees, so it doesn't matter from what exact base it is measured;
+// only the difference between two results carries meaning.
+//
+// This method is very expensive, taking about 75ns on a very recent laptop CPU,
+// but the result does not change with the lifetime of a transaction, so it can
+// be cached.
+func dynamicFeeJumps(fee *uint256.Int) float64 {
+ if fee.IsZero() {
+  return 0 // can't log2 zero, should never happen outside tests, but don't choke
+ }
+ return math.Log2(fee.Float64()) / log2_1_125
+}
+
+// intLog2 is a helper to calculate the integral part of a log2 of an unsigned
+// integer. It is a very specific calculation that's not particularly useful in
+// general, but it's what we need here (it's fast).
+func intLog2(n uint) int {
+ switch {
+ case n == 0:
+  panic("log2(0) is undefined")
+
+ case n < 2048:
+  return bits.UintSize - bits.LeadingZeros(n) - 1
+
+ default:
+  // The input is log1.125(uint256) = log2(uint256) / log2(1.125). At the
+  // most extreme, log2(uint256) will be a bit below 257, and the constant
+  // log2(1.125) ~= 0.17. The largest input thus is ~257 / ~0.17 ~= ~1511.
+  panic("dynamic fee jump diffs cannot reach this")
+ }
+}
diff --git a/core/txpool/blobpool/priority_test.go b/core/txpool/blobpool/priority_test.go
new file mode 100644
index 0000000000000..4aad919925f56
--- /dev/null
+++ b/core/txpool/blobpool/priority_test.go
@@ -0,0 +1,87 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "math/rand"
+ "testing"
+
+ "github.com/holiman/uint256"
+)
+
+// Tests that the eviction priorities are calculated correctly as the log2 of
+// the fee jumps needed to go from the base fee to the tx's fee cap.
+func TestPriorityCalculation(t *testing.T) {
+ tests := []struct {
+  basefee uint64
+  txfee uint64
+  result int
+ }{
+  {basefee: 7, txfee: 10, result: 2}, // 3.02 jumps, 4 ceil, 2 log2
+  {basefee: 17_200_000_000, txfee: 17_200_000_000, result: 0}, // 0 jumps, special case 0 log2
+  {basefee: 9_853_941_692, txfee: 11_085_092_510, result: 0}, // 0.99 jumps, 1 ceil, 0 log2
+  {basefee: 11_544_106_391, txfee: 10_356_781_100, result: 0}, // -0.92 jumps, -1 floor, 0 log2
+  {basefee: 17_200_000_000, txfee: 7, result: -7}, // -183.57 jumps, -184 floor, -7 log2
+  {basefee: 7, txfee: 17_200_000_000, result: 7}, // 183.57 jumps, 184 ceil, 7 log2
+ }
+ for i, tt := range tests {
+  var (
+   baseJumps = dynamicFeeJumps(uint256.NewInt(tt.basefee))
+   feeJumps = dynamicFeeJumps(uint256.NewInt(tt.txfee))
+  )
+  if prio := evictionPriority1D(baseJumps, feeJumps); prio != tt.result {
+   t.Errorf("test %d priority mismatch: have %d, want %d", i, prio, tt.result)
+  }
+ }
+}
+
+// Benchmarks how many dynamic fee jump calculations can be done per unit of time.
+func BenchmarkDynamicFeeJumpCalculation(b *testing.B) {
+ fees := make([]*uint256.Int, b.N)
+ for i := 0; i < b.N; i++ {
+  fees[i] = uint256.NewInt(rand.Uint64())
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+  dynamicFeeJumps(fees[i])
+ }
+}
+
+// Benchmarks how many priority recalculations can be done.
+func BenchmarkPriorityCalculation(b *testing.B) {
+ // The basefee and blob fee is constant for all transactions across a block,
+ // so we can assume their absolute jump counts can be pre-computed.
+ basefee := uint256.NewInt(17_200_000_000) // 17.2 Gwei is the 22.03.2023 zero-emission basefee, random number
+ blobfee := uint256.NewInt(123_456_789_000) // Completely random, no idea what this will be
+
+ basefeeJumps := dynamicFeeJumps(basefee)
+ blobfeeJumps := dynamicFeeJumps(blobfee)
+
+ // The transaction's fee cap and blob fee cap are constant across the life
+ // of the transaction, so we can pre-calculate and cache them.
+ txBasefeeJumps := make([]float64, b.N)
+ txBlobfeeJumps := make([]float64, b.N)
+ for i := 0; i < b.N; i++ {
+  txBasefeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64()))
+  txBlobfeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64()))
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+  evictionPriority(basefeeJumps, txBasefeeJumps[i], blobfeeJumps, txBlobfeeJumps[i])
+ }
+}
diff --git a/core/txpool/blobpool/slotter.go b/core/txpool/blobpool/slotter.go
new file mode 100644
index 0000000000000..35349c3445cf5
--- /dev/null
+++ b/core/txpool/blobpool/slotter.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+// newSlotter creates a helper method for the Billy datastore that returns the
+// individual shelf sizes used to store transactions in.
+//
+// The slotter will create shelves for each possible blob count + some tx metadata
+// wiggle room, up to the max permitted limits.
+//
+// The slotter also creates a shelf for 0-blob transactions. Whilst those are not
+// allowed in the current protocol, having an empty shelf costs next to nothing
+// in resources, and it makes stress testing with junk transactions simpler.
+func newSlotter() func() (uint32, bool) {
+ slotsize := uint32(txAvgSize)
+ slotsize -= uint32(blobSize) // underflows, it's ok, will overflow back in the first return
+
+ return func() (size uint32, done bool) {
+  slotsize += blobSize
+  finished := slotsize > maxBlobsPerTransaction*blobSize+txMaxSize
+
+  return slotsize, finished
+ }
+}
diff --git a/core/txpool/blobpool/slotter_test.go b/core/txpool/blobpool/slotter_test.go
new file mode 100644
index 0000000000000..2751a1872541f
--- /dev/null
+++ b/core/txpool/blobpool/slotter_test.go
@@ -0,0 +1,58 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import "testing"
+
+// Tests that the slotter creates the expected database shelves.
+func TestNewSlotter(t *testing.T) {
+ // Generate the database shelf sizes
+ slotter := newSlotter()
+
+ var shelves []uint32
+ for {
+  shelf, done := slotter()
+  shelves = append(shelves, shelf)
+  if done {
+   break
+  }
+ }
+ // Compare the database shelves to the expected ones
+ want := []uint32{
+  0*blobSize + txAvgSize, // 0 blob + some expected tx infos
+  1*blobSize + txAvgSize, // 1 blob + some expected tx infos
+  2*blobSize + txAvgSize, // 2 blob + some expected tx infos (could be fewer blobs and more tx data)
+  3*blobSize + txAvgSize, // 3 blob + some expected tx infos (could be fewer blobs and more tx data)
+  4*blobSize + txAvgSize, // 4 blob + some expected tx infos (could be fewer blobs and more tx data)
+  5*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+  6*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+  7*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+  8*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+  9*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+  10*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+  11*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+  12*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos >= 4 blobs + max tx metadata size
+ }
+ if len(shelves) != len(want) {
+  t.Errorf("shelves count mismatch: have %d, want %d", len(shelves), len(want))
+ }
+ for i := 0; i < len(shelves) && i < len(want); i++ {
+  if shelves[i] != want[i] {
+   t.Errorf("shelf %d mismatch: have %d, want %d", i, shelves[i], want[i])
+  }
+ }
+}
diff --git a/core/txpool/errors.go b/core/txpool/errors.go
index b8c1c914a347d..bc26550f78ca2 100644
--- a/core/txpool/errors.go
+++ b/core/txpool/errors.go
@@ -34,6 +34,10 @@ var (
// with a different one without the required price bump.
ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
+ // ErrAccountLimitExceeded is returned if a transaction would exceed the number
+ // allowed by a pool for a single account.
+ ErrAccountLimitExceeded = errors.New("account limit exceeded")
+
// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
// maximum allowance of the current block.
ErrGasLimit = errors.New("exceeds block gas limit")
diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
index 5b2f4eea09fac..782f0facb5195 100644
--- a/core/txpool/legacypool/legacypool.go
+++ b/core/txpool/legacypool/legacypool.go
@@ -219,6 +219,7 @@ type LegacyPool struct {
locals *accountSet // Set of local transaction to exempt from eviction rules
journal *journal // Journal of local transaction to back up to disk
+ reserve txpool.AddressReserver // Address reserver to ensure exclusivity across subpools
pending map[common.Address]*list // All currently processable transactions
queue map[common.Address]*list // Queued but non-processable transactions
beats map[common.Address]time.Time // Last heartbeat from each known account
@@ -291,7 +292,10 @@ func (pool *LegacyPool) Filter(tx *types.Transaction) bool {
// head to allow balance / nonce checks. The transaction journal will be loaded
// from disk and filtered based on the provided starting settings. The internal
// goroutines will be spun up and the pool deemed operational afterwards.
-func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header) error {
+func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error {
+ // Set the address reserver to request exclusive access to pooled accounts
+ pool.reserve = reserve
+
// Set the basic pool parameters
pool.gasTip.Store(gasTip)
pool.reset(nil, head)
@@ -365,7 +369,7 @@ func (pool *LegacyPool) loop() {
if time.Since(pool.beats[addr]) > pool.config.Lifetime {
list := pool.queue[addr].Flatten()
for _, tx := range list {
- pool.removeTx(tx.Hash(), true)
+ pool.removeTx(tx.Hash(), true, true)
}
queuedEvictionMeter.Mark(int64(len(list)))
}
@@ -428,7 +432,7 @@ func (pool *LegacyPool) SetGasTip(tip *big.Int) {
// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
drop := pool.all.RemotesBelowTip(tip)
for _, tx := range drop {
- pool.removeTx(tx.Hash(), false)
+ pool.removeTx(tx.Hash(), false, true)
}
pool.priced.Removed(len(drop))
}
@@ -508,11 +512,11 @@ func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction,
// The enforceTips parameter can be used to do an extra filtering on the pending
// transactions and only return those whose **effective** tip is large enough in
// the next pending execution environment.
-func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*types.Transaction {
+func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
pool.mu.Lock()
defer pool.mu.Unlock()
- pending := make(map[common.Address][]*types.Transaction, len(pool.pending))
+ pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending))
for addr, list := range pool.pending {
txs := list.Flatten()
@@ -526,7 +530,18 @@ func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*types.Tr
}
}
if len(txs) > 0 {
- pending[addr] = txs
+ lazies := make([]*txpool.LazyTransaction, len(txs))
+ for i := 0; i < len(txs); i++ {
+ lazies[i] = &txpool.LazyTransaction{
+ Pool: pool,
+ Hash: txs[i].Hash(),
+ Tx: &txpool.Transaction{Tx: txs[i]},
+ Time: txs[i].Time(),
+ GasFeeCap: txs[i].GasFeeCap(),
+ GasTipCap: txs[i].GasTipCap(),
+ }
+ }
+ pending[addr] = lazies
}
}
return pending
@@ -586,6 +601,16 @@ func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error {
State: pool.currentState,
FirstNonceGap: nil, // Pool allows arbitrary arrival order, don't invalidate nonce gaps
+ UsedAndLeftSlots: func(addr common.Address) (int, int) {
+ var have int
+ if list := pool.pending[addr]; list != nil {
+ have += list.Len()
+ }
+ if list := pool.queue[addr]; list != nil {
+ have += list.Len()
+ }
+ return have, math.MaxInt
+ },
ExistingExpenditure: func(addr common.Address) *big.Int {
if list := pool.pending[addr]; list != nil {
return list.totalcost
@@ -632,10 +657,31 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
invalidTxMeter.Mark(1)
return false, err
}
-
// already validated by this point
from, _ := types.Sender(pool.signer, tx)
+ // If the address is not yet known, request exclusivity to track the account
+ // only by this subpool until all transactions are evicted
+ var (
+ _, hasPending = pool.pending[from]
+ _, hasQueued = pool.queue[from]
+ )
+ if !hasPending && !hasQueued {
+ if err := pool.reserve(from, true); err != nil {
+ return false, err
+ }
+ defer func() {
+ // If the transaction is rejected by some post-validation check, remove
+ // the lock on the reservation set.
+ //
+ // Note, `err` here is the named error return, which will be initialized
+ // by a return statement before running deferred methods. Take care with
+ // removing or subscoping err as it will break this clause.
+ if err != nil {
+ pool.reserve(from, false)
+ }
+ }()
+ }
// If the transaction pool is full, discard underpriced transactions
if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
// If the new transaction is underpriced, don't accept it
@@ -690,7 +736,10 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
for _, tx := range drop {
log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
underpricedTxMeter.Mark(1)
- dropped := pool.removeTx(tx.Hash(), false)
+
+ sender, _ := types.Sender(pool.signer, tx)
+ dropped := pool.removeTx(tx.Hash(), false, sender != from) // Don't unreserve the sender of the tx being added if last from the acc
+
pool.changesSinceReorg += dropped
}
}
@@ -1014,8 +1063,14 @@ func (pool *LegacyPool) Has(hash common.Hash) bool {
// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
+//
+// If unreserve is false, the account will not be relinquished to the main txpool
+// even if there are no more references to it. This is used to handle a race when
+// a tx is being added and it evicts a previously scheduled tx from the same
+// account, which could lead to a premature release of the lock.
+//
// Returns the number of transactions removed from the pending queue.
-func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool) int {
+func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bool) int {
// Fetch the transaction we wish to delete
tx := pool.all.Get(hash)
if tx == nil {
@@ -1023,6 +1078,20 @@ func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool) int {
}
addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
+ // If after deletion there are no more transactions belonging to this account,
+ // relinquish the address reservation. It's a bit convoluted to do this, via a
+ // defer, but it's safer vs. the many return pathways.
+ if unreserve {
+ defer func() {
+ var (
+ _, hasPending = pool.pending[addr]
+ _, hasQueued = pool.queue[addr]
+ )
+ if !hasPending && !hasQueued {
+ pool.reserve(addr, false)
+ }
+ }()
+ }
// Remove it from the list of known transactions
pool.all.Remove(hash)
if outofbound {
@@ -1273,7 +1342,7 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
// there's nothing to add
if newNum >= oldNum {
// If we reorged to a same or higher number, then it's not a case of setHead
- log.Warn("Transaction pool reset with missing oldhead",
+ log.Warn("Transaction pool reset with missing old head",
"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
return
}
@@ -1287,7 +1356,7 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
// the firing of newhead-event and _now_: most likely a
// reorg caused by sync-reversion or explicit sethead back to an
// earlier block.
- log.Warn("New head missing in txpool reset", "number", newHead.Number, "hash", newHead.Hash())
+ log.Warn("Transaction pool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash())
return
}
var discarded, included types.Transactions
@@ -1317,7 +1386,13 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
return
}
}
- reinject = types.TxDifference(discarded, included)
+ lost := make([]*types.Transaction, 0, len(discarded))
+ for _, tx := range types.TxDifference(discarded, included) {
+ if pool.Filter(tx) {
+ lost = append(lost, tx)
+ }
+ }
+ reinject = lost
}
}
}
@@ -1402,6 +1477,9 @@ func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.T
if list.Empty() {
delete(pool.queue, addr)
delete(pool.beats, addr)
+ if _, ok := pool.pending[addr]; !ok {
+ pool.reserve(addr, false)
+ }
}
}
return promoted
@@ -1523,7 +1601,7 @@ func (pool *LegacyPool) truncateQueue() {
// Drop all transactions if they are less than the overflow
if size := uint64(list.Len()); size <= drop {
for _, tx := range list.Flatten() {
- pool.removeTx(tx.Hash(), true)
+ pool.removeTx(tx.Hash(), true, true)
}
drop -= size
queuedRateLimitMeter.Mark(int64(size))
@@ -1532,7 +1610,7 @@ func (pool *LegacyPool) truncateQueue() {
// Otherwise drop only last few transactions
txs := list.Flatten()
for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
- pool.removeTx(txs[i].Hash(), true)
+ pool.removeTx(txs[i].Hash(), true, true)
drop--
queuedRateLimitMeter.Mark(1)
}
@@ -1594,6 +1672,9 @@ func (pool *LegacyPool) demoteUnexecutables() {
// Delete the entire pending entry if it became empty.
if list.Empty() {
delete(pool.pending, addr)
+ if _, ok := pool.queue[addr]; !ok {
+ pool.reserve(addr, false)
+ }
}
}
}
diff --git a/core/txpool/legacypool/legacypool2_test.go b/core/txpool/legacypool/legacypool2_test.go
index 5de34588afe2c..a73c1bb8a7724 100644
--- a/core/txpool/legacypool/legacypool2_test.go
+++ b/core/txpool/legacypool/legacypool2_test.go
@@ -84,7 +84,7 @@ func TestTransactionFutureAttack(t *testing.T) {
config.GlobalQueue = 100
config.GlobalSlots = 100
pool := New(config, blockchain)
- pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
fillPool(t, pool)
pending, _ := pool.Stats()
@@ -118,7 +118,7 @@ func TestTransactionFuture1559(t *testing.T) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
- pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Create a number of test accounts, fund them and make transactions
@@ -151,7 +151,7 @@ func TestTransactionZAttack(t *testing.T) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
- pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Create a number of test accounts, fund them and make transactions
fillPool(t, pool)
@@ -222,7 +222,7 @@ func BenchmarkFutureAttack(b *testing.B) {
config.GlobalQueue = 100
config.GlobalSlots = 100
pool := New(config, blockchain)
- pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
fillPool(b, pool)
diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go
index 14612a686cfd8..a8f3dd7d86247 100644
--- a/core/txpool/legacypool/legacypool_test.go
+++ b/core/txpool/legacypool/legacypool_test.go
@@ -24,6 +24,7 @@ import (
"math/big"
"math/rand"
"os"
+ "sync"
"sync/atomic"
"testing"
"time"
@@ -127,6 +128,31 @@ func dynamicFeeTx(nonce uint64, gaslimit uint64, gasFee *big.Int, tip *big.Int,
return tx
}
+// makeAddressReserver builds an in-memory txpool.AddressReserver for tests,
+// tracking reserved addresses in a mutex-guarded set. It panics on a double
+// reservation or on releasing an address that was never reserved, so pool
+// accounting bugs surface immediately in the test run.
+func makeAddressReserver() txpool.AddressReserver {
+ var (
+ reserved = make(map[common.Address]struct{})
+ lock sync.Mutex
+ )
+ return func(addr common.Address, reserve bool) error {
+ lock.Lock()
+ defer lock.Unlock()
+
+ _, exists := reserved[addr]
+ if reserve {
+ if exists {
+ panic("already reserved")
+ }
+ reserved[addr] = struct{}{}
+ return nil
+ }
+ if !exists {
+ panic("not reserved")
+ }
+ delete(reserved, addr)
+ return nil
+ }
+}
+
func setupPool() (*LegacyPool, *ecdsa.PrivateKey) {
return setupPoolWithConfig(params.TestChainConfig)
}
@@ -137,7 +163,7 @@ func setupPoolWithConfig(config *params.ChainConfig) (*LegacyPool, *ecdsa.Privat
key, _ := crypto.GenerateKey()
pool := New(testTxPoolConfig, blockchain)
- if err := pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock()); err != nil {
+ if err := pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()); err != nil {
panic(err)
}
// wait for the pool to initialize
@@ -256,7 +282,7 @@ func TestStateChangeDuringReset(t *testing.T) {
tx1 := transaction(1, 100000, key)
pool := New(testTxPoolConfig, blockchain)
- pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
nonce := pool.Nonce(address)
@@ -455,7 +481,7 @@ func TestChainFork(t *testing.T) {
if _, err := pool.add(tx, false); err != nil {
t.Error("didn't expect error", err)
}
- pool.removeTx(tx.Hash(), true)
+ pool.removeTx(tx.Hash(), true, true)
// reset the pool's internal state
resetState()
@@ -676,7 +702,7 @@ func TestPostponing(t *testing.T) {
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
- pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Create two test accounts to produce different gap profiles with
@@ -893,7 +919,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible)
pool := New(config, blockchain)
- pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Create a number of test accounts and fund them (last one will be the local)
@@ -986,7 +1012,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
config.NoLocals = nolocals
pool := New(config, blockchain)
- pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Create two test accounts to ensure remotes expire but locals do not
@@ -1171,7 +1197,7 @@ func TestPendingGlobalLimiting(t *testing.T) {
config.GlobalSlots = config.AccountSlots * 10
pool := New(config, blockchain)
- pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Create a number of test accounts and fund them
@@ -1275,7 +1301,7 @@ func TestCapClearsFromAll(t *testing.T) {
config.GlobalSlots = 8
pool := New(config, blockchain)
- pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Create a number of test accounts and fund them
@@ -1308,7 +1334,7 @@ func TestPendingMinimumAllowance(t *testing.T) {
config.GlobalSlots = 1
pool := New(config, blockchain)
- pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Create a number of test accounts and fund them
@@ -1354,7 +1380,7 @@ func TestRepricing(t *testing.T) {
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
- pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
@@ -1603,7 +1629,7 @@ func TestRepricingKeepsLocals(t *testing.T) {
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
- pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Create a number of test accounts and fund them
@@ -1681,7 +1707,7 @@ func TestUnderpricing(t *testing.T) {
config.GlobalQueue = 2
pool := New(config, blockchain)
- pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
@@ -1796,7 +1822,7 @@ func TestStableUnderpricing(t *testing.T) {
config.GlobalQueue = 0
pool := New(config, blockchain)
- pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
@@ -2025,7 +2051,7 @@ func TestDeduplication(t *testing.T) {
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
- pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Create a test account to add transactions with
@@ -2092,7 +2118,7 @@ func TestReplacement(t *testing.T) {
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
- pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
@@ -2303,7 +2329,7 @@ func testJournaling(t *testing.T, nolocals bool) {
config.Rejournal = time.Second
pool := New(config, blockchain)
- pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
// Create two test accounts to ensure remotes expire but locals do not
local, _ := crypto.GenerateKey()
@@ -2341,7 +2367,7 @@ func testJournaling(t *testing.T, nolocals bool) {
blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool = New(config, blockchain)
- pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
pending, queued = pool.Stats()
if queued != 0 {
@@ -2368,7 +2394,7 @@ func testJournaling(t *testing.T, nolocals bool) {
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool = New(config, blockchain)
- pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
pending, queued = pool.Stats()
if pending != 0 {
@@ -2399,7 +2425,7 @@ func TestStatusCheck(t *testing.T) {
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
- pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock())
+ pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Create the test accounts to check various transaction statuses with
diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go
index 835f0de97a3bc..70c0918e140c2 100644
--- a/core/txpool/subpool.go
+++ b/core/txpool/subpool.go
@@ -18,6 +18,7 @@ package txpool
import (
"math/big"
+ "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
@@ -36,6 +37,32 @@ type Transaction struct {
BlobTxProofs []kzg4844.Proof // Proofs needed by the blob pool
}
+// LazyTransaction contains a small subset of the transaction properties that is
+// enough for the miner and other APIs to handle large batches of transactions;
+// and supports pulling up the entire transaction when really needed.
+type LazyTransaction struct {
+ Pool SubPool // Transaction subpool to pull the real transaction up
+ Hash common.Hash // Transaction hash to pull up if needed
+ Tx *Transaction // Transaction if already resolved
+
+ Time time.Time // Time when the transaction was first seen
+ GasFeeCap *big.Int // Maximum fee per gas the transaction may consume
+ GasTipCap *big.Int // Maximum miner tip per gas the transaction can pay
+}
+
+// Resolve retrieves the full transaction belonging to a lazy handle if it is still
+// maintained by the transaction pool.
+func (ltx *LazyTransaction) Resolve() *Transaction {
+ if ltx.Tx == nil {
+ ltx.Tx = ltx.Pool.Get(ltx.Hash)
+ }
+ return ltx.Tx
+}
+
+// AddressReserver is passed by the main transaction pool to subpools, so they
+// may request (and relinquish) exclusive access to certain addresses.
+type AddressReserver func(addr common.Address, reserve bool) error
+
// SubPool represents a specialized transaction pool that lives on its own (e.g.
// blob pool). Since independent of how many specialized pools we have, they do
// need to be updated in lockstep and assemble into one coherent view for block
@@ -53,7 +80,7 @@ type SubPool interface {
// These should not be passed as a constructor argument - nor should the pools
// start by themselves - in order to keep multiple subpools in lockstep with
// one another.
- Init(gasTip *big.Int, head *types.Header) error
+ Init(gasTip *big.Int, head *types.Header, reserve AddressReserver) error
// Close terminates any background processing threads and releases any held
// resources.
@@ -81,7 +108,7 @@ type SubPool interface {
// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce.
- Pending(enforceTips bool) map[common.Address][]*types.Transaction
+ Pending(enforceTips bool) map[common.Address][]*LazyTransaction
// SubscribeTransactions subscribes to new transaction events.
SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index 24105babc94fe..b0e91fee6c446 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -17,13 +17,17 @@
package txpool
import (
+ "errors"
"fmt"
"math/big"
+ "sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
)
// TxStatus is the current status of a transaction as seen by the pool.
@@ -36,6 +40,15 @@ const (
TxStatusIncluded
)
+var (
+ // reservationsGaugeName is the prefix of a per-subpool address reservation
+ // metric.
+ //
+ // This is mostly a sanity metric to ensure there's no bug that would make
+ // some subpool hog all the reservations due to mis-accounting.
+ reservationsGaugeName = "txpool/reservations"
+)
+
// BlockChain defines the minimal set of methods needed to back a tx pool with
// a chain. Exists to allow mocking the live chain out of tests.
type BlockChain interface {
@@ -52,9 +65,13 @@ type BlockChain interface {
// They exit the pool when they are included in the blockchain or evicted due to
// resource constraints.
type TxPool struct {
- subpools []SubPool // List of subpools for specialized transaction handling
- subs event.SubscriptionScope // Subscription scope to unscubscribe all on shutdown
- quit chan chan error // Quit channel to tear down the head updater
+ subpools []SubPool // List of subpools for specialized transaction handling
+
+ reservations map[common.Address]SubPool // Map with the account to pool reservations
+ reserveLock sync.Mutex // Lock protecting the account reservations
+
+ subs event.SubscriptionScope // Subscription scope to unsubscribe all on shutdown
+ quit chan chan error // Quit channel to tear down the head updater
}
// New creates a new transaction pool to gather, sort and filter inbound
@@ -66,11 +83,12 @@ func New(gasTip *big.Int, chain BlockChain, subpools []SubPool) (*TxPool, error)
head := chain.CurrentBlock()
pool := &TxPool{
- subpools: subpools,
- quit: make(chan chan error),
+ subpools: subpools,
+ reservations: make(map[common.Address]SubPool),
+ quit: make(chan chan error),
}
for i, subpool := range subpools {
- if err := subpool.Init(gasTip, head); err != nil {
+ if err := subpool.Init(gasTip, head, pool.reserver(i, subpool)); err != nil {
for j := i - 1; j >= 0; j-- {
subpools[j].Close()
}
@@ -81,6 +99,52 @@ func New(gasTip *big.Int, chain BlockChain, subpools []SubPool) (*TxPool, error)
return pool, nil
}
+// reserver is a method to create an address reservation callback to exclusively
+// assign/deassign addresses to/from subpools. This can ensure that at any point
+// in time, only a single subpool is able to manage an account, avoiding cross
+// subpool eviction issues and nonce conflicts.
+func (p *TxPool) reserver(id int, subpool SubPool) AddressReserver {
+ return func(addr common.Address, reserve bool) error {
+ p.reserveLock.Lock()
+ defer p.reserveLock.Unlock()
+
+ owner, exists := p.reservations[addr]
+ if reserve {
+ // Double reservations are forbidden even from the same pool to
+ // avoid subtle bugs in the long term.
+ if exists {
+ if owner == subpool {
+ log.Error("pool attempted to reserve already-owned address", "address", addr)
+ return nil // Ignore fault to give the pool a chance to recover while the bug gets fixed
+ }
+ return errors.New("address already reserved")
+ }
+ p.reservations[addr] = subpool
+ if metrics.Enabled {
+ m := fmt.Sprintf("%s/%d", reservationsGaugeName, id)
+ metrics.GetOrRegisterGauge(m, nil).Inc(1)
+ }
+ return nil
+ }
+ // Ensure subpools only attempt to unreserve their own owned addresses,
+ // otherwise flag as a programming error.
+ if !exists {
+ log.Error("pool attempted to unreserve non-reserved address", "address", addr)
+ return errors.New("address not reserved")
+ }
+ if subpool != owner {
+ log.Error("pool attempted to unreserve non-owned address", "address", addr)
+ return errors.New("address not owned")
+ }
+ delete(p.reservations, addr)
+ if metrics.Enabled {
+ m := fmt.Sprintf("%s/%d", reservationsGaugeName, id)
+ metrics.GetOrRegisterGauge(m, nil).Dec(1)
+ }
+ return nil
+ }
+}
+
// Close terminates the transaction pool and all its subpools.
func (p *TxPool) Close() error {
var errs []error
@@ -242,8 +306,8 @@ func (p *TxPool) Add(txs []*Transaction, local bool, sync bool) []error {
// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce.
-func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*types.Transaction {
- txs := make(map[common.Address][]*types.Transaction)
+func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction {
+ txs := make(map[common.Address][]*LazyTransaction)
for _, subpool := range p.subpools {
for addr, set := range subpool.Pending(enforceTips) {
txs[addr] = set
diff --git a/core/txpool/validation.go b/core/txpool/validation.go
index af678277f868a..67c39a99396f9 100644
--- a/core/txpool/validation.go
+++ b/core/txpool/validation.go
@@ -166,6 +166,11 @@ type ValidationOptionsWithState struct {
// nonce gaps will be ignored and permitted.
FirstNonceGap func(addr common.Address) uint64
+ // UsedAndLeftSlots is a mandatory callback to retrieve the number of tx slots
+ // used and the number still permitted for an account. New transactions will
+ // be rejected once the number of remaining slots reaches zero.
+ UsedAndLeftSlots func(addr common.Address) (int, int)
+
// ExistingExpenditure is a mandatory callback to retrieve the cummulative
// cost of the already pooled transactions to check for overdrafts.
ExistingExpenditure func(addr common.Address) *big.Int
@@ -220,6 +225,12 @@ func ValidateTransactionWithState(tx *types.Transaction, signer types.Signer, op
if balance.Cmp(need) < 0 {
return fmt.Errorf("%w: balance %v, queued cost %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, spent, cost, new(big.Int).Sub(need, balance))
}
+ // Transaction takes a new nonce value out of the pool. Ensure it doesn't
+ // overflow the number of permitted transactions from a single account
+ // (i.e. max cancellable via out-of-bound transaction).
+ if used, left := opts.UsedAndLeftSlots(from); left <= 0 {
+ return fmt.Errorf("%w: pooled %d txs", ErrAccountLimitExceeded, used)
+ }
}
return nil
}
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 579fc00d10436..91a20d32a7930 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -18,7 +18,6 @@ package types
import (
"bytes"
- "container/heap"
"errors"
"io"
"math/big"
@@ -394,6 +393,19 @@ func (tx *Transaction) BlobGasFeeCapIntCmp(other *big.Int) int {
return tx.inner.blobGasFeeCap().Cmp(other)
}
+// SetTime sets the decoding time of a transaction. This is used by tests to set
+// arbitrary times and by persistent transaction pools when loading old txs from
+// disk.
+func (tx *Transaction) SetTime(t time.Time) {
+ tx.time = t
+}
+
+// Time returns the time when the transaction was first seen on the network. It
+// is a heuristic to prefer mining older txs over newer ones, all other things
+// being equal.
+func (tx *Transaction) Time() time.Time {
+ return tx.time
+}
+
// Hash returns the transaction hash.
func (tx *Transaction) Hash() common.Hash {
if hash := tx.hash.Load(); hash != nil {
@@ -502,123 +514,6 @@ func (s TxByNonce) Len() int { return len(s) }
func (s TxByNonce) Less(i, j int) bool { return s[i].Nonce() < s[j].Nonce() }
func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-// TxWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap
-type TxWithMinerFee struct {
- tx *Transaction
- minerFee *big.Int
-}
-
-// NewTxWithMinerFee creates a wrapped transaction, calculating the effective
-// miner gasTipCap if a base fee is provided.
-// Returns error in case of a negative effective miner gasTipCap.
-func NewTxWithMinerFee(tx *Transaction, baseFee *big.Int) (*TxWithMinerFee, error) {
- minerFee, err := tx.EffectiveGasTip(baseFee)
- if err != nil {
- return nil, err
- }
- return &TxWithMinerFee{
- tx: tx,
- minerFee: minerFee,
- }, nil
-}
-
-// TxByPriceAndTime implements both the sort and the heap interface, making it useful
-// for all at once sorting as well as individually adding and removing elements.
-type TxByPriceAndTime []*TxWithMinerFee
-
-func (s TxByPriceAndTime) Len() int { return len(s) }
-func (s TxByPriceAndTime) Less(i, j int) bool {
- // If the prices are equal, use the time the transaction was first seen for
- // deterministic sorting
- cmp := s[i].minerFee.Cmp(s[j].minerFee)
- if cmp == 0 {
- return s[i].tx.time.Before(s[j].tx.time)
- }
- return cmp > 0
-}
-func (s TxByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func (s *TxByPriceAndTime) Push(x interface{}) {
- *s = append(*s, x.(*TxWithMinerFee))
-}
-
-func (s *TxByPriceAndTime) Pop() interface{} {
- old := *s
- n := len(old)
- x := old[n-1]
- old[n-1] = nil
- *s = old[0 : n-1]
- return x
-}
-
-// TransactionsByPriceAndNonce represents a set of transactions that can return
-// transactions in a profit-maximizing sorted order, while supporting removing
-// entire batches of transactions for non-executable accounts.
-type TransactionsByPriceAndNonce struct {
- txs map[common.Address][]*Transaction // Per account nonce-sorted list of transactions
- heads TxByPriceAndTime // Next transaction for each unique account (price heap)
- signer Signer // Signer for the set of transactions
- baseFee *big.Int // Current base fee
-}
-
-// NewTransactionsByPriceAndNonce creates a transaction set that can retrieve
-// price sorted transactions in a nonce-honouring way.
-//
-// Note, the input map is reowned so the caller should not interact any more with
-// if after providing it to the constructor.
-func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address][]*Transaction, baseFee *big.Int) *TransactionsByPriceAndNonce {
- // Initialize a price and received time based heap with the head transactions
- heads := make(TxByPriceAndTime, 0, len(txs))
- for from, accTxs := range txs {
- acc, _ := Sender(signer, accTxs[0])
- wrapped, err := NewTxWithMinerFee(accTxs[0], baseFee)
- // Remove transaction if sender doesn't match from, or if wrapping fails.
- if acc != from || err != nil {
- delete(txs, from)
- continue
- }
- heads = append(heads, wrapped)
- txs[from] = accTxs[1:]
- }
- heap.Init(&heads)
-
- // Assemble and return the transaction set
- return &TransactionsByPriceAndNonce{
- txs: txs,
- heads: heads,
- signer: signer,
- baseFee: baseFee,
- }
-}
-
-// Peek returns the next transaction by price.
-func (t *TransactionsByPriceAndNonce) Peek() *Transaction {
- if len(t.heads) == 0 {
- return nil
- }
- return t.heads[0].tx
-}
-
-// Shift replaces the current best head with the next one from the same account.
-func (t *TransactionsByPriceAndNonce) Shift() {
- acc, _ := Sender(t.signer, t.heads[0].tx)
- if txs, ok := t.txs[acc]; ok && len(txs) > 0 {
- if wrapped, err := NewTxWithMinerFee(txs[0], t.baseFee); err == nil {
- t.heads[0], t.txs[acc] = wrapped, txs[1:]
- heap.Fix(&t.heads, 0)
- return
- }
- }
- heap.Pop(&t.heads)
-}
-
-// Pop removes the best transaction, *not* replacing it with the next one from
-// the same account. This should be used when a transaction cannot be executed
-// and hence all subsequent ones should be discarded from the same account.
-func (t *TransactionsByPriceAndNonce) Pop() {
- heap.Pop(&t.heads)
-}
-
// copyAddressPtr copies an address.
func copyAddressPtr(a *common.Address) *common.Address {
if a == nil {
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index dbe77fa6036aa..a984a9c70952d 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -23,10 +23,8 @@ import (
"errors"
"fmt"
"math/big"
- "math/rand"
"reflect"
"testing"
- "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
@@ -259,152 +257,6 @@ func TestRecipientNormal(t *testing.T) {
}
}
-func TestTransactionPriceNonceSortLegacy(t *testing.T) {
- testTransactionPriceNonceSort(t, nil)
-}
-
-func TestTransactionPriceNonceSort1559(t *testing.T) {
- testTransactionPriceNonceSort(t, big.NewInt(0))
- testTransactionPriceNonceSort(t, big.NewInt(5))
- testTransactionPriceNonceSort(t, big.NewInt(50))
-}
-
-// Tests that transactions can be correctly sorted according to their price in
-// decreasing order, but at the same time with increasing nonces when issued by
-// the same account.
-func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {
- // Generate a batch of accounts to start with
- keys := make([]*ecdsa.PrivateKey, 25)
- for i := 0; i < len(keys); i++ {
- keys[i], _ = crypto.GenerateKey()
- }
- signer := LatestSignerForChainID(common.Big1)
-
- // Generate a batch of transactions with overlapping values, but shifted nonces
- groups := map[common.Address][]*Transaction{}
- expectedCount := 0
- for start, key := range keys {
- addr := crypto.PubkeyToAddress(key.PublicKey)
- count := 25
- for i := 0; i < 25; i++ {
- var tx *Transaction
- gasFeeCap := rand.Intn(50)
- if baseFee == nil {
- tx = NewTx(&LegacyTx{
- Nonce: uint64(start + i),
- To: &common.Address{},
- Value: big.NewInt(100),
- Gas: 100,
- GasPrice: big.NewInt(int64(gasFeeCap)),
- Data: nil,
- })
- } else {
- tx = NewTx(&DynamicFeeTx{
- Nonce: uint64(start + i),
- To: &common.Address{},
- Value: big.NewInt(100),
- Gas: 100,
- GasFeeCap: big.NewInt(int64(gasFeeCap)),
- GasTipCap: big.NewInt(int64(rand.Intn(gasFeeCap + 1))),
- Data: nil,
- })
- if count == 25 && int64(gasFeeCap) < baseFee.Int64() {
- count = i
- }
- }
- tx, err := SignTx(tx, signer, key)
- if err != nil {
- t.Fatalf("failed to sign tx: %s", err)
- }
- groups[addr] = append(groups[addr], tx)
- }
- expectedCount += count
- }
- // Sort the transactions and cross check the nonce ordering
- txset := NewTransactionsByPriceAndNonce(signer, groups, baseFee)
-
- txs := Transactions{}
- for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
- txs = append(txs, tx)
- txset.Shift()
- }
- if len(txs) != expectedCount {
- t.Errorf("expected %d transactions, found %d", expectedCount, len(txs))
- }
- for i, txi := range txs {
- fromi, _ := Sender(signer, txi)
-
- // Make sure the nonce order is valid
- for j, txj := range txs[i+1:] {
- fromj, _ := Sender(signer, txj)
- if fromi == fromj && txi.Nonce() > txj.Nonce() {
- t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce())
- }
- }
- // If the next tx has different from account, the price must be lower than the current one
- if i+1 < len(txs) {
- next := txs[i+1]
- fromNext, _ := Sender(signer, next)
- tip, err := txi.EffectiveGasTip(baseFee)
- nextTip, nextErr := next.EffectiveGasTip(baseFee)
- if err != nil || nextErr != nil {
- t.Errorf("error calculating effective tip")
- }
- if fromi != fromNext && tip.Cmp(nextTip) < 0 {
- t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice())
- }
- }
- }
-}
-
-// Tests that if multiple transactions have the same price, the ones seen earlier
-// are prioritized to avoid network spam attacks aiming for a specific ordering.
-func TestTransactionTimeSort(t *testing.T) {
- // Generate a batch of accounts to start with
- keys := make([]*ecdsa.PrivateKey, 5)
- for i := 0; i < len(keys); i++ {
- keys[i], _ = crypto.GenerateKey()
- }
- signer := HomesteadSigner{}
-
- // Generate a batch of transactions with overlapping prices, but different creation times
- groups := map[common.Address][]*Transaction{}
- for start, key := range keys {
- addr := crypto.PubkeyToAddress(key.PublicKey)
-
- tx, _ := SignTx(NewTransaction(0, common.Address{}, big.NewInt(100), 100, big.NewInt(1), nil), signer, key)
- tx.time = time.Unix(0, int64(len(keys)-start))
-
- groups[addr] = append(groups[addr], tx)
- }
- // Sort the transactions and cross check the nonce ordering
- txset := NewTransactionsByPriceAndNonce(signer, groups, nil)
-
- txs := Transactions{}
- for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
- txs = append(txs, tx)
- txset.Shift()
- }
- if len(txs) != len(keys) {
- t.Errorf("expected %d transactions, found %d", len(keys), len(txs))
- }
- for i, txi := range txs {
- fromi, _ := Sender(signer, txi)
- if i+1 < len(txs) {
- next := txs[i+1]
- fromNext, _ := Sender(signer, next)
-
- if txi.GasPrice().Cmp(next.GasPrice()) < 0 {
- t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice())
- }
- // Make sure time order is ascending if the txs have the same gas price
- if txi.GasPrice().Cmp(next.GasPrice()) == 0 && txi.time.After(next.time) {
- t.Errorf("invalid received time ordering: tx #%d (A=%x T=%v) > tx #%d (A=%x T=%v)", i, fromi[:4], txi.time, i+1, fromNext[:4], next.time)
- }
- }
- }
-}
-
// TestTransactionCoding tests serializing/de-serializing to/from rlp and JSON.
func TestTransactionCoding(t *testing.T) {
key, err := crypto.GenerateKey()
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 80f5bcee61444..30e2493684109 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -301,7 +301,11 @@ func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
pending := b.eth.txPool.Pending(false)
var txs types.Transactions
for _, batch := range pending {
- txs = append(txs, batch...)
+ for _, lazy := range batch {
+ if tx := lazy.Resolve(); tx != nil {
+ txs = append(txs, tx.Tx)
+ }
+ }
}
return txs, nil
}
diff --git a/eth/backend.go b/eth/backend.go
index 63bd864b21eb7..667200bceddaf 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -35,6 +35,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/pruner"
"github.com/ethereum/go-ethereum/core/txpool"
+ "github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -206,12 +207,17 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
eth.bloomIndexer.Start(eth.blockchain)
+ if config.BlobPool.Datadir != "" {
+ config.BlobPool.Datadir = stack.ResolvePath(config.BlobPool.Datadir)
+ }
+ blobPool := blobpool.New(config.BlobPool, eth.blockchain)
+
if config.TxPool.Journal != "" {
config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal)
}
legacyPool := legacypool.New(config.TxPool, eth.blockchain)
- eth.txPool, err = txpool.New(new(big.Int).SetUint64(config.TxPool.PriceLimit), eth.blockchain, []txpool.SubPool{legacyPool})
+ eth.txPool, err = txpool.New(new(big.Int).SetUint64(config.TxPool.PriceLimit), eth.blockchain, []txpool.SubPool{legacyPool, blobPool})
if err != nil {
return nil, err
}
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 5de0055b51933..4bc8b8dc6c6e5 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
@@ -69,6 +70,7 @@ var Defaults = Config{
FilterLogCacheSize: 32,
Miner: miner.DefaultConfig,
TxPool: legacypool.DefaultConfig,
+ BlobPool: blobpool.DefaultConfig,
RPCGasCap: 50000000,
RPCEVMTimeout: 5 * time.Second,
GPO: FullNodeGPO,
@@ -129,7 +131,8 @@ type Config struct {
Miner miner.Config
// Transaction pool options
- TxPool legacypool.Config
+ TxPool legacypool.Config
+ BlobPool blobpool.Config
// Gas Price Oracle options
GPO gasprice.Config
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go
index 82b2d5b6b0007..324fbe380ea38 100644
--- a/eth/ethconfig/gen_config.go
+++ b/eth/ethconfig/gen_config.go
@@ -7,6 +7,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
@@ -43,6 +44,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
FilterLogCacheSize int
Miner miner.Config
TxPool legacypool.Config
+ BlobPool blobpool.Config
GPO gasprice.Config
EnablePreimageRecording bool
DocRoot string `toml:"-"`
@@ -80,6 +82,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.FilterLogCacheSize = c.FilterLogCacheSize
enc.Miner = c.Miner
enc.TxPool = c.TxPool
+ enc.BlobPool = c.BlobPool
enc.GPO = c.GPO
enc.EnablePreimageRecording = c.EnablePreimageRecording
enc.DocRoot = c.DocRoot
@@ -121,6 +124,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
FilterLogCacheSize *int
Miner *miner.Config
TxPool *legacypool.Config
+ BlobPool *blobpool.Config
GPO *gasprice.Config
EnablePreimageRecording *bool
DocRoot *string `toml:"-"`
@@ -215,6 +219,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.TxPool != nil {
c.TxPool = *dec.TxPool
}
+ if dec.BlobPool != nil {
+ c.BlobPool = *dec.BlobPool
+ }
if dec.GPO != nil {
c.GPO = *dec.GPO
}
diff --git a/eth/handler.go b/eth/handler.go
index f0b8d65a12925..2453c08afbc05 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -73,7 +73,7 @@ type txPool interface {
// Pending should return pending transactions.
// The slice should be modifiable by the caller.
- Pending(enforceTips bool) map[common.Address][]*types.Transaction
+ Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction
// SubscribeNewTxsEvent should return an event subscription of
// NewTxsEvent and send events to the given channel.
diff --git a/eth/handler_test.go b/eth/handler_test.go
index 2f776e874b3d8..7451e17012a00 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -101,7 +101,7 @@ func (p *testTxPool) Add(txs []*txpool.Transaction, local bool, sync bool) []err
}
// Pending returns all the transactions known to the pool
-func (p *testTxPool) Pending(enforceTips bool) map[common.Address][]*types.Transaction {
+func (p *testTxPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
p.lock.RLock()
defer p.lock.RUnlock()
@@ -113,7 +113,19 @@ func (p *testTxPool) Pending(enforceTips bool) map[common.Address][]*types.Trans
for _, batch := range batches {
sort.Sort(types.TxByNonce(batch))
}
- return batches
+ pending := make(map[common.Address][]*txpool.LazyTransaction)
+ for addr, batch := range batches {
+ for _, tx := range batch {
+ pending[addr] = append(pending[addr], &txpool.LazyTransaction{
+ Hash: tx.Hash(),
+ Tx: &txpool.Transaction{Tx: tx},
+ Time: tx.Time(),
+ GasFeeCap: tx.GasFeeCap(),
+ GasTipCap: tx.GasTipCap(),
+ })
+ }
+ }
+ return pending
}
// SubscribeNewTxsEvent should return an event subscription of NewTxsEvent and
diff --git a/eth/sync.go b/eth/sync.go
index 89bffbe653bd5..ba7a7427a51a9 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/log"
@@ -36,27 +35,15 @@ const (
// syncTransactions starts sending all currently pending transactions to the given peer.
func (h *handler) syncTransactions(p *eth.Peer) {
- // Assemble the set of transaction to broadcast or announce to the remote
- // peer. Fun fact, this is quite an expensive operation as it needs to sort
- // the transactions if the sorting is not cached yet. However, with a random
- // order, insertions could overflow the non-executable queues and get dropped.
- //
- // TODO(karalabe): Figure out if we could get away with random order somehow
- var txs types.Transactions
- pending := h.txpool.Pending(false)
- for _, batch := range pending {
- txs = append(txs, batch...)
+ var hashes []common.Hash
+ for _, batch := range h.txpool.Pending(false) {
+ for _, tx := range batch {
+ hashes = append(hashes, tx.Hash)
+ }
}
- if len(txs) == 0 {
+ if len(hashes) == 0 {
return
}
- // The eth/65 protocol introduces proper transaction announcements, so instead
- // of dripping transactions across multiple peers, just send the entire list as
- // an announcement and let the remote side decide what they need (likely nothing).
- hashes := make([]common.Hash, len(txs))
- for i, tx := range txs {
- hashes[i] = tx.Hash()
- }
p.AsyncSendPooledTransactionHashes(hashes)
}
diff --git a/go.mod b/go.mod
index 47535d7923868..9c7121c0355ce 100644
--- a/go.mod
+++ b/go.mod
@@ -36,8 +36,9 @@ require (
github.com/gorilla/websocket v1.4.2
github.com/graph-gophers/graphql-go v1.3.0
github.com/hashicorp/go-bexpr v0.1.10
+ github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7
github.com/holiman/bloomfilter/v2 v2.0.3
- github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c
+ github.com/holiman/uint256 v1.2.3
github.com/huin/goupnp v1.0.3
github.com/influxdata/influxdb-client-go/v2 v2.4.0
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c
@@ -59,7 +60,7 @@ require (
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/tyler-smith/go-bip39 v1.1.0
- github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
+ github.com/urfave/cli/v2 v2.24.1
go.uber.org/automaxprocs v1.5.2
golang.org/x/crypto v0.9.0
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc
diff --git a/go.sum b/go.sum
index 4fce0746cc00a..591764e65830b 100644
--- a/go.sum
+++ b/go.sum
@@ -7,7 +7,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSu
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
@@ -233,10 +233,12 @@ github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpx
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw=
+github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
-github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c h1:DZfsyhDK1hnSS5lH8l+JggqzEleHteTYfutAiVlSUM8=
-github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
+github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=
+github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
@@ -439,8 +441,8 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
-github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
-github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
+github.com/urfave/cli/v2 v2.24.1 h1:/QYYr7g0EhwXEML8jO+8OYt5trPnLHS0p3mrgExJ5NU=
+github.com/urfave/cli/v2 v2.24.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
diff --git a/miner/ordering.go b/miner/ordering.go
new file mode 100644
index 0000000000000..4c3055f0d3178
--- /dev/null
+++ b/miner/ordering.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package miner
+
+import (
+ "container/heap"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/core/txpool"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// txWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap
+type txWithMinerFee struct {
+ tx *txpool.LazyTransaction
+ from common.Address
+ fees *big.Int
+}
+
+// newTxWithMinerFee creates a wrapped transaction, calculating the effective
+// miner gasTipCap if a base fee is provided.
+// Returns error in case of a negative effective miner gasTipCap.
+func newTxWithMinerFee(tx *txpool.LazyTransaction, from common.Address, baseFee *big.Int) (*txWithMinerFee, error) {
+ tip := new(big.Int).Set(tx.GasTipCap)
+ if baseFee != nil {
+ if tx.GasFeeCap.Cmp(baseFee) < 0 {
+ return nil, types.ErrGasFeeCapTooLow
+ }
+ tip = math.BigMin(tx.GasTipCap, new(big.Int).Sub(tx.GasFeeCap, baseFee))
+ }
+ return &txWithMinerFee{
+ tx: tx,
+ from: from,
+ fees: tip,
+ }, nil
+}
+
+// txByPriceAndTime implements both the sort and the heap interface, making it useful
+// for all at once sorting as well as individually adding and removing elements.
+type txByPriceAndTime []*txWithMinerFee
+
+func (s txByPriceAndTime) Len() int { return len(s) }
+func (s txByPriceAndTime) Less(i, j int) bool {
+ // If the prices are equal, use the time the transaction was first seen for
+ // deterministic sorting
+ cmp := s[i].fees.Cmp(s[j].fees)
+ if cmp == 0 {
+ return s[i].tx.Time.Before(s[j].tx.Time)
+ }
+ return cmp > 0
+}
+func (s txByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func (s *txByPriceAndTime) Push(x interface{}) {
+ *s = append(*s, x.(*txWithMinerFee))
+}
+
+func (s *txByPriceAndTime) Pop() interface{} {
+ old := *s
+ n := len(old)
+ x := old[n-1]
+ old[n-1] = nil
+ *s = old[0 : n-1]
+ return x
+}
+
+// transactionsByPriceAndNonce represents a set of transactions that can return
+// transactions in a profit-maximizing sorted order, while supporting removing
+// entire batches of transactions for non-executable accounts.
+type transactionsByPriceAndNonce struct {
+ txs map[common.Address][]*txpool.LazyTransaction // Per account nonce-sorted list of transactions
+ heads txByPriceAndTime // Next transaction for each unique account (price heap)
+ signer types.Signer // Signer for the set of transactions
+ baseFee *big.Int // Current base fee
+}
+
+// newTransactionsByPriceAndNonce creates a transaction set that can retrieve
+// price sorted transactions in a nonce-honouring way.
+//
+// Note, the input map is reowned so the caller should not interact any more with
+// it after providing it to the constructor.
+func newTransactionsByPriceAndNonce(signer types.Signer, txs map[common.Address][]*txpool.LazyTransaction, baseFee *big.Int) *transactionsByPriceAndNonce {
+ // Initialize a price and received time based heap with the head transactions
+ heads := make(txByPriceAndTime, 0, len(txs))
+ for from, accTxs := range txs {
+ wrapped, err := newTxWithMinerFee(accTxs[0], from, baseFee)
+ if err != nil {
+ delete(txs, from)
+ continue
+ }
+ heads = append(heads, wrapped)
+ txs[from] = accTxs[1:]
+ }
+ heap.Init(&heads)
+
+ // Assemble and return the transaction set
+ return &transactionsByPriceAndNonce{
+ txs: txs,
+ heads: heads,
+ signer: signer,
+ baseFee: baseFee,
+ }
+}
+
+// Peek returns the next transaction by price.
+func (t *transactionsByPriceAndNonce) Peek() *txpool.LazyTransaction {
+ if len(t.heads) == 0 {
+ return nil
+ }
+ return t.heads[0].tx
+}
+
+// Shift replaces the current best head with the next one from the same account.
+func (t *transactionsByPriceAndNonce) Shift() {
+ acc := t.heads[0].from
+ if txs, ok := t.txs[acc]; ok && len(txs) > 0 {
+ if wrapped, err := newTxWithMinerFee(txs[0], acc, t.baseFee); err == nil {
+ t.heads[0], t.txs[acc] = wrapped, txs[1:]
+ heap.Fix(&t.heads, 0)
+ return
+ }
+ }
+ heap.Pop(&t.heads)
+}
+
+// Pop removes the best transaction, *not* replacing it with the next one from
+// the same account. This should be used when a transaction cannot be executed
+// and hence all subsequent ones should be discarded from the same account.
+func (t *transactionsByPriceAndNonce) Pop() {
+ heap.Pop(&t.heads)
+}
diff --git a/miner/ordering_test.go b/miner/ordering_test.go
new file mode 100644
index 0000000000000..589633e0b8dd2
--- /dev/null
+++ b/miner/ordering_test.go
@@ -0,0 +1,188 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package miner
+
+import (
+ "crypto/ecdsa"
+ "math/big"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/txpool"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+)
+
+func TestTransactionPriceNonceSortLegacy(t *testing.T) {
+ testTransactionPriceNonceSort(t, nil)
+}
+
+func TestTransactionPriceNonceSort1559(t *testing.T) {
+ testTransactionPriceNonceSort(t, big.NewInt(0))
+ testTransactionPriceNonceSort(t, big.NewInt(5))
+ testTransactionPriceNonceSort(t, big.NewInt(50))
+}
+
+// Tests that transactions can be correctly sorted according to their price in
+// decreasing order, but at the same time with increasing nonces when issued by
+// the same account.
+func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {
+ // Generate a batch of accounts to start with
+ keys := make([]*ecdsa.PrivateKey, 25)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ }
+ signer := types.LatestSignerForChainID(common.Big1)
+
+ // Generate a batch of transactions with overlapping values, but shifted nonces
+ groups := map[common.Address][]*txpool.LazyTransaction{}
+ expectedCount := 0
+ for start, key := range keys {
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ count := 25
+ for i := 0; i < 25; i++ {
+ var tx *types.Transaction
+ gasFeeCap := rand.Intn(50)
+ if baseFee == nil {
+ tx = types.NewTx(&types.LegacyTx{
+ Nonce: uint64(start + i),
+ To: &common.Address{},
+ Value: big.NewInt(100),
+ Gas: 100,
+ GasPrice: big.NewInt(int64(gasFeeCap)),
+ Data: nil,
+ })
+ } else {
+ tx = types.NewTx(&types.DynamicFeeTx{
+ Nonce: uint64(start + i),
+ To: &common.Address{},
+ Value: big.NewInt(100),
+ Gas: 100,
+ GasFeeCap: big.NewInt(int64(gasFeeCap)),
+ GasTipCap: big.NewInt(int64(rand.Intn(gasFeeCap + 1))),
+ Data: nil,
+ })
+ if count == 25 && int64(gasFeeCap) < baseFee.Int64() {
+ count = i
+ }
+ }
+ tx, err := types.SignTx(tx, signer, key)
+ if err != nil {
+ t.Fatalf("failed to sign tx: %s", err)
+ }
+ groups[addr] = append(groups[addr], &txpool.LazyTransaction{
+ Hash: tx.Hash(),
+ Tx: &txpool.Transaction{Tx: tx},
+ Time: tx.Time(),
+ GasFeeCap: tx.GasFeeCap(),
+ GasTipCap: tx.GasTipCap(),
+ })
+ }
+ expectedCount += count
+ }
+ // Sort the transactions and cross check the nonce ordering
+ txset := newTransactionsByPriceAndNonce(signer, groups, baseFee)
+
+ txs := types.Transactions{}
+ for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
+ txs = append(txs, tx.Tx.Tx)
+ txset.Shift()
+ }
+ if len(txs) != expectedCount {
+ t.Errorf("expected %d transactions, found %d", expectedCount, len(txs))
+ }
+ for i, txi := range txs {
+ fromi, _ := types.Sender(signer, txi)
+
+ // Make sure the nonce order is valid
+ for j, txj := range txs[i+1:] {
+ fromj, _ := types.Sender(signer, txj)
+ if fromi == fromj && txi.Nonce() > txj.Nonce() {
+ t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce())
+ }
+ }
+ // If the next tx has different from account, the price must be lower than the current one
+ if i+1 < len(txs) {
+ next := txs[i+1]
+ fromNext, _ := types.Sender(signer, next)
+ tip, err := txi.EffectiveGasTip(baseFee)
+ nextTip, nextErr := next.EffectiveGasTip(baseFee)
+ if err != nil || nextErr != nil {
+ t.Errorf("error calculating effective tip: %v, %v", err, nextErr)
+ }
+ if fromi != fromNext && tip.Cmp(nextTip) < 0 {
+ t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice())
+ }
+ }
+ }
+}
+
+// Tests that if multiple transactions have the same price, the ones seen earlier
+// are prioritized to avoid network spam attacks aiming for a specific ordering.
+func TestTransactionTimeSort(t *testing.T) {
+ // Generate a batch of accounts to start with
+ keys := make([]*ecdsa.PrivateKey, 5)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ }
+ signer := types.HomesteadSigner{}
+
+ // Generate a batch of transactions with overlapping prices, but different creation times
+ groups := map[common.Address][]*txpool.LazyTransaction{}
+ for start, key := range keys {
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+
+ tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 100, big.NewInt(1), nil), signer, key)
+ tx.SetTime(time.Unix(0, int64(len(keys)-start)))
+
+ groups[addr] = append(groups[addr], &txpool.LazyTransaction{
+ Hash: tx.Hash(),
+ Tx: &txpool.Transaction{Tx: tx},
+ Time: tx.Time(),
+ GasFeeCap: tx.GasFeeCap(),
+ GasTipCap: tx.GasTipCap(),
+ })
+ }
+ // Sort the transactions and cross check the nonce ordering
+ txset := newTransactionsByPriceAndNonce(signer, groups, nil)
+
+ txs := types.Transactions{}
+ for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
+ txs = append(txs, tx.Tx.Tx)
+ txset.Shift()
+ }
+ if len(txs) != len(keys) {
+ t.Errorf("expected %d transactions, found %d", len(keys), len(txs))
+ }
+ for i, txi := range txs {
+ fromi, _ := types.Sender(signer, txi)
+ if i+1 < len(txs) {
+ next := txs[i+1]
+ fromNext, _ := types.Sender(signer, next)
+
+ if txi.GasPrice().Cmp(next.GasPrice()) < 0 {
+ t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice())
+ }
+ // Make sure time order is ascending if the txs have the same gas price
+ if txi.GasPrice().Cmp(next.GasPrice()) == 0 && txi.Time().After(next.Time()) {
+ t.Errorf("invalid received time ordering: tx #%d (A=%x T=%v) > tx #%d (A=%x T=%v)", i, fromi[:4], txi.Time(), i+1, fromNext[:4], next.Time())
+ }
+ }
+ }
+}
diff --git a/miner/worker.go b/miner/worker.go
index d524bb133dfc6..f05d56703037a 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
@@ -533,12 +534,18 @@ func (w *worker) mainLoop() {
if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
continue
}
- txs := make(map[common.Address][]*types.Transaction, len(ev.Txs))
+ txs := make(map[common.Address][]*txpool.LazyTransaction, len(ev.Txs))
for _, tx := range ev.Txs {
acc, _ := types.Sender(w.current.signer, tx)
- txs[acc] = append(txs[acc], tx)
+ txs[acc] = append(txs[acc], &txpool.LazyTransaction{
+ Hash: tx.Hash(),
+ Tx: &txpool.Transaction{Tx: tx},
+ Time: tx.Time(),
+ GasFeeCap: tx.GasFeeCap(),
+ GasTipCap: tx.GasTipCap(),
+ })
}
- txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
+ txset := newTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
tcount := w.current.tcount
w.commitTransactions(w.current, txset, nil)
@@ -727,24 +734,24 @@ func (w *worker) updateSnapshot(env *environment) {
w.snapshotState = env.state.Copy()
}
-func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) {
+func (w *worker) commitTransaction(env *environment, tx *txpool.Transaction) ([]*types.Log, error) {
var (
snap = env.state.Snapshot()
gp = env.gasPool.Gas()
)
- receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig())
+ receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx.Tx, &env.header.GasUsed, *w.chain.GetVMConfig())
if err != nil {
env.state.RevertToSnapshot(snap)
env.gasPool.SetGas(gp)
return nil, err
}
- env.txs = append(env.txs, tx)
+ env.txs = append(env.txs, tx.Tx)
env.receipts = append(env.receipts, receipt)
return receipt.Logs, nil
}
-func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *atomic.Int32) error {
+func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAndNonce, interrupt *atomic.Int32) error {
gasLimit := env.header.GasLimit
if env.gasPool == nil {
env.gasPool = new(core.GasPool).AddGas(gasLimit)
@@ -764,30 +771,37 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
break
}
// Retrieve the next transaction and abort if all done.
- tx := txs.Peek()
- if tx == nil {
+ ltx := txs.Peek()
+ if ltx == nil {
break
}
+ tx := ltx.Resolve()
+ if tx == nil {
+ log.Warn("Ignoring evicted transaction")
+
+ txs.Pop()
+ continue
+ }
// Error may be ignored here. The error has already been checked
// during transaction acceptance is the transaction pool.
- from, _ := types.Sender(env.signer, tx)
+ from, _ := types.Sender(env.signer, tx.Tx)
// Check whether the tx is replay protected. If we're not in the EIP155 hf
// phase, start ignoring the sender until we do.
- if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
- log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)
+ if tx.Tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
+ log.Trace("Ignoring replay protected transaction", "hash", tx.Tx.Hash(), "eip155", w.chainConfig.EIP155Block)
txs.Pop()
continue
}
// Start executing the transaction
- env.state.SetTxContext(tx.Hash(), env.tcount)
+ env.state.SetTxContext(tx.Tx.Hash(), env.tcount)
logs, err := w.commitTransaction(env, tx)
switch {
case errors.Is(err, core.ErrNonceTooLow):
// New head notification data race between the transaction pool and miner, shift
- log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
+ log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Tx.Nonce())
txs.Shift()
case errors.Is(err, nil):
@@ -799,7 +813,7 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
default:
// Transaction is regarded as invalid, drop all consecutive transactions from
// the same sender because of `nonce-too-high` clause.
- log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
+ log.Debug("Transaction failed, account skipped", "hash", tx.Tx.Hash(), "err", err)
txs.Pop()
}
}
@@ -905,7 +919,7 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err
// Fill the block with all available pending transactions.
pending := w.eth.TxPool().Pending(true)
- localTxs, remoteTxs := make(map[common.Address][]*types.Transaction), pending
+ localTxs, remoteTxs := make(map[common.Address][]*txpool.LazyTransaction), pending
for _, account := range w.eth.TxPool().Locals() {
if txs := remoteTxs[account]; len(txs) > 0 {
delete(remoteTxs, account)
@@ -913,13 +927,13 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err
}
}
if len(localTxs) > 0 {
- txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee)
+ txs := newTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee)
if err := w.commitTransactions(env, txs, interrupt); err != nil {
return err
}
}
if len(remoteTxs) > 0 {
- txs := types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee)
+ txs := newTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee)
if err := w.commitTransactions(env, txs, interrupt); err != nil {
return err
}