diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index b4a7b5fa95c26..2f41f4508ce5d 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -77,6 +77,10 @@ var (
utils.TxPoolAccountQueueFlag,
utils.TxPoolGlobalQueueFlag,
utils.TxPoolLifetimeFlag,
+ utils.BlobPoolDataDirFlag,
+ utils.BlobPoolDataCapFlag,
+ utils.BlobPoolPriceLimitFlag,
+ utils.BlobPoolPriceBumpFlag,
utils.SyncModeFlag,
utils.SyncTargetFlag,
utils.ExitWhenSyncedFlag,
@@ -423,7 +427,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon
}
// Set the gas price to the limits from the CLI and start mining
gasprice := flags.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
- ethBackend.TxPool().SetGasPrice(gasprice)
+ ethBackend.TxPool().SetGasTip(gasprice)
if err := ethBackend.StartMining(); err != nil {
utils.Fatalf("Failed to start mining: %v", err)
}
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index b67671e03b986..c65b35234ab62 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -339,18 +339,18 @@ var (
TxPoolJournalFlag = &cli.StringFlag{
Name: "txpool.journal",
Usage: "Disk journal for local transaction to survive node restarts",
- Value: txpool.DefaultConfig.Journal,
+ Value: ethconfig.Defaults.TxPool.Journal,
Category: flags.TxPoolCategory,
}
TxPoolRejournalFlag = &cli.DurationFlag{
Name: "txpool.rejournal",
Usage: "Time interval to regenerate the local transaction journal",
- Value: txpool.DefaultConfig.Rejournal,
+ Value: ethconfig.Defaults.TxPool.Rejournal,
Category: flags.TxPoolCategory,
}
TxPoolPriceLimitFlag = &cli.Uint64Flag{
Name: "txpool.pricelimit",
- Usage: "Minimum gas price limit to enforce for acceptance into the pool",
+ Usage: "Minimum gas price tip to enforce for acceptance into the pool",
Value: ethconfig.Defaults.TxPool.PriceLimit,
Category: flags.TxPoolCategory,
}
@@ -390,7 +390,31 @@ var (
Value: ethconfig.Defaults.TxPool.Lifetime,
Category: flags.TxPoolCategory,
}
-
+ // Blob transaction pool settings
+ BlobPoolDataDirFlag = &cli.StringFlag{
+ Name: "blobpool.datadir",
+ Usage: "Data directory to store blob transactions in",
+ Value: ethconfig.Defaults.BlobPool.Datadir,
+ Category: flags.BlobPoolCategory,
+ }
+ BlobPoolDataCapFlag = &cli.Uint64Flag{
+ Name: "blobpool.datacap",
+ Usage: "Disk space to allocate for pending blob transactions (soft limit)",
+ Value: ethconfig.Defaults.BlobPool.Datacap,
+ Category: flags.BlobPoolCategory,
+ }
+ BlobPoolPriceLimitFlag = &cli.Uint64Flag{
+ Name: "blobpool.pricelimit",
+ Usage: "Minimum gas price tip to enforce for acceptance into the blob pool",
+ Value: ethconfig.Defaults.BlobPool.PriceLimit,
+ Category: flags.BlobPoolCategory,
+ }
+ BlobPoolPriceBumpFlag = &cli.Uint64Flag{
+ Name: "blobpool.pricebump",
+ Usage: "Price bump percentage to replace an already existing blob transaction",
+ Value: ethconfig.Defaults.BlobPool.PriceBump,
+ Category: flags.BlobPoolCategory,
+ }
// Performance tuning settings
CacheFlag = &cli.IntFlag{
Name: "cache",
diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go
new file mode 100644
index 0000000000000..27cbea05529bb
--- /dev/null
+++ b/core/txpool/blobpool/blobpool.go
@@ -0,0 +1,1282 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package blobpool implements the EIP-4844 blob transaction pool.
+package blobpool
+
+import (
+ "container/heap"
+ "fmt"
+ "math"
+ "math/big"
+ "os"
+ "path/filepath"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/misc"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/txpool"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/billy"
+ "github.com/holiman/uint256"
+)
+
+const (
+ // blobSize is the protocol constrained byte size of a single blob in a
+ // transaction. There can be multiple of these embedded into a single tx.
+ blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement
+
+ // maxBlobsPerTransaction is the maximum number of blobs a single transaction
+ // is allowed to contain. Whilst the spec states it's unlimited, the block
+ // data slots are protocol bound, which implicitly also limit this.
+ maxBlobsPerTransaction = params.BlobTxMaxDataGasPerBlock / params.BlobTxDataGasPerBlob
+
+ // txAvgSize is an approximate byte size of a transaction metadata to avoid
+ // tiny overflows causing all txs to move a shelf higher, wasting disk space.
+ txAvgSize = 4 * 1024
+
+ // txMaxSize is the maximum size a single transaction can have, outside
+ // the included blobs. Since blob transactions are pulled instead of pushed,
+ // and only a small metadata is kept in ram, the rest is on disk, there is
+ // no critical limit that should be enforced. Still, capping it to some sane
+ // limit can never hurt.
+ txMaxSize = 1024 * 1024
+
+ // pendingTransactionStore is the subfolder containing the currently queued
+ // blob transactions.
+ pendingTransactionStore = "queue"
+
+ // limboedTransactionStore is the subfolder containing the currently included
+ // but not yet finalized transaction blobs.
+ limboedTransactionStore = "limbo"
+)
+
+// blobTx is a wrapper around types.BlobTx which also contains the literal blob
+// data along with all the transaction metadata.
+type blobTx struct {
+ Tx *types.Transaction
+ Blobs [][]byte
+}
+
+// blobTxMeta is the minimal subset of types.BlobTx necessary to validate and
+// schedule the blob transactions into the following blocks. Only ever add the
+// bare minimum needed fields to keep the size down (and thus number of entries
+// larger with the same memory consumption).
+type blobTxMeta struct {
+ id uint64 // Storage ID in the pool's persistent store
+ size uint32 // Byte size in the pool's persistent store
+
+ nonce uint64 // Needed to prioritize inclusion order within an account
+ costCap *uint256.Int // Needed to validate cumulative balance sufficiency
+ execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump
+ execFeeCap *uint256.Int // Needed to validate replacement price bump
+ blobFeeCap *uint256.Int // Needed to validate replacement price bump
+
+ basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap
+ blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap
+
+ evictionExecTip *uint256.Int // Worst gas tip across all previous nonces
+ evictionExecFeeJumps float64 // Worst base fee (converted to fee jumps) across all previous nonces
+ evictionBlobFeeJumps float64 // Worst blob fee (converted to fee jumps) across all previous nonces
+}
+
+// newBlobTxMeta retrieves the indexed metadata fields from a blob transaction
+// and assembles a helper struct to track in memory.
+func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
+ // Temporarily permit non-blob txs in with a fake blob fee cap while battle
+ // testing the pool with normal transactions. TODO(karalabe): remove this
+ blobFeeCap := new(uint256.Int)
+ if fee := tx.BlobGasFeeCap(); fee != nil {
+ blobFeeCap = uint256.MustFromBig(fee)
+ }
+ meta := &blobTxMeta{
+ id: id,
+ size: size,
+ nonce: tx.Nonce(),
+ costCap: uint256.MustFromBig(tx.Cost()),
+ execTipCap: uint256.MustFromBig(tx.GasTipCap()),
+ execFeeCap: uint256.MustFromBig(tx.GasFeeCap()),
+ blobFeeCap: blobFeeCap, // TODO(karalabe): uint256.MustFromBig(tx.BlobGasFeeCap()),
+ }
+ meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap)
+ meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap)
+
+ return meta
+}
+
+// BlobTxShim is an extremely tiny subset of a blob transaction that is used by
+// the miner to sort and select transactions to include, requesting the needed
+// data only for those transactions that will really get added to the next block.
+type BlobTxShim struct {
+}
+
+// BlobPool is the transaction pool dedicated to EIP-4844 blob transactions.
+//
+// Blob transactions are special snowflakes that are designed for a very specific
+// purpose (rollups) and are expected to adhere to that specific use case. These
+// behavioral expectations allow us to design a transaction pool that is more robust
+// (i.e. resending issues) and more resilient to DoS attacks (e.g. replace-flush
+// attacks) than the generic tx pool. These improvements will also mean, however,
+// that we enforce a significantly more aggressive strategy on entering and exiting
+// the pool:
+//
+// - Blob transactions are large. With the initial design aiming for 128KB blobs,
+// we must ensure that these only traverse the network the absolute minimum
+// number of times. Broadcasting to sqrt(peers) is out of the question, rather
+// these should only ever be announced and the remote side should request it if
+// it wants to.
+//
+// - Block blob-space is limited. With blocks being capped to a few blob txs, we
+// can make use of the very low expected churn rate within the pool. Notably,
+// we should be able to use a persistent disk backend for the pool, solving
+// the tx resend issue that plagues the generic tx pool, as long as there's no
+// artificial churn (i.e. pool wars).
+//
+// - Purpose of blobs are layer-2s. Layer-2s are meant to use blob transactions to
+// commit to their own current state, which is independent of Ethereum mainnet
+// (state, txs). This means that there's no reason for blob tx cancellation or
+// replacement, apart from a potential basefee / miner tip adjustment.
+//
+// - Replacements are expensive. Given their size, propagating a replacement
+// blob transaction to an existing one should be aggressively discouraged.
+// Whilst generic transactions can start at 1 Wei gas cost and require a 10%
+// fee bump to replace, we suggest requiring a higher min cost (e.g. 1 gwei)
+// and a more aggressive bump (100%).
+//
+// - Cancellation is prohibitive. Evicting an already propagated blob tx is a huge
+// DoS vector. As such, a) replacement (higher-fee) blob txs mustn't invalidate
+// already propagated (future) blob txs (cumulative fee); b) nonce-gapped blob
+// txs are disallowed; c) the presence of blob transactions exclude non-blob
+// transactions.
+//
+// - Local txs are meaningless. Mining pools historically used local transactions
+// for payouts or for backdoor deals. With 1559 in place, the basefee usually
+// dominates the final price, so 0 or non-0 tip doesn't change much. Blob txs
+// retain the 1559 2D gas pricing (and introduce on top a dynamic data gas fee),
+// so locality is moot. With a disk backed blob pool avoiding the resend issue,
+// there's also no need to save own transactions for later.
+//
+// - No-blob blob-txs are bad. Theoretically there's no strong reason to disallow
+// blob txs containing 0 blobs. In practice, admitting such txs into the pool
+// breaks the low-churn invariant as blob constraints don't apply anymore. Even
+// though we could accept blocks containing such txs, a reorg would require moving
+// them back into the blob pool, which can break invariants.
+//
+// - Dropping blobs needs delay. When normal transactions are included, they
+// are immediately evicted from the pool since they are contained in the
+// including block. Blobs however are not included in the execution chain,
+// so a mini reorg cannot re-pool "lost" blob transactions. To support reorgs,
+// blobs are retained on disk until they are finalized.
+//
+// - Blobs can arrive via flashbots. Blocks might contain blob transactions we
+// have never seen on the network. Since we cannot recover them from blocks
+// either, the engine_newPayload needs to give them to us, and we cache them
+// until finality to support reorgs without tx losses.
+//
+// Whilst some constraints above might sound overly aggressive, the general idea is
+// that the blob pool should work robustly for its intended use case and whilst
+// anyone is free to use blob transactions for arbitrary non-rollup use cases,
+// they should not be allowed to run amok the network.
+//
+// Implementation wise there are a few interesting design choices:
+//
+// - Adding a transaction to the pool blocks until persisted to disk. This is
+// viable because TPS is low (2-4 blobs per block initially, maybe 8-16 at
+// peak), so natural churn is a couple MB per block. Replacements doing O(n)
+// updates are forbidden and transaction propagation is pull based (i.e. no
+// pileup of pending data).
+//
+// - When transactions are chosen for inclusion, the primary criteria is the
+// signer tip (and having a basefee/data fee high enough of course). However,
+// same-tip transactions will be split by their basefee/datafee, preferring
+// those that are closer to the current network limits. The idea being that
+// very relaxed ones can be included even if the fees go up, when the closer
+// ones could already be invalid.
+//
+// When the pool eventually reaches saturation, some old transactions - that may
+// never execute - will need to be evicted in favor of newer ones. The eviction
+// strategy is quite complex:
+//
+// - Exceeding capacity evicts the highest-nonce of the account with the lowest
+// paying blob transaction anywhere in the pooled nonce-sequence, as that tx
+// would be executed the furthest in the future and is thus blocking anything
+// after it. The smallest is deliberately not evicted to avoid a nonce-gap.
+//
+// - Analogously, if the pool is full, the consideration price of a new tx for
+// evicting an old one is the smallest price in the entire nonce-sequence of
+// the account. This avoids malicious users DoSing the pool with seemingly
+// high paying transactions hidden behind a low-paying blocked one.
+//
+// - Since blob transactions have 3 price parameters: execution tip, execution
+// fee cap and data fee cap, there's no singular parameter to create a total
+// price ordering on. What's more, since the base fee and blob fee can move
+// independently of one another, there's no pre-defined way to combine them
+// into a stable order either. This leads to a multi-dimensional problem to
+// solve after every block.
+//
+// - The first observation is that comparing 1559 base fees or 4844 blob fees
+// needs to happen in the context of their dynamism. Since these fees jump
+// up or down in ~1.125 multipliers (at max) across blocks, comparing fees
+// in two transactions should be based on log1.125(fee) to eliminate noise.
+//
+// - The second observation is that the basefee and blobfee move independently,
+// so there's no way to split mixed txs on their own (A has higher base fee,
+// B has higher blob fee). Rather than look at the absolute fees, the useful
+// metric is the max time it can take to exceed the transaction's fee caps.
+// Specifically, we're interested in the number of jumps needed to go from
+// the current fee to the transaction's cap:
+//
+// jumps = log1.125(txfee) - log1.125(basefee)
+//
+// - The third observation is that the base fee tends to hover around rather
+// than swing wildly. The number of jumps needed from the current fee starts
+// to get less relevant the higher it is. To remove the noise here too, the
+// pool will use log(jumps) as the delta for comparing transactions.
+//
+// delta = sign(jumps) * log(abs(jumps))
+//
+// - To establish a total order, we need to reduce the dimensionality of the
+// two base fees (log jumps) to a single value. The interesting aspect from
+// the pool's perspective is how fast will a tx get executable (fees going
+// down, crossing the smaller negative jump counter) or non-executable (fees
+// going up, crossing the smaller positive jump counter). As such, the pool
+// cares only about the min of the two delta values for eviction priority.
+//
+// priority = min(delta-basefee, delta-blobfee)
+//
+// - The above very aggressive dimensionality and noise reduction should result
+// in transactions being grouped into a small number of buckets, the further
+// the fees the larger the buckets. This is good because it allows us to use
+// the miner tip meaningfully as a splitter.
+//
+// - For the scenario where the pool does not contain non-executable blob txs
+// anymore, it does not make sense to grant a later eviction priority to txs
+// with high fee caps since it could enable pool wars. As such, any positive
+// priority will be grouped together.
+//
+// priority = min(delta-basefee, delta-blobfee, 0)
+//
+// Optimization tradeoffs:
+//
+// - Eviction relies on 3 fee minimums per account (exec tip, exec cap and blob
+// cap). Maintaining these values across all transactions from the account is
+// problematic as each transaction replacement or inclusion would require a
+// rescan of all other transactions to recalculate the minimum. Instead, the
+// pool maintains a rolling minimum across the nonce range. Updating all the
+// minimums will need to be done only starting at the swapped in/out nonce
+// and leading up to the first no-change.
+type BlobPool struct {
+ config Config // Pool configuration
+
+ store billy.Database // Persistent data store for the tx metadata and blobs
+ stored uint64 // Useful data size of all transactions on disk
+ limbo *limbo // Persistent data store for the non-finalized blobs
+
+ signer types.Signer // Transaction signer to use for sender recovery
+ chain BlockChain // Chain object to access the state through
+
+ head *types.Header // Current head of the chain
+ state *state.StateDB // Current state at the head of the chain
+ gasTip *uint256.Int // Currently accepted minimum gas tip
+
+ index map[common.Address][]*blobTxMeta // Blob transactions grouped by accounts, sorted by nonce
+ spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts
+ evict *evictHeap // Heap of cheapest accounts for eviction when full
+
+ lock sync.Mutex // Mutex protecting the pool during reorg handling
+}
+
+// New creates a new blob transaction pool to gather, sort and filter inbound
+// blob transactions from the network.
+func New(config Config, chain BlockChain) (*BlobPool, error) {
+ // Sanitize the input to ensure no vulnerable gas prices are set
+ config = (&config).sanitize()
+
+ var (
+ queuedir = filepath.Join(config.Datadir, pendingTransactionStore)
+ limbodir = filepath.Join(config.Datadir, limboedTransactionStore)
+ )
+ // Create the transaction pool with its initial settings
+ head := chain.CurrentBlock()
+ state, err := chain.StateAt(head.Root)
+ if err != nil {
+ return nil, err
+ }
+ pool := &BlobPool{
+ config: config,
+ signer: types.LatestSigner(chain.Config()),
+ chain: chain,
+ head: head,
+ state: state,
+ index: make(map[common.Address][]*blobTxMeta),
+ spent: make(map[common.Address]*uint256.Int),
+ }
+ // Index all transactions on disk and delete anything unprocessable
+ var fails []uint64
+ index := func(id uint64, size uint32, blob []byte) {
+ if pool.parseTransaction(id, size, blob) != nil {
+ fails = append(fails, id)
+ }
+ }
+ if err := os.MkdirAll(queuedir, 0700); err != nil {
+ return nil, err
+ }
+ store, err := billy.Open(billy.Options{Path: queuedir}, newSlotter(), index)
+ if err != nil {
+ return nil, err
+ }
+ pool.store = store
+
+ if len(fails) > 0 {
+ log.Warn("Dropping invalidated blob transactions", "ids", fails)
+ for _, id := range fails {
+ if err := pool.store.Delete(id); err != nil {
+ pool.Close()
+ return nil, err
+ }
+ }
+ }
+ // Sort the indexed transactions by nonce and delete anything gapped, create
+ // the eviction heap of anyone still standing
+ for addr, _ := range pool.index {
+ pool.recheck(addr, nil)
+ }
+ var (
+ basefee = uint256.MustFromBig(misc.CalcBaseFee(chain.Config(), pool.head))
+ blobfee = uint256.MustFromBig(misc.CalcBlobFee(pool.head.ExcessDataGas))
+ )
+ pool.evict = newPriceHeap(basefee, blobfee, &pool.index)
+
+ // Pool initialized, attach the blob limbo to it to track blobs included
+ // recently but not yet finalized
+ pool.limbo, err = newLimbo(limbodir)
+ if err != nil {
+ pool.Close()
+ return nil, err
+ }
+ // Set the configured gas tip, triggering a filtering of anything just loaded
+ basefeeGauge.Update(int64(basefee.Uint64()))
+ blobfeeGauge.Update(int64(blobfee.Uint64()))
+
+ pool.SetGasTip(new(big.Int).SetUint64(config.PriceLimit))
+
+ // Since the user might have modified their pool's capacity, evict anything
+ // above the current allowance
+ for pool.stored > pool.config.Datacap {
+ pool.drop()
+ }
+ // Update the metrics and return the constructed pool
+ datacapGauge.Update(int64(pool.config.Datacap))
+ pool.updateStorageMetrics()
+
+ return pool, nil
+}
+
+// Close closes down the underlying persistent store.
+func (p *BlobPool) Close() error {
+ var errs []error
+ if err := p.limbo.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ if err := p.store.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ switch {
+ case errs == nil:
+ return nil
+ case len(errs) == 1:
+ return errs[0]
+ default:
+ return fmt.Errorf("%v", errs)
+ }
+}
+
+// parseTransaction is a callback method on pool creation that gets called for
+// each transaction on disk to create the in-memory metadata index.
+func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
+ item := new(blobTx)
+ if err := rlp.DecodeBytes(blob, item); err != nil {
+ // This path is impossible unless the disk data representation changes
+ // across restarts. For that ever improbable case, recover gracefully
+ // by ignoring this data entry.
+ log.Error("Failed to decode blob pool entry", "id", id, "err", err)
+ return err
+ }
+ meta := newBlobTxMeta(id, size, item.Tx)
+
+ sender, err := p.signer.Sender(item.Tx)
+ if err != nil {
+ // This path is impossible unless the signature validity changes across
+ // restarts. For that ever improbable case, recover gracefully by ignoring
+ // this data entry.
+ log.Error("Failed to recover blob tx sender", "id", id, "hash", item.Tx.Hash(), "err", err)
+ return err
+ }
+ if _, ok := p.index[sender]; !ok {
+ p.index[sender] = []*blobTxMeta{}
+ p.spent[sender] = new(uint256.Int)
+ }
+ p.index[sender] = append(p.index[sender], meta)
+ p.spent[sender] = new(uint256.Int).Add(p.spent[sender], meta.costCap)
+
+ p.stored += uint64(meta.size)
+ return nil
+}
+
+// recheck verifies the pool's content for a specific account and drops anything
+// that does not fit anymore (dangling or filled nonce, overdraft).
+func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint64) {
+ // Sort the transactions belonging to the account so reinjects can be simpler
+ txs := p.index[addr]
+ if inclusions != nil && txs == nil { // during reorgs, we might find new accounts
+ return
+ }
+ sort.Slice(txs, func(i, j int) bool {
+ return txs[i].nonce < txs[j].nonce
+ })
+ // If there is a gap between the chain state and the blob pool, drop
+ // all the transactions as they are non-executable. Similarly, if the
+ // entire tx range was included, drop all.
+ var (
+ next = p.state.GetNonce(addr)
+ gapped = txs[0].nonce > next
+ filled = txs[len(txs)-1].nonce < next
+ )
+ if gapped || filled {
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for i := 0; i < len(txs); i++ {
+ ids = append(ids, txs[i].id)
+ nonces = append(nonces, txs[i].nonce)
+
+ p.stored -= uint64(txs[i].size)
+
+ // Included transactions blobs need to be moved to the limbo
+ if filled && inclusions != nil {
+ p.offload(addr, txs[i].nonce, txs[i].id, inclusions)
+ }
+ }
+ delete(p.index, addr)
+ delete(p.spent, addr)
+ if inclusions != nil { // only during reorgs will the heap be initialized
+ heap.Remove(p.evict, p.evict.index[addr])
+ }
+ if gapped {
+ log.Warn("Dropping dangling blob transactions", "from", addr, "missing", next, "drop", nonces, "ids", ids)
+ } else {
+ log.Warn("Dropping filled blob transactions", "from", addr, "filled", nonces, "ids", ids)
+ }
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ return
+ }
+ // If there is overlap between the chain state and the blob pool, drop
+ // anything below the current state
+ if txs[0].nonce < next {
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for txs[0].nonce < next {
+ ids = append(ids, txs[0].id)
+ nonces = append(nonces, txs[0].nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[0].costCap)
+ p.stored -= uint64(txs[0].size)
+
+ // Included transactions blobs need to be moved to the limbo
+ if filled && inclusions != nil {
+ p.offload(addr, txs[0].nonce, txs[0].id, inclusions)
+ }
+ txs = txs[1:]
+ }
+ log.Warn("Dropping overlapped blob transactions", "from", addr, "overlapped", nonces, "ids", ids, "left", len(txs))
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ p.index[addr] = txs
+ }
+ // Iterate over the transactions to initialize their eviction thresholds
+ // and to detect any nonce gaps
+ txs[0].evictionExecTip = txs[0].execTipCap
+ txs[0].evictionExecFeeJumps = txs[0].basefeeJumps
+ txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
+
+ for i := 1; i < len(txs); i++ {
+ // If there's no nonce gap, initialize the eviction thresholds as the
+ // minimum between the cumulative thresholds and the current tx fees
+ if txs[i].nonce == txs[i-1].nonce+1 {
+ txs[i].evictionExecTip = txs[i-1].evictionExecTip
+ if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
+ txs[i].evictionExecTip = txs[i].execTipCap
+ }
+ txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
+ if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
+ txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
+ }
+ txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
+ if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
+ txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
+ }
+ continue
+ }
+ // Sanity check that there's no double nonce. This case would be a coding
+ // error, but better know about it
+ if txs[i].nonce == txs[i-1].nonce {
+ log.Error("Duplicate nonce blob transaction", "from", addr, "nonce", txs[i].nonce)
+ }
+ // Otherwise if there's a nonce gap evict all later transactions
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for j := i; j < len(txs); j++ {
+ ids = append(ids, txs[j].id)
+ nonces = append(nonces, txs[j].nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[j].costCap)
+ p.stored -= uint64(txs[j].size)
+ }
+ p.index[addr] = txs[:i]
+
+ log.Warn("Dropping gapped blob transactions", "from", addr, "missing", txs[i-1].nonce+1, "drop", nonces, "ids", ids)
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ break
+ }
+ // Ensure that there's no over-draft, this is expected to happen when some
+ // transactions get included without publishing on the network
+ var (
+ balance = p.state.GetBalance(addr)
+ spent = p.spent[addr]
+ )
+ if spent.ToBig().Cmp(balance) > 0 {
+ // Evict the highest nonce transactions until the pending set falls under
+ // the account's available balance
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for p.spent[addr].ToBig().Cmp(balance) > 0 {
+ last := txs[len(txs)-1]
+ txs[len(txs)-1] = nil
+ txs = txs[:len(txs)-1]
+
+ ids = append(ids, last.id)
+ nonces = append(nonces, last.nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
+ p.stored -= uint64(last.size)
+ }
+ if len(txs) == 0 {
+ delete(p.index, addr)
+ delete(p.spent, addr)
+ if inclusions != nil { // only during reorgs will the heap be initialized
+ heap.Remove(p.evict, p.evict.index[addr])
+ }
+ } else {
+ p.index[addr] = txs
+ }
+ log.Warn("Dropping overdrafted blob transactions", "from", addr, "balance", balance, "spent", spent, "drop", nonces, "ids", ids)
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ }
+}
+
+// offload removes a tracked blob transaction from the pool and moves it into the
+// limbo for tracking until finality.
+//
+// The method may log errors for various unexpected scenarios but will not return
+// any of it since there's no clear error case. Some errors may be due to coding
+// issues, others caused by signers mining MEV stuff or swapping transactions. In
+// all cases, the pool needs to continue operating.
+func (p *BlobPool) offload(addr common.Address, nonce uint64, id uint64, inclusions map[common.Hash]uint64) {
+ data, err := p.store.Get(id)
+ if err != nil {
+ log.Error("Blobs missing for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
+ return
+ }
+ item := new(blobTx)
+ if err = rlp.DecodeBytes(data, item); err != nil {
+ log.Error("Blobs corrupted for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
+ return
+ }
+ block, ok := inclusions[item.Tx.Hash()]
+ if !ok {
+ log.Warn("Blob transaction swapped out by signer", "from", addr, "nonce", nonce, "id", id)
+ return
+ }
+ if err := p.limbo.push(item.Tx.Hash(), block, item.Blobs); err != nil {
+ log.Warn("Failed to offload blob tx into limbo", "err", err)
+ return
+ }
+}
+
+// Reset implements txpool.SubPool, allowing the blob pool's internal state to be
+// kept in sync with the main transaction pool's internal state.
+func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
+ waitStart := time.Now()
+ p.lock.Lock()
+ resetwaitHist.Update(time.Since(waitStart).Nanoseconds())
+ defer p.lock.Unlock()
+
+ defer func(start time.Time) {
+ resettimeHist.Update(time.Since(start).Nanoseconds())
+ }(time.Now())
+
+ statedb, err := p.chain.StateAt(newHead.Root)
+ if err != nil {
+ log.Error("Failed to reset blobpool state", "err", err)
+ return
+ }
+ p.head = newHead
+ p.state = statedb
+
+ // Run the reorg between the old and new head and figure out which accounts
+ // need to be rechecked and which transactions need to be readded
+ if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil {
+ for addr, txs := range reinject {
+ // Blindly push all the lost transactions back into the pool
+ for _, tx := range txs {
+ p.reinject(addr, tx)
+ }
+ // Recheck the account's pooled transactions to drop included and
+ // invalidated one
+ p.recheck(addr, inclusions)
+ }
+ }
+ // Flush out any blobs from limbo that are older than the latest finality
+ p.limbo.finalize(p.chain.CurrentFinalBlock())
+
+ // Reset the price heap for the new set of basefee/blobfee pairs
+ var (
+ basefee = uint256.MustFromBig(misc.CalcBaseFee(p.chain.Config(), newHead))
+ blobfee = uint256.MustFromBig(misc.CalcBlobFee(newHead.ExcessDataGas))
+ )
+ p.evict.reinit(basefee, blobfee, false)
+
+ basefeeGauge.Update(int64(basefee.Uint64()))
+ blobfeeGauge.Update(int64(blobfee.Uint64()))
+ p.updateStorageMetrics()
+}
+
+// reorg assembles all the transactors and missing transactions between an old
+// and new head to figure out which account's tx set needs to be rechecked and
+// which transactions need to be requeued.
+//
+// The transaction block inclusion infos are also returned to allow tracking any
+// just-included blocks by block number in the limbo.
+func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address]types.Transactions, map[common.Hash]uint64) {
+	// If the pool was not yet initialized, don't do anything
+	if oldHead == nil {
+		return nil, nil
+	}
+	// If the reorg is too deep, avoid doing it (will happen during snap sync)
+	oldNum := oldHead.Number.Uint64()
+	newNum := newHead.Number.Uint64()
+
+	if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
+		return nil, nil
+	}
+	// Reorg seems shallow enough to pull in all transactions into memory
+	var (
+		transactors = make(map[common.Address]struct{})
+		discarded   = make(map[common.Address]types.Transactions)
+		included    = make(map[common.Address]types.Transactions)
+		inclusions  = make(map[common.Hash]uint64)
+
+		rem = p.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
+		add = p.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
+	)
+	if rem == nil {
+		// This can happen if a setHead is performed, where we simply discard
+		// the old head from the chain. If that is the case, we don't have the
+		// lost transactions anymore, and there's nothing to add.
+		if newNum >= oldNum {
+			// If we reorged to a same or higher number, then it's not a case
+			// of setHead
+			log.Warn("Blobpool reset with missing old head",
+				"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
+			return nil, nil
+		}
+		// If the reorg ended up on a lower number, it's indicative of setHead
+		// being the cause
+		log.Debug("Skipping blobpool reset caused by setHead",
+			"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
+		return nil, nil
+	}
+	// Both old and new blocks exist, traverse through the progression chain
+	// and accumulate the transactors and transactions
+	for rem.NumberU64() > add.NumberU64() {
+		for _, tx := range rem.Transactions() {
+			from, _ := p.signer.Sender(tx)
+
+			discarded[from] = append(discarded[from], tx)
+			transactors[from] = struct{}{}
+		}
+		if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
+			log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash())
+			return nil, nil
+		}
+	}
+	for add.NumberU64() > rem.NumberU64() {
+		for _, tx := range add.Transactions() {
+			from, _ := p.signer.Sender(tx)
+
+			included[from] = append(included[from], tx)
+			inclusions[tx.Hash()] = add.NumberU64()
+			transactors[from] = struct{}{}
+		}
+		if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
+			log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash())
+			return nil, nil
+		}
+	}
+	for rem.Hash() != add.Hash() {
+		for _, tx := range rem.Transactions() {
+			from, _ := p.signer.Sender(tx)
+
+			discarded[from] = append(discarded[from], tx)
+			transactors[from] = struct{}{}
+		}
+		if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
+			log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash())
+			return nil, nil
+		}
+		for _, tx := range add.Transactions() {
+			from, _ := p.signer.Sender(tx)
+
+			included[from] = append(included[from], tx)
+			inclusions[tx.Hash()] = add.NumberU64()
+			transactors[from] = struct{}{}
+		}
+		if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
+			log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash())
+			return nil, nil
+		}
+	}
+	// Generate the set of transactions per address to pull back into the pool,
+	// also updating the rest along the way
+	reinject := make(map[common.Address]types.Transactions)
+	for addr := range transactors {
+		// Generate the set that was lost to reinject into the pool
+		reinject[addr] = types.TxDifference(discarded[addr], included[addr])
+
+		// Update the set that was already reincluded to track the blocks in limbo
+		for _, tx := range types.TxDifference(included[addr], discarded[addr]) {
+			p.limbo.update(tx.Hash(), inclusions[tx.Hash()])
+		}
+	}
+	return reinject, inclusions
+}
+
+// reinject blindly pushes a transaction previously included in the chain - and
+// just reorged out - into the pool. The transaction is assumed valid (having
+// been in the chain), thus the only validation needed is nonce sorting and over-
+// draft checks after injection.
+func (p *BlobPool) reinject(addr common.Address, tx *types.Transaction) {
+	// Retrieve the associated blob from the limbo. Without the blobs, we cannot
+	// add the transaction back into the pool as it is not mineable.
+	blobs, err := p.limbo.pull(tx.Hash())
+	if err != nil {
+		log.Error("Blobs unavailable, dropping reorged tx", "err", err)
+		return
+	}
+	// Serialize the transaction back into the primary datastore
+	blob, err := rlp.EncodeToBytes(&blobTx{Tx: tx, Blobs: blobs})
+	if err != nil {
+		log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
+		return
+	}
+	id, err := p.store.Put(blob)
+	if err != nil {
+		log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err)
+		return
+	}
+	// Update the indexes and metrics
+	meta := newBlobTxMeta(id, p.store.Size(id), tx)
+
+	if _, ok := p.index[addr]; !ok {
+		p.index[addr] = []*blobTxMeta{meta}
+		p.spent[addr] = meta.costCap
+		heap.Push(p.evict, addr) // heap.Push (not Interface.Push) so the heap invariant is restored
+	} else {
+		p.index[addr] = append(p.index[addr], meta)
+		p.spent[addr] = new(uint256.Int).Add(p.spent[addr], meta.costCap)
+	}
+	p.stored += uint64(meta.size)
+}
+
+// SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements
+// to be kept in sync with the main transaction pool's gas requirements.
+func (p *BlobPool) SetGasTip(tip *big.Int) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	// Store the new minimum gas tip
+	old := p.gasTip
+	p.gasTip = uint256.MustFromBig(tip)
+
+	// If the min miner fee increased, remove transactions below the new threshold
+	if old == nil || p.gasTip.Cmp(old) > 0 {
+		for addr, txs := range p.index {
+			for i, tx := range txs {
+				if tx.execTipCap.Cmp(p.gasTip) < 0 {
+					// Drop the offending transaction
+					var (
+						ids    = []uint64{tx.id}
+						nonces = []uint64{tx.nonce}
+					)
+					p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
+					p.stored -= uint64(tx.size)
+					txs[i] = nil
+
+					// Drop everything afterwards, no gaps allowed
+					for j, tx := range txs[i+1:] {
+						ids = append(ids, tx.id)
+						nonces = append(nonces, tx.nonce)
+
+						p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], tx.costCap)
+						p.stored -= uint64(tx.size)
+						txs[i+1+j] = nil
+					}
+					// Clear out the dropped transactions from the index
+					if i > 0 {
+						p.index[addr] = txs[:i]
+					} else {
+						delete(p.index, addr)
+						delete(p.spent, addr)
+					}
+					// Clear out the transactions from the data store
+					log.Warn("Dropping underpriced blob transaction", "from", addr, "rejected", tx.nonce, "tip", tx.execTipCap, "want", tip, "drop", nonces, "ids", ids)
+					for _, id := range ids {
+						if err := p.store.Delete(id); err != nil {
+							log.Error("Failed to delete dropped transaction", "id", id, "err", err)
+						}
+					}
+					break
+				}
+			}
+		}
+	}
+	log.Debug("Blobpool tip threshold updated", "tip", tip)
+	pooltipGague.Update(tip.Int64())
+	p.updateStorageMetrics()
+}
+
+// validateTx checks whether a transaction is valid according to the consensus
+// rules and adheres to some heuristic limits of the local node (price and size).
+func (p *BlobPool) validateTx(tx *types.Transaction, blobs [][]byte) error {
+	// Sanity check that only appropriate transactions and of a given data limit
+	// are even considered for acceptance into the pool.
+	if tx.Type() != types.BlobTxType {
+		return fmt.Errorf("%w: tx type %v not blob transaction", core.ErrTxTypeNotSupported, tx.Type())
+	}
+	if tx.Size() > txMaxSize {
+		return fmt.Errorf("%w: transaction size %v, limit %v", txpool.ErrOversizedData, tx.Size(), txMaxSize)
+	}
+	// Verify the consensus rules to weed out invalid transactions
+	if err := txpool.ValidateTransaction(tx, p.head, p.chain.Config(), p.signer); err != nil {
+		return err
+	}
+	// Drop transactions under our own minimal accepted gas price or tip
+	if tx.GasTipCapIntCmp(p.gasTip.ToBig()) < 0 {
+		return fmt.Errorf("%w: tip needed %v, tip permitted %v", txpool.ErrUnderpriced, p.gasTip, tx.GasTipCap())
+	}
+	// Ensure the transaction adheres to nonce ordering
+	from, _ := p.signer.Sender(tx) // already validated above
+
+	next := p.state.GetNonce(from)
+	if next > tx.Nonce() {
+		return fmt.Errorf("%w: next nonce %v, tx nonce %v", core.ErrNonceTooLow, next, tx.Nonce())
+	}
+	// Ensure the transaction doesn't produce a nonce gap in the blob pool
+	if gap := next + uint64(len(p.index[from])); gap < tx.Nonce() {
+		return fmt.Errorf("%w: tx nonce %v, gapped nonce %v", core.ErrNonceTooHigh, tx.Nonce(), gap)
+	}
+	// Transactor should have enough funds to cover the base costs
+	// cost == V + GP * GL + DGP * DG
+	var (
+		balance, _ = uint256.FromBig(p.state.GetBalance(from))
+		cost, _    = uint256.FromBig(tx.Cost())
+	)
+	if balance.Cmp(cost) < 0 {
+		return fmt.Errorf("%w: balance %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, cost, new(uint256.Int).Sub(cost, balance))
+	}
+	// Transactor should have enough funds to cover the cumulative costs too
+	spent := p.spent[from]
+	if uint64(len(p.index[from])) > tx.Nonce()-next {
+		// A replacement transaction must not overspend the account (invalidating
+		// already propagated transactions)
+		prev := p.index[from][int(tx.Nonce()-next)]
+		bump := new(uint256.Int).Sub(cost, prev.costCap)
+		need := new(uint256.Int).Add(spent, bump)
+		if balance.Cmp(need) < 0 {
+			return fmt.Errorf("%w: balance %v, queued cost %v, tx bumped %v, overshot %v", core.ErrInsufficientFunds, balance, spent, bump, new(uint256.Int).Sub(need, balance))
+		}
+		// Account can support the replacement, but the price bump must also be met
+		switch {
+		case tx.GasFeeCapIntCmp(prev.execFeeCap.ToBig()) <= 0:
+			return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap)
+		case tx.GasTipCapIntCmp(prev.execTipCap.ToBig()) <= 0:
+			return fmt.Errorf("%w: new tx gas tip cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap)
+		case tx.BlobGasFeeCap() != nil && /* TODO(karalabe) remove nil check */ tx.BlobGasFeeCapIntCmp(prev.blobFeeCap.ToBig()) <= 0:
+			return fmt.Errorf("%w: new tx blob gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap)
+		}
+		var (
+			multiplier = uint256.NewInt(100 + p.config.PriceBump)
+
+			minGasFeeCap     = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execFeeCap), uint256.NewInt(100))
+			minGasTipCap     = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execTipCap), uint256.NewInt(100))
+			minBlobGasFeeCap = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.blobFeeCap), uint256.NewInt(100))
+		)
+		switch {
+		case tx.GasFeeCapIntCmp(minGasFeeCap.ToBig()) < 0:
+			return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap, p.config.PriceBump)
+		case tx.GasTipCapIntCmp(minGasTipCap.ToBig()) < 0:
+			return fmt.Errorf("%w: new tx gas tip cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap, p.config.PriceBump)
+		case tx.BlobGasFeeCap() != nil && /* TODO(karalabe) remove nil check */ tx.BlobGasFeeCapIntCmp(minBlobGasFeeCap.ToBig()) < 0:
+			return fmt.Errorf("%w: new tx blob gas fee cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap, p.config.PriceBump)
+		}
+	} else if spent != nil {
+		// A new transaction must not overspend the account (becoming invalid
+		// when execution reaches it)
+		need := new(uint256.Int).Add(spent, cost)
+		if balance.Cmp(need) < 0 {
+			return fmt.Errorf("%w: balance %v, queued cost %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, spent, cost, new(uint256.Int).Sub(need, balance))
+		}
+	}
+	// Transaction validation passed, check the blob cryptographic commitments
+	// TODO(karalabe)
+
+	return nil
+}
+
+// Add inserts a new blob transaction into the pool if it passes validation (both
+// consensus validity and pool restrictions).
+func (p *BlobPool) Add(tx *types.Transaction, blobs [][]byte) error {
+	// The blob pool blocks on adding a transaction. This is because blob txs
+	// are only ever pulled from the network, so this method will act as the
+	// overload protection for fetches.
+	waitStart := time.Now()
+	p.lock.Lock()
+	addwaitHist.Update(time.Since(waitStart).Nanoseconds())
+	defer p.lock.Unlock()
+
+	defer func(start time.Time) {
+		addtimeHist.Update(time.Since(start).Nanoseconds())
+	}(time.Now())
+
+	// Ensure the transaction is valid from all perspectives
+	if err := p.validateTx(tx, blobs); err != nil {
+		log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
+		return err
+	}
+	// Transaction permitted into the pool from a nonce and cost perspective,
+	// insert it into the database and update the indices
+	blob, err := rlp.EncodeToBytes(&blobTx{Tx: tx, Blobs: blobs})
+	if err != nil {
+		log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
+		return err
+	}
+	id, err := p.store.Put(blob)
+	if err != nil {
+		return err
+	}
+	meta := newBlobTxMeta(id, p.store.Size(id), tx)
+
+	var (
+		from, _ = types.Sender(p.signer, tx) // already validated above
+		next    = p.state.GetNonce(from)
+		offset  = int(tx.Nonce() - next)
+		newacc  = false
+	)
+	if len(p.index[from]) > offset {
+		// Transaction replaces a previously queued one
+		prev := p.index[from][offset]
+		if err := p.store.Delete(prev.id); err != nil {
+			// Shitty situation, but try to recover gracefully instead of going boom
+			log.Error("Failed to delete replaced transaction", "id", prev.id, "err", err)
+		}
+		// Update the transaction index
+		p.index[from][offset] = meta
+		p.spent[from] = new(uint256.Int).Sub(p.spent[from], prev.costCap)
+		p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
+		p.stored += uint64(meta.size) - uint64(prev.size)
+	} else {
+		// Transaction extends previously scheduled ones
+		p.index[from] = append(p.index[from], meta)
+		if _, ok := p.spent[from]; !ok {
+			p.spent[from] = new(uint256.Int)
+			newacc = true
+		}
+		p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
+		p.stored += uint64(meta.size)
+	}
+	// Recompute the rolling eviction fields. In case of a replacement, this will
+	// recompute all subsequent fields. In case of an append, this will only do
+	// the fresh calculation.
+	var (
+		txs = p.index[from]
+
+		oldEvictionExecFeeJumps = txs[len(txs)-1].evictionExecFeeJumps
+		oldEvictionBlobFeeJumps = txs[len(txs)-1].evictionBlobFeeJumps
+	)
+	for i := offset; i < len(txs); i++ {
+		// The first transaction will always use itself
+		if i == 0 {
+			txs[0].evictionExecTip = txs[0].execTipCap
+			txs[0].evictionExecFeeJumps = txs[0].basefeeJumps
+			txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
+
+			continue
+		}
+		// Subsequent transactions will use a rolling calculation
+		txs[i].evictionExecTip = txs[i-1].evictionExecTip
+		if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
+			txs[i].evictionExecTip = txs[i].execTipCap
+		}
+		txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
+		if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
+			txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
+		}
+		txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
+		if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
+			txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
+		}
+	}
+	// Update the eviction heap with the new information:
+	//  - If the transaction is from a new account, add it to the heap
+	//  - If the account had a singleton tx replaced, update the heap (new price caps)
+	//  - If the account had its last tx updated or appended to, update heap iff lower than prev
+	//  - If the account had an internal tx updated, compare old tail price caps with new ones
+	switch {
+	case newacc:
+		heap.Push(p.evict, from)
+
+	case len(txs) == 1: // 1 tx and not a new acc, must be replacement
+		heap.Fix(p.evict, p.evict.index[from])
+
+	case offset == len(txs): // either append or last tx replacement; NOTE(review): after the insert above, an append yields offset == len(txs)-1, so this case looks unreachable and those paths fall through to default — confirm
+		evictionExecFeeDiff := txs[offset-1].evictionExecFeeJumps - txs[offset].evictionExecFeeJumps
+		evictionBlobFeeDiff := txs[offset-1].evictionBlobFeeJumps - txs[offset].evictionBlobFeeJumps
+
+		if evictionExecFeeDiff > 0.001 || evictionBlobFeeDiff > 0.001 { // no need for math.Abs, monotonic decreasing
+			heap.Fix(p.evict, p.evict.index[from])
+		}
+
+	default: // no new account, no singleton tx, no last update; must be internal replacement
+		evictionExecFeeDiff := oldEvictionExecFeeJumps - txs[offset].evictionExecFeeJumps
+		evictionBlobFeeDiff := oldEvictionBlobFeeJumps - txs[offset].evictionBlobFeeJumps
+
+		if math.Abs(evictionExecFeeDiff) > 0.001 || math.Abs(evictionBlobFeeDiff) > 0.001 { // need math.Abs, can go up and down
+			heap.Fix(p.evict, p.evict.index[from])
+		}
+	}
+	// If the pool went over the allowed data limit, evict transactions until
+	// we're again below the threshold
+	for p.stored > p.config.Datacap {
+		p.drop()
+	}
+	p.updateStorageMetrics()
+
+	return nil
+}
+
+// drop removes the worst transaction from the pool. It is primarily used when a
+// freshly added transaction overflows the pool and needs to evict something. The
+// method is also called on startup if the user resizes their storage, might be an
+// expensive run but it should be fine-ish.
+func (p *BlobPool) drop() {
+	// Peek at the account with the worst transaction set to evict from (Go's heap
+	// stores the minimum at index zero of the heap slice) and retrieve its last
+	// transaction.
+	var (
+		from = p.evict.addrs[0] // cannot call drop on empty pool
+
+		txs  = p.index[from]
+		drop = txs[len(txs)-1]
+		last = len(txs) == 1
+	)
+	// Remove the transaction from the pool's index
+	if last {
+		delete(p.index, from)
+		delete(p.spent, from)
+	} else {
+		txs[len(txs)-1] = nil
+		txs = txs[:len(txs)-1]
+
+		p.index[from] = txs
+		p.spent[from] = new(uint256.Int).Sub(p.spent[from], drop.costCap)
+	}
+	p.stored -= uint64(drop.size)
+
+	// Remove the transaction from the pool's eviction heap:
+	//  - If the entire account was dropped, pop off the address
+	//  - Otherwise, if the new tail has better eviction caps, fix the heap
+	if last {
+		heap.Pop(p.evict)
+	} else {
+		tail := txs[len(txs)-1] // new tail, surely exists
+
+		evictionExecFeeDiff := tail.evictionExecFeeJumps - drop.evictionExecFeeJumps
+		evictionBlobFeeDiff := tail.evictionBlobFeeJumps - drop.evictionBlobFeeJumps
+
+		if evictionExecFeeDiff > 0.001 || evictionBlobFeeDiff > 0.001 { // no need for math.Abs, monotonic decreasing
+			heap.Fix(p.evict, 0)
+		}
+	}
+	// Remove the transaction from the data store
+	log.Warn("Evicting overflown blob transaction", "from", from, "evicted", drop.nonce, "id", drop.id)
+	if err := p.store.Delete(drop.id); err != nil {
+		log.Error("Failed to drop evicted transaction", "id", drop.id, "err", err)
+	}
+}
+
+// Pending retrieves a snapshot of the pending transactions for the miner to sift
+// through and pick the best ones. The method already does a pre-filtering since
+// there's no point to retrieve non-executable ones.
+//
+// Note, please don't provide an RPC API method to expose these. The blob pool will
+// ideally get enormous, and it will not be feasible to constantly expose that entire
+// dump out of the process.
+func (p *BlobPool) Pending(basefee, datafee *uint256.Int) map[common.Address][]*BlobTxShim {
+	waitStart := time.Now()
+	p.lock.Lock()
+	pendwaitHist.Update(time.Since(waitStart).Nanoseconds())
+	defer p.lock.Unlock()
+
+	defer func(start time.Time) {
+		pendtimeHist.Update(time.Since(start).Nanoseconds())
+	}(time.Now())
+
+	pending := make(map[common.Address][]*BlobTxShim)
+	for addr, txs := range p.index {
+		var shims []*BlobTxShim
+		for _, tx := range txs {
+			// If the transaction cannot be executed in the current block, or does
+			// not meet the minimum required tip, stop aggregating the account
+			if tx.execFeeCap.Lt(basefee) || tx.blobFeeCap.Lt(datafee) {
+				break
+			}
+			if new(uint256.Int).Sub(tx.execFeeCap, basefee).Lt(p.gasTip) {
+				break
+			}
+			// Transaction met the minimum filters, add to the shims
+			shims = append(shims, &BlobTxShim{}) // NOTE(review): shim is appended with all fields empty — confirm they get populated in a follow-up change
+		}
+		if len(shims) > 0 {
+			pending[addr] = shims
+		}
+	}
+	return pending
+}
+
+// updateStorageMetrics retrieves a bunch of stats from the data store and pushes
+// them out as metrics.
+func (p *BlobPool) updateStorageMetrics() {
+	stats := p.store.Infos()
+
+	var (
+		dataused uint64
+		datareal uint64
+		slotused uint64
+
+		oversizedDataused uint64
+		oversizedDatagaps uint64
+		oversizedSlotused uint64
+		oversizedSlotgaps uint64
+	)
+	for _, shelf := range stats.Shelves {
+		slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize)
+		slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize)
+
+		dataused += slotDataused
+		datareal += slotDataused + slotDatagaps
+		slotused += shelf.FilledSlots
+
+		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused))
+		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps))
+		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots))
+		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots))
+
+		if shelf.SlotSize/blobSize > maxBlobsPerTransaction { // shelves larger than any valid tx are tracked separately
+			oversizedDataused += slotDataused
+			oversizedDatagaps += slotDatagaps
+			oversizedSlotused += shelf.FilledSlots
+			oversizedSlotgaps += shelf.GappedSlots
+		}
+	}
+	datausedGauge.Update(int64(dataused))
+	datarealGauge.Update(int64(datareal))
+	slotusedGauge.Update(int64(slotused))
+
+	oversizedDatausedGauge.Update(int64(oversizedDataused))
+	oversizedDatagapsGauge.Update(int64(oversizedDatagaps))
+	oversizedSlotusedGauge.Update(int64(oversizedSlotused))
+	oversizedSlotgapsGauge.Update(int64(oversizedSlotgaps))
+
+	p.updateLimboMetrics()
+}
+
+// updateLimboMetrics retrieves a bunch of stats from the limbo store and pushes
+// them out as metrics.
+func (p *BlobPool) updateLimboMetrics() {
+	stats := p.limbo.store.Infos()
+
+	var (
+		dataused uint64
+		datareal uint64
+		slotused uint64
+	)
+	for _, shelf := range stats.Shelves {
+		slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize)
+		slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize)
+
+		dataused += slotDataused
+		datareal += slotDataused + slotDatagaps
+		slotused += shelf.FilledSlots
+
+		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused))
+		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps))
+		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots))
+		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots))
+	}
+	limboDatausedGauge.Update(int64(dataused))
+	limboDatarealGauge.Update(int64(datareal))
+	limboSlotusedGauge.Update(int64(slotused))
+}
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
new file mode 100644
index 0000000000000..f415155d028bf
--- /dev/null
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -0,0 +1,992 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package blobpool
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "errors"
+ "math"
+ "math/big"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/misc"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/txpool"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/billy"
+ "github.com/holiman/uint256"
+)
+
+// Chain configuration with Cancun enabled.
+//
+// TODO(karalabe): replace with params.MainnetChainConfig after Cancun.
+var testChainConfig *params.ChainConfig
+
+func init() {
+	testChainConfig = new(params.ChainConfig)
+	*testChainConfig = *params.MainnetChainConfig
+
+	testChainConfig.CancunTime = new(uint64)
+	*testChainConfig.CancunTime = uint64(time.Now().Unix()) // activate Cancun "now" so blob txs are accepted
+}
+
+// testBlockChain is a mock of the live chain for testing the pool.
+type testBlockChain struct {
+	config  *params.ChainConfig // chain configuration handed to the pool
+	basefee *uint256.Int        // base fee CurrentBlock reverse engineers into the head header
+	blobfee *uint256.Int        // blob fee CurrentBlock reverse engineers into the head header
+	statedb *state.StateDB      // state database returned by StateAt, regardless of root
+}
+
+// Config implements the blob pool's chain interface, returning the mocked
+// chain configuration.
+func (bc *testBlockChain) Config() *params.ChainConfig {
+	return bc.config
+}
+
+// CurrentBlock implements the blob pool's chain interface, synthesizing a head
+// header whose fee fields make the *next* block's base fee and blob fee equal
+// the mock's configured values.
+func (bc *testBlockChain) CurrentBlock() *types.Header {
+	// Yolo, life is too short to invert misc.CalcBaseFee and misc.CalcBlobFee,
+	// just binary search them instead.
+
+	// The base fee at 5714 ETH translates into the 21000 base gas higher than
+	// mainnet ether existence, use that as a cap for the tests.
+	var (
+		blockNumber = new(big.Int).Add(bc.config.LondonBlock, big.NewInt(1))
+		blockTime   = *bc.config.CancunTime + 1
+		gasLimit    = uint64(30_000_000)
+	)
+	lo := new(big.Int)
+	hi := new(big.Int).Mul(big.NewInt(5714), new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil))
+
+	for new(big.Int).Add(lo, big.NewInt(1)).Cmp(hi) != 0 {
+		mid := new(big.Int).Add(lo, hi)
+		mid.Div(mid, big.NewInt(2))
+
+		if misc.CalcBaseFee(bc.config, &types.Header{
+			Number:   blockNumber,
+			GasLimit: gasLimit,
+			GasUsed:  0,
+			BaseFee:  mid,
+		}).Cmp(bc.basefee.ToBig()) > 0 {
+			hi = mid
+		} else {
+			lo = mid
+		}
+	}
+	baseFee := lo
+
+	// The excess data gas at 2^27 translates into a blob fee higher than mainnet
+	// ether existence, use that as a cap for the tests.
+	lo = new(big.Int)
+	hi = new(big.Int).Exp(big.NewInt(2), big.NewInt(27), nil)
+
+	for new(big.Int).Add(lo, big.NewInt(1)).Cmp(hi) != 0 {
+		mid := new(big.Int).Add(lo, hi)
+		mid.Div(mid, big.NewInt(2))
+
+		if misc.CalcBlobFee(mid).Cmp(bc.blobfee.ToBig()) > 0 {
+			hi = mid
+		} else {
+			lo = mid
+		}
+	}
+	excessDataGas := lo
+
+	return &types.Header{
+		Number:        blockNumber,
+		Time:          blockTime,
+		GasLimit:      gasLimit,
+		BaseFee:       baseFee,
+		ExcessDataGas: excessDataGas,
+	}
+}
+
+// CurrentFinalBlock implements the blob pool's chain interface, reporting the
+// genesis block number as the finalized block for the tests.
+func (bc *testBlockChain) CurrentFinalBlock() *types.Header {
+	return &types.Header{
+		Number: big.NewInt(0),
+	}
+}
+
+// GetBlock implements the blob pool's chain interface. The mock has no block
+// store, so every lookup misses and returns nil.
+func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
+	return nil
+}
+
+// StateAt implements the blob pool's chain interface, returning the mock's
+// single state database irrespective of the requested root.
+func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
+	return bc.statedb, nil
+}
+
+// makeTx is a utility method to construct a blob transaction and sign it with
+// a valid key, only setting the interesting fields from the perspective of the
+// blob pool.
+func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, key *ecdsa.PrivateKey) *types.Transaction {
+	tx, _ := types.SignNewTx(key, types.LatestSigner(testChainConfig), makeUnsignedTx(nonce, gasTipCap, gasFeeCap, blobFeeCap))
+	return tx
+}
+
+// makeUnsignedTx is a utility method to construct a blob transaction without
+// signing it.
+func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64) *types.BlobTx {
+	return &types.BlobTx{
+		ChainID:    uint256.MustFromBig(testChainConfig.ChainID),
+		Nonce:      nonce,
+		To:         new(common.Address),
+		GasTipCap:  uint256.NewInt(gasTipCap),
+		GasFeeCap:  uint256.NewInt(gasFeeCap),
+		Gas:        21000,
+		BlobFeeCap: uint256.NewInt(blobFeeCap),
+		Value:      uint256.NewInt(100),
+	}
+}
+
+// verifyPoolInternals iterates over all the transactions in the pool and checks
+// that sort orders, calculated fields, accumulated fields are correct.
+func verifyPoolInternals(t *testing.T, pool *BlobPool) {
+	// Mark this method as a helper to remove from stack traces
+	t.Helper()
+
+	// Verify that transactions are sorted per account and contain no nonce gaps
+	for addr, txs := range pool.index {
+		for i := 1; i < len(txs); i++ {
+			if txs[i].nonce != txs[i-1].nonce+1 {
+				t.Errorf("addr %v, tx %d nonce mismatch: have %d, want %d", addr, i, txs[i].nonce, txs[i-1].nonce+1)
+			}
+		}
+	}
+	// Verify that calculated eviction thresholds are correct
+	for addr, txs := range pool.index {
+		if !txs[0].evictionExecTip.Eq(txs[0].execTipCap) {
+			t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, 0, txs[0].evictionExecTip, txs[0].execTipCap)
+		}
+		if math.Abs(txs[0].evictionExecFeeJumps-txs[0].basefeeJumps) > 0.001 {
+			t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, 0, txs[0].evictionExecFeeJumps, txs[0].basefeeJumps)
+		}
+		if math.Abs(txs[0].evictionBlobFeeJumps-txs[0].blobfeeJumps) > 0.001 {
+			t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, 0, txs[0].evictionBlobFeeJumps, txs[0].blobfeeJumps)
+		}
+		for i := 1; i < len(txs); i++ {
+			wantExecTip := txs[i-1].evictionExecTip
+			if wantExecTip.Gt(txs[i].execTipCap) {
+				wantExecTip = txs[i].execTipCap
+			}
+			if !txs[i].evictionExecTip.Eq(wantExecTip) {
+				t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, i, txs[i].evictionExecTip, wantExecTip)
+			}
+
+			wantExecFeeJumps := txs[i-1].evictionExecFeeJumps
+			if wantExecFeeJumps > txs[i].basefeeJumps {
+				wantExecFeeJumps = txs[i].basefeeJumps
+			}
+			if math.Abs(txs[i].evictionExecFeeJumps-wantExecFeeJumps) > 0.001 {
+				t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionExecFeeJumps, wantExecFeeJumps)
+			}
+
+			wantBlobFeeJumps := txs[i-1].evictionBlobFeeJumps
+			if wantBlobFeeJumps > txs[i].blobfeeJumps {
+				wantBlobFeeJumps = txs[i].blobfeeJumps
+			}
+			if math.Abs(txs[i].evictionBlobFeeJumps-wantBlobFeeJumps) > 0.001 {
+				t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionBlobFeeJumps, wantBlobFeeJumps)
+			}
+		}
+	}
+	// Verify that account balance accumulations are correct
+	for addr, txs := range pool.index {
+		spent := new(uint256.Int)
+		for _, tx := range txs {
+			spent.Add(spent, tx.costCap)
+		}
+		if !pool.spent[addr].Eq(spent) {
+			t.Errorf("addr %v expenditure mismatch: have %d, want %d", addr, pool.spent[addr], spent)
+		}
+	}
+	// Verify that pool storage size is correct
+	var stored uint64
+	for _, txs := range pool.index {
+		for _, tx := range txs {
+			stored += uint64(tx.size)
+		}
+	}
+	if pool.stored != stored {
+		t.Errorf("pool storage mismatch: have %d, want %d", pool.stored, stored)
+	}
+	// Verify the price heap internals
+	verifyHeapInternals(t, pool.evict)
+}
+
+// Tests that transactions can be loaded from disk on startup and that they are
+// correctly discarded if invalid.
+//
+// - 1. A transaction that cannot be decoded must be dropped
+// - 2. A transaction that cannot be recovered (bad signature) must be dropped
+// - 3. All transactions after a nonce gap must be dropped
+// - 4. All transactions after an underpriced one (including it) must be dropped
+func TestOpenDrops(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a malformed transaction to verify that decoding errors (or format
+ // changes) are handled gracefully (case 1)
+ malformed, _ := store.Put([]byte("this is a badly encoded transaction"))
+
+ // Insert a transaction with a bad signature to verify that stale junk after
+ // potential hard-forks can get evicted (case 2)
+ tx := types.NewTx(&types.BlobTx{
+ ChainID: uint256.MustFromBig(testChainConfig.ChainID),
+ GasTipCap: new(uint256.Int),
+ GasFeeCap: new(uint256.Int),
+ Gas: 0,
+ Value: new(uint256.Int),
+ Data: nil,
+ BlobFeeCap: new(uint256.Int),
+ V: new(uint256.Int),
+ R: new(uint256.Int),
+ S: new(uint256.Int),
+ })
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ badsig, _ := store.Put(blob)
+
+ // Insert a sequence of transactions with a nonce gap in between to verify
+ // that anything gapped will get evicted (case 3)
+ var (
+ gapper, _ = crypto.GenerateKey()
+
+ valids = make(map[uint64]struct{})
+ gapped = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 3, 4, 6, 7} { // first gap at #2, another at #5
+ tx := makeTx(nonce, 1, 1, 1, gapper)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ if nonce < 2 {
+ valids[id] = struct{}{}
+ } else {
+ gapped[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions with a gapped starting nonce to verify
+ // that the entire set will get dropped.
+ var (
+ dangler, _ = crypto.GenerateKey()
+ dangling = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{1, 2, 3} { // first gap at #0, all set dangling
+ tx := makeTx(nonce, 1, 1, 1, dangler)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ dangling[id] = struct{}{}
+ }
+ // Insert a sequence of transactions with already passed nonces to verify
+ // that the entire set will get dropped.
+ var (
+ filler, _ = crypto.GenerateKey()
+ filled = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} { // account nonce at 3, all set filled
+ tx := makeTx(nonce, 1, 1, 1, filler)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ filled[id] = struct{}{}
+ }
+ // Insert a sequence of transactions with partially passed nonces to verify
+ // that the included part of the set will get dropped
+ var (
+ overlapper, _ = crypto.GenerateKey()
+ overlapped = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2, 3} { // account nonce at 2, half filled
+ tx := makeTx(nonce, 1, 1, 1, overlapper)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ if nonce >= 2 {
+ valids[id] = struct{}{}
+ } else {
+ overlapped[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions with an underpriced in between to verify
+ // that it and anything newly gapped will get evicted (case 4)
+ var (
+ pricer, _ = crypto.GenerateKey()
+ outpriced = make(map[uint64]struct{})
+ )
+ for i := 0; i < 5; i++ { // make #2 underpriced
+ var tx *types.Transaction
+ if i == 2 {
+ tx = makeTx(uint64(i), 0, 0, 0, pricer)
+ } else {
+ tx = makeTx(uint64(i), 1, 1, 1, pricer)
+ }
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ if i < 2 {
+ valids[id] = struct{}{}
+ } else {
+ outpriced[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions fully overdrafted to verify that the
+ // entire set will get invalidated.
+ var (
+ exceeder, _ = crypto.GenerateKey()
+ exceeded = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} { // nonce 0 overdrafts the account
+ var tx *types.Transaction
+ if nonce == 0 {
+ tx = makeTx(nonce, 1, 100, 1, exceeder)
+ } else {
+ tx = makeTx(nonce, 1, 1, 1, exceeder)
+ }
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ exceeded[id] = struct{}{}
+ }
+ // Insert a sequence of transactions partially overdrafted to verify that part
+ // of the set will get invalidated.
+ var (
+ overdrafter, _ = crypto.GenerateKey()
+ overdrafted = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} { // nonce 1 overdrafts the account
+ var tx *types.Transaction
+ if nonce == 1 {
+ tx = makeTx(nonce, 1, 100, 1, overdrafter)
+ } else {
+ tx = makeTx(nonce, 1, 1, 1, overdrafter)
+ }
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+
+ id, _ := store.Put(blob)
+ if nonce < 1 {
+ valids[id] = struct{}{}
+ } else {
+ overdrafted[id] = struct{}{}
+ }
+ }
+ store.Close()
+
+ // Create a blob pool out of the pre-seeded data
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb.AddBalance(crypto.PubkeyToAddress(gapper.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(dangler.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(filler.PublicKey), big.NewInt(1000000))
+ statedb.SetNonce(crypto.PubkeyToAddress(filler.PublicKey), 3)
+ statedb.AddBalance(crypto.PubkeyToAddress(overlapper.PublicKey), big.NewInt(1000000))
+ statedb.SetNonce(crypto.PubkeyToAddress(overlapper.PublicKey), 2)
+ statedb.AddBalance(crypto.PubkeyToAddress(pricer.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(exceeder.PublicKey), big.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(overdrafter.PublicKey), big.NewInt(1000000))
+ statedb.Commit(true)
+
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(params.InitialBaseFee),
+ blobfee: uint256.NewInt(params.BlobTxMinDataGasprice),
+ statedb: statedb,
+ }
+ pool, err := New(Config{Datadir: storage, PriceLimit: 1}, chain)
+ if err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+ defer pool.Close()
+
+ // Verify that the malformed (case 1), badly signed (case 2) and gapped (case
+ // 3) txs have been deleted from the pool
+ alive := make(map[uint64]struct{})
+ for _, txs := range pool.index {
+ for _, tx := range txs {
+ switch tx.id {
+ case malformed:
+ t.Errorf("malformed RLP transaction remained in storage")
+ case badsig:
+ t.Errorf("invalidly signed transaction remained in storage")
+ default:
+ if _, ok := dangling[tx.id]; ok {
+ t.Errorf("dangling transaction remained in storage: %d", tx.id)
+ } else if _, ok := filled[tx.id]; ok {
+ t.Errorf("filled transaction remained in storage: %d", tx.id)
+ } else if _, ok := overlapped[tx.id]; ok {
+ t.Errorf("overlapped transaction remained in storage: %d", tx.id)
+ } else if _, ok := gapped[tx.id]; ok {
+ t.Errorf("gapped transaction remained in storage: %d", tx.id)
+ } else if _, ok := exceeded[tx.id]; ok {
+ t.Errorf("fully overdrafted transaction remained in storage: %d", tx.id)
+ } else if _, ok := overdrafted[tx.id]; ok {
+ t.Errorf("partially overdrafted transaction remained in storage: %d", tx.id)
+ } else {
+ alive[tx.id] = struct{}{}
+ }
+ }
+ }
+ }
+ // Verify that the rest of the transactions remained alive
+ if len(alive) != len(valids) {
+ t.Errorf("valid transaction count mismatch: have %d, want %d", len(alive), len(valids))
+ }
+ for id := range alive {
+ if _, ok := valids[id]; !ok {
+ t.Errorf("extra transaction %d", id)
+ }
+ }
+ for id := range valids {
+ if _, ok := alive[id]; !ok {
+ t.Errorf("missing transaction %d", id)
+ }
+ }
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks, this actually validates the verifier
+ // using the above already hard coded checks.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+}
+
+// Tests that transactions loaded from disk are indexed correctly.
+//
+// - 1. Transactions must be grouped by sender, sorted by nonce
+// - 2. Eviction thresholds are calculated correctly for the sequences
+// - 3. Balance usage of an account is totals across all transactions
+func TestOpenIndex(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a sequence of transactions with varying price points to check that
+ // the cumulative minimums will be maintained.
+ var (
+ key, _ = crypto.GenerateKey()
+ addr = crypto.PubkeyToAddress(key.PublicKey)
+
+ txExecTipCaps = []uint64{10, 25, 5, 7, 1, 100}
+ txExecFeeCaps = []uint64{100, 90, 200, 10, 80, 300}
+ txBlobFeeCaps = []uint64{55, 66, 77, 33, 22, 11}
+
+ //basefeeJumps = []float64{39.098, 38.204, 44.983, 19.549, 37.204, 48.426} // log 1.125 (exec fee cap)
+ //blobfeeJumps = []float64{34.023, 35.570, 36.879, 29.686, 26.243, 20.358} // log 1.125 (blob fee cap)
+
+ evictExecTipCaps = []uint64{10, 10, 5, 5, 1, 1}
+ evictExecFeeJumps = []float64{39.098, 38.204, 38.204, 19.549, 19.549, 19.549} // min(log 1.125 (exec fee cap))
+ evictBlobFeeJumps = []float64{34.023, 34.023, 34.023, 29.686, 26.243, 20.358} // min(log 1.125 (blob fee cap))
+
+ totalSpent = uint256.NewInt(21000*(100+90+200+10+80+300) + 100*6) // 21000 gas x price + value
+ )
+ for _, i := range []int{5, 3, 4, 2, 0, 1} { // Randomize the tx insertion order to force sorting on load
+ tx := makeTx(uint64(i), txExecTipCaps[i], txExecFeeCaps[i], txBlobFeeCaps[i], key)
+ blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ store.Put(blob)
+ }
+ store.Close()
+
+ // Create a blob pool out of the pre-seeded data
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb.AddBalance(addr, big.NewInt(1_000_000_000))
+ statedb.Commit(true)
+
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(params.InitialBaseFee),
+ blobfee: uint256.NewInt(params.BlobTxMinDataGasprice),
+ statedb: statedb,
+ }
+ pool, err := New(Config{Datadir: storage, PriceLimit: 1}, chain)
+ if err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+ defer pool.Close()
+
+ // Verify that the transactions have been sorted by nonce (case 1)
+ for i := 0; i < len(pool.index[addr]); i++ {
+ if pool.index[addr][i].nonce != uint64(i) {
+ t.Errorf("tx %d nonce mismatch: have %d, want %d", i, pool.index[addr][i].nonce, uint64(i))
+ }
+ }
+ // Verify that the cumulative fee minimums have been correctly calculated (case 2)
+ for i, cap := range evictExecTipCaps {
+ if !pool.index[addr][i].evictionExecTip.Eq(uint256.NewInt(cap)) {
+ t.Errorf("eviction tip cap %d mismatch: have %d, want %d", i, pool.index[addr][i].evictionExecTip, cap)
+ }
+ }
+ for i, jumps := range evictExecFeeJumps {
+ if math.Abs(pool.index[addr][i].evictionExecFeeJumps-jumps) > 0.001 {
+ t.Errorf("eviction fee cap jumps %d mismatch: have %f, want %f", i, pool.index[addr][i].evictionExecFeeJumps, jumps)
+ }
+ }
+ for i, jumps := range evictBlobFeeJumps {
+ if math.Abs(pool.index[addr][i].evictionBlobFeeJumps-jumps) > 0.001 {
+ t.Errorf("eviction blob fee cap jumps %d mismatch: have %f, want %f", i, pool.index[addr][i].evictionBlobFeeJumps, jumps)
+ }
+ }
+ // Verify that the balance usage has been correctly calculated (case 3)
+ if !pool.spent[addr].Eq(totalSpent) {
+ t.Errorf("expenditure mismatch: have %d, want %d", pool.spent[addr], totalSpent)
+ }
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks, this actually validates the verifier
+ // using the above already hard coded checks.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+}
+
+// Tests that after indexing all the loaded transactions from disk, a price heap
+// is correctly constructed based on the head basefee and blobfee.
+func TestOpenHeap(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a few transactions from a few accounts
+ var (
+ key1, _ = crypto.GenerateKey()
+ key2, _ = crypto.GenerateKey()
+ key3, _ = crypto.GenerateKey()
+
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ addr2 = crypto.PubkeyToAddress(key2.PublicKey)
+ addr3 = crypto.PubkeyToAddress(key3.PublicKey)
+
+ tx1 = makeTx(0, 1, 1000, 100, key1)
+ tx2 = makeTx(0, 1, 800, 70, key2)
+ tx3 = makeTx(0, 1, 1500, 110, key3)
+
+ blob1, _ = rlp.EncodeToBytes(&blobTx{Tx: tx1})
+ blob2, _ = rlp.EncodeToBytes(&blobTx{Tx: tx2})
+ blob3, _ = rlp.EncodeToBytes(&blobTx{Tx: tx3})
+
+ heapOrder = []common.Address{addr2, addr1, addr3}
+ heapIndex = map[common.Address]int{addr2: 0, addr1: 1, addr3: 2}
+ )
+ store.Put(blob1)
+ store.Put(blob2)
+ store.Put(blob3)
+ store.Close()
+
+ // Create a blob pool out of the pre-seeded data
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb.AddBalance(addr1, big.NewInt(1_000_000_000))
+ statedb.AddBalance(addr2, big.NewInt(1_000_000_000))
+ statedb.AddBalance(addr3, big.NewInt(1_000_000_000))
+ statedb.Commit(true)
+
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(1050),
+ blobfee: uint256.NewInt(105),
+ statedb: statedb,
+ }
+ pool, err := New(Config{Datadir: storage, PriceLimit: 1}, chain)
+ if err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+ defer pool.Close()
+
+ // Verify that the heap's internal state matches the expectations
+ for i, addr := range pool.evict.addrs {
+ if addr != heapOrder[i] {
+ t.Errorf("slot %d mismatch: have %v, want %v", i, addr, heapOrder[i])
+ }
+ }
+ for addr, i := range pool.evict.index {
+ if i != heapIndex[addr] {
+ t.Errorf("index for %v mismatch: have %d, want %d", addr, i, heapIndex[addr])
+ }
+ }
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks, this actually validates the verifier
+ // using the above already hard coded checks.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+}
+
+// Tests that after the pool's previous state is loaded back, any transactions
+// over the new storage cap will get dropped.
+func TestOpenCap(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a few transactions from a few accounts
+ var (
+ key1, _ = crypto.GenerateKey()
+ key2, _ = crypto.GenerateKey()
+ key3, _ = crypto.GenerateKey()
+
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ addr2 = crypto.PubkeyToAddress(key2.PublicKey)
+ addr3 = crypto.PubkeyToAddress(key3.PublicKey)
+
+ tx1 = makeTx(0, 1, 1000, 100, key1)
+ tx2 = makeTx(0, 1, 800, 70, key2)
+ tx3 = makeTx(0, 1, 1500, 110, key3)
+
+ blob1, _ = rlp.EncodeToBytes(&blobTx{Tx: tx1, Blobs: [][]byte{bytes.Repeat([]byte{0}, blobSize)}})
+ blob2, _ = rlp.EncodeToBytes(&blobTx{Tx: tx2, Blobs: [][]byte{bytes.Repeat([]byte{0}, blobSize)}})
+ blob3, _ = rlp.EncodeToBytes(&blobTx{Tx: tx3, Blobs: [][]byte{bytes.Repeat([]byte{0}, blobSize)}})
+
+ keep = []common.Address{addr1, addr3}
+ drop = []common.Address{addr2}
+ size = uint64(2 * (txAvgSize + blobSize))
+ )
+ store.Put(blob1)
+ store.Put(blob2)
+ store.Put(blob3)
+ store.Close()
+
+ // Verify pool capping twice: first by reducing the data cap, then restarting
+ // with a high cap to ensure everything was persisted previously
+ for _, datacap := range []uint64{2 * (txAvgSize + blobSize), 100 * (txAvgSize + blobSize)} {
+ // Create a blob pool out of the pre-seeded data, but cap it to 2 blob transaction
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb.AddBalance(addr1, big.NewInt(1_000_000_000))
+ statedb.AddBalance(addr2, big.NewInt(1_000_000_000))
+ statedb.AddBalance(addr3, big.NewInt(1_000_000_000))
+ statedb.Commit(true)
+
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(1050),
+ blobfee: uint256.NewInt(105),
+ statedb: statedb,
+ }
+ pool, err := New(Config{Datadir: storage, Datacap: datacap, PriceLimit: 1}, chain)
+ if err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+ // Verify that enough transactions have been dropped to get the pool's size
+ // under the requested limit
+ if len(pool.index) != len(keep) {
+ t.Errorf("tracked account count mismatch: have %d, want %d", len(pool.index), len(keep))
+ }
+ for _, addr := range keep {
+ if _, ok := pool.index[addr]; !ok {
+ t.Errorf("expected account %v missing from pool", addr)
+ }
+ }
+ for _, addr := range drop {
+ if _, ok := pool.index[addr]; ok {
+ t.Errorf("unexpected account %v present in pool", addr)
+ }
+ }
+ if pool.stored != size {
+ t.Errorf("pool stored size mismatch: have %v, want %v", pool.stored, size)
+ }
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks, this actually validates the verifier
+ // using the above already hard coded checks.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+
+ pool.Close()
+ }
+}
+
+// Tests that adding transaction will correctly store it in the persistent store
+// and update all the indices.
+//
+// Note, this tests mostly checks the pool transaction shuffling logic or things
+// specific to the blob pool. It does not do an exhaustive transaction validity
+// check.
+func TestAdd(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // seed is a helper tuple to seed an initial state db and pool
+ type seed struct {
+ balance uint64
+ nonce uint64
+ txs []*types.BlobTx
+ }
+
+ // addtx is a helper sender/tx tuple to represent a new tx addition
+ type addtx struct {
+ from string
+ tx *types.BlobTx
+ err error
+ }
+
+ tests := []struct {
+ seeds map[string]seed
+ adds []addtx
+ }{
+ // Transactions from new accounts should be accepted if their initial
+ // nonce matches the expected one from the statedb. Higher or lower must
+ // be rejected.
+ {
+ seeds: map[string]seed{
+ "alice": {balance: 21100},
+ "bob": {balance: 21100, nonce: 1},
+ "claire": {balance: 21100},
+ "dave": {balance: 21100, nonce: 1},
+ },
+ adds: []addtx{
+ { // New account, no previous txs: accept nonce 0
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 1, 1),
+ err: nil,
+ },
+ { // Old account, 1 tx in chain, 0 pending: accept nonce 1
+ from: "bob",
+ tx: makeUnsignedTx(1, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, no previous txs: reject nonce 1
+ from: "claire",
+ tx: makeUnsignedTx(1, 1, 1, 1),
+ err: core.ErrNonceTooHigh,
+ },
+ { // Old account, 1 tx in chain, 0 pending: reject nonce 0
+ from: "dave",
+ tx: makeUnsignedTx(0, 1, 1, 1),
+ err: core.ErrNonceTooLow,
+ },
+ { // Old account, 1 tx in chain, 0 pending: reject nonce 2
+ from: "dave",
+ tx: makeUnsignedTx(2, 1, 1, 1),
+ err: core.ErrNonceTooHigh,
+ },
+ },
+ },
+ // Transactions from already pooled accounts should only be accepted if
+ // the nonces are contiguous (ignore prices for now, will check later)
+ {
+ seeds: map[string]seed{
+ "alice": {
+ balance: 1000000,
+ txs: []*types.BlobTx{
+ makeUnsignedTx(0, 1, 1, 1),
+ },
+ },
+ "bob": {
+ balance: 1000000,
+ nonce: 1,
+ txs: []*types.BlobTx{
+ makeUnsignedTx(1, 1, 1, 1),
+ },
+ },
+ },
+ adds: []addtx{
+ { // New account, 1 tx pending: reject replacement nonce 0 (ignore price for now)
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 1, 1),
+ err: txpool.ErrReplaceUnderpriced,
+ },
+ { // New account, 1 tx pending: accept nonce 1
+ from: "alice",
+ tx: makeUnsignedTx(1, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 2 txs pending: reject nonce 3
+ from: "alice",
+ tx: makeUnsignedTx(3, 1, 1, 1),
+ err: core.ErrNonceTooHigh,
+ },
+ { // New account, 2 txs pending: accept nonce 2
+ from: "alice",
+ tx: makeUnsignedTx(2, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 3 txs pending: accept nonce 3 now
+ from: "alice",
+ tx: makeUnsignedTx(3, 1, 1, 1),
+ err: nil,
+ },
+ { // Old account, 1 tx in chain, 1 tx pending: reject replacement nonce 1 (ignore price for now)
+ from: "bob",
+ tx: makeUnsignedTx(1, 1, 1, 1),
+ err: txpool.ErrReplaceUnderpriced,
+ },
+ { // Old account, 1 tx in chain, 1 tx pending: accept nonce 2 (ignore price for now)
+ from: "bob",
+ tx: makeUnsignedTx(2, 1, 1, 1),
+ err: nil,
+ },
+ },
+ },
+ // Transactions should only be accepted into the pool if the cumulative
+ // expenditure doesn't overflow the account balance
+ {
+ seeds: map[string]seed{
+ "alice": {balance: 63299}, // 3 tx - 1 wei
+ },
+ adds: []addtx{
+ { // New account, no previous txs: accept nonce 0 with 21100 wei spend
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 1 pooled tx with 21100 wei spent: accept nonce 1 with 21100 wei spend
+ from: "alice",
+ tx: makeUnsignedTx(1, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 2 pooled tx with 42200 wei spent: reject nonce 2 with 21100 wei spend (1 wei overflow)
+ from: "alice",
+ tx: makeUnsignedTx(2, 1, 1, 1),
+ err: core.ErrInsufficientFunds,
+ },
+ },
+ },
+ // Previously existing transactions should be allowed to be replaced iff
+ // the new cumulative expenditure can be covered by the account and the
+ // needed price bump is also met.
+ {
+ seeds: map[string]seed{
+ "alice": {balance: 126199},
+ },
+ adds: []addtx{
+ { // New account, no previous txs: reject nonce 0 with 147100 wei spend
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 7, 1),
+ err: core.ErrInsufficientFunds,
+ },
+ { // New account, no previous txs: accept nonce 0 with 42100 wei spend
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 2, 1),
+ err: nil,
+ },
+ { // New account, 1 pooled tx with 42100 wei spent: accept nonce 1 with 21100 wei spend
+ from: "alice",
+ tx: makeUnsignedTx(1, 1, 1, 1),
+ err: nil,
+ },
+ { // New account, 2 pooled tx with 63200 wei spent: reject nonce 0 with 105100 wei spend (63000 extra) (would overflow balance at nonce 1)
+ from: "alice",
+ tx: makeUnsignedTx(0, 5, 5, 5),
+ err: core.ErrInsufficientFunds,
+ },
+ { // New account, 2 pooled tx with 63200 wei spent: reject nonce 0 with no-gastip-bump
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 4, 2),
+ err: txpool.ErrReplaceUnderpriced,
+ },
+ { // New account, 2 pooled tx with 63200 wei spent: reject nonce 0 with no-gascap-bump
+ from: "alice",
+ tx: makeUnsignedTx(0, 2, 2, 2),
+ err: txpool.ErrReplaceUnderpriced,
+ },
+ { // New account, 2 pooled tx with 63200 wei spent: reject nonce 0 with no-blobcap-bump
+ from: "alice",
+ tx: makeUnsignedTx(0, 2, 4, 1),
+ err: txpool.ErrReplaceUnderpriced,
+ },
+ { // New account, 2 pooled tx with 63200 wei spent: accept nonce 0 with 84100 wei spend (42000 extra)
+ from: "alice",
+ tx: makeUnsignedTx(0, 2, 4, 2),
+ err: nil,
+ },
+ },
+ },
+ }
+ for i, tt := range tests {
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage) // late defer, still ok
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert the seed transactions for the pool startup
+ var (
+ keys = make(map[string]*ecdsa.PrivateKey)
+ addrs = make(map[string]common.Address)
+ )
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ for acc, seed := range tt.seeds {
+ // Generate a new random key/address for the seed account
+ keys[acc], _ = crypto.GenerateKey()
+ addrs[acc] = crypto.PubkeyToAddress(keys[acc].PublicKey)
+
+ // Seed the state database with this account
+ statedb.AddBalance(addrs[acc], new(big.Int).SetUint64(seed.balance))
+ statedb.SetNonce(addrs[acc], seed.nonce)
+
+ // Sign the seed transactions and store them in the data store
+ for _, tx := range seed.txs {
+ var (
+ signed, _ = types.SignNewTx(keys[acc], types.LatestSigner(testChainConfig), tx)
+ blob, _ = rlp.EncodeToBytes(&blobTx{Tx: signed, Blobs: [][]byte{bytes.Repeat([]byte{0}, blobSize)}})
+ )
+ store.Put(blob)
+ }
+ }
+ statedb.Commit(true)
+ store.Close()
+
+ // Create a blob pool out of the pre-seeded data
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(1050),
+ blobfee: uint256.NewInt(105),
+ statedb: statedb,
+ }
+ pool, err := New(Config{Datadir: storage, PriceLimit: 1}, chain)
+ if err != nil {
+ t.Fatalf("test %d: failed to create blob pool: %v", i, err)
+ }
+ verifyPoolInternals(t, pool)
+
+ // Add each transaction one by one, verifying the pool internals in between
+ for j, add := range tt.adds {
+ signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(testChainConfig), add.tx)
+ if err := pool.Add(signed, [][]byte{bytes.Repeat([]byte{0}, blobSize)}); !errors.Is(err, add.err) {
+ t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, err, add.err)
+ }
+ verifyPoolInternals(t, pool)
+ }
+ // Verify the pool internals and close down the test
+ verifyPoolInternals(t, pool)
+ pool.Close()
+ }
+}
diff --git a/core/txpool/blobpool/config.go b/core/txpool/blobpool/config.go
new file mode 100644
index 0000000000000..58ed18fcc0bed
--- /dev/null
+++ b/core/txpool/blobpool/config.go
@@ -0,0 +1,58 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// Config are the configuration parameters of the blob transaction pool.
+type Config struct {
+ Datadir string // Data directory containing the currently executable blobs
+ Datacap uint64 // Soft-cap of database storage (hard cap is larger due to overhead)
+
+ PriceLimit uint64 // Minimum signer gas tip to enforce for acceptance into the pool
+ PriceBump uint64 // Minimum price bump percentage to replace an already existing nonce
+}
+
+// DefaultConfig contains the default configurations for the transaction pool.
+var DefaultConfig = Config{
+ Datadir: "blobpool",
+ Datacap: 10 * 1024 * 1024 * 1024,
+
+ PriceLimit: 1_000_000_000, // 1 gwei is the standard 1559 signer tip, start there
+ PriceBump: 100, // either have patience or be aggressive, no mushy ground
+}
+
+// sanitize checks the provided user configurations and changes anything that's
+// unreasonable or unworkable.
+func (config *Config) sanitize() Config {
+ conf := *config
+ if conf.Datacap < 1 {
+ log.Warn("Sanitizing invalid blobpool storage cap", "provided", conf.Datacap, "updated", DefaultConfig.Datacap)
+ conf.Datacap = DefaultConfig.Datacap
+ }
+ if conf.PriceLimit < 1 {
+ log.Warn("Sanitizing invalid blobpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit)
+ conf.PriceLimit = DefaultConfig.PriceLimit
+ }
+ if conf.PriceBump < 1 {
+ log.Warn("Sanitizing invalid blobpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
+ conf.PriceBump = DefaultConfig.PriceBump
+ }
+ return conf
+}
diff --git a/core/txpool/blobpool/evictheap.go b/core/txpool/blobpool/evictheap.go
new file mode 100644
index 0000000000000..10a602c2addd7
--- /dev/null
+++ b/core/txpool/blobpool/evictheap.go
@@ -0,0 +1,135 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "container/heap"
+ "math"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/holiman/uint256"
+)
+
+// evictHeap is a helper data structure to keep track of the cheapest bottleneck
+// transaction from each account to determine which account to evict from.
+//
+// The heap internally tracks a slice of cheapest transactions from each account
+// and a mapping from addresses to indices for direct removals/updates.
+//
+// The goal of the heap is to decide which account has the worst bottleneck to
+// evict transactions from.
+type evictHeap struct {
+ metas *map[common.Address][]*blobTxMeta // Pointer to the blob pool's index for price retrievals
+
+ basefeeJumps float64 // Pre-calculated absolute dynamic fee jumps for the base fee
+ blobfeeJumps float64 // Pre-calculated absolute dynamic fee jumps for the blob fee
+
+ addrs []common.Address // Heap of addresses to retrieve the cheapest out of
+ index map[common.Address]int // Indices into the heap for replacements
+}
+
+// newPriceHeap creates a new heap of the cheapest accounts in the blob pool to
+// evict from in case of over saturation.
+func newPriceHeap(basefee *uint256.Int, blobfee *uint256.Int, index *map[common.Address][]*blobTxMeta) *evictHeap {
+ heap := &evictHeap{
+ metas: index,
+ index: make(map[common.Address]int),
+ }
+ for addr := range *index {
+ heap.index[addr] = len(heap.addrs)
+ heap.addrs = append(heap.addrs, addr)
+ }
+ heap.reinit(basefee, blobfee, true)
+ return heap
+}
+
+// reinit updates the pre-calculated dynamic fee jumps in the price heap and runs
+// the sorting algorithm from scratch on the entire heap; force skips the no-op check.
+func (h *evictHeap) reinit(basefee *uint256.Int, blobfee *uint256.Int, force bool) {
+ // If the update is mostly the same as the old, don't sort pointlessly
+ basefeeJumps := dynamicFeeJumps(basefee)
+ blobfeeJumps := dynamicFeeJumps(blobfee)
+
+ if !force && math.Abs(h.basefeeJumps-basefeeJumps) < 0.01 && math.Abs(h.blobfeeJumps-blobfeeJumps) < 0.01 { // TODO(karalabe): 0.01 enough, maybe should be smaller? Maybe this optimization is moot?
+ return
+ }
+ // One or both of the dynamic fees jumped, resort the pool
+ h.basefeeJumps = basefeeJumps
+ h.blobfeeJumps = blobfeeJumps
+
+ heap.Init(h)
+}
+
+// Len implements sort.Interface as part of heap.Interface, returning the number
+// of accounts in the blob pool which can be considered for eviction.
+func (h *evictHeap) Len() int {
+ return len(h.addrs)
+}
+
+// Less implements sort.Interface as part of heap.Interface, returning which of
+// the two requested accounts has a cheaper bottleneck.
+func (h *evictHeap) Less(i, j int) bool {
+ txsI := (*(h.metas))[h.addrs[i]]
+ txsJ := (*(h.metas))[h.addrs[j]]
+
+ lastI := txsI[len(txsI)-1] // the last tx in the list is the eviction bottleneck
+ lastJ := txsJ[len(txsJ)-1]
+
+ prioI := evictionPriority(h.basefeeJumps, lastI.evictionExecFeeJumps, h.blobfeeJumps, lastI.evictionBlobFeeJumps)
+ if prioI > 0 {
+ prioI = 0 // cap at 0 so adequately-priced accounts compare purely by tip below
+ }
+ prioJ := evictionPriority(h.basefeeJumps, lastJ.evictionExecFeeJumps, h.blobfeeJumps, lastJ.evictionBlobFeeJumps)
+ if prioJ > 0 {
+ prioJ = 0
+ }
+ if prioI == prioJ {
+ return lastI.evictionExecTip.Lt(lastJ.evictionExecTip)
+ }
+ return prioI < prioJ
+}
+
+// Swap implements sort.Interface as part of heap.Interface, maintaining both the
+// order of the accounts according to the heap, and the account->slot mapping
+// used for direct replacements.
+func (h *evictHeap) Swap(i, j int) {
+ h.index[h.addrs[i]], h.index[h.addrs[j]] = h.index[h.addrs[j]], h.index[h.addrs[i]]
+ h.addrs[i], h.addrs[j] = h.addrs[j], h.addrs[i]
+}
+
+// Push implements heap.Interface, appending an item to the end of the account
+// ordering as well as the address to item slot mapping.
+func (h *evictHeap) Push(x any) {
+ h.index[x.(common.Address)] = len(h.addrs) // x is always a common.Address
+ h.addrs = append(h.addrs, x.(common.Address))
+}
+
+// Pop implements heap.Interface, removing and returning the last element of the
+// heap.
+//
+// Note, use `heap.Pop`, not `evictHeap.Pop`. This method is used by Go's heap,
+// to provide the functionality, it does not embed it.
+func (h *evictHeap) Pop() any {
+ // Remove the last element from the heap
+ size := len(h.addrs)
+ addr := h.addrs[size-1]
+ h.addrs = h.addrs[:size-1]
+
+ // Unindex the removed element and return
+ delete(h.index, addr)
+ return addr
+}
diff --git a/core/txpool/blobpool/evictheap_test.go b/core/txpool/blobpool/evictheap_test.go
new file mode 100644
index 0000000000000..b20e1ca3db75a
--- /dev/null
+++ b/core/txpool/blobpool/evictheap_test.go
@@ -0,0 +1,318 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "container/heap"
+ "math/rand"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/holiman/uint256"
+)
+
+// verifyHeapInternals verifies that all accounts present in the index are also
+// present in the heap and internals are consistent across various indices.
+func verifyHeapInternals(t *testing.T, evict *evictHeap) {
+ t.Helper()
+
+ // Ensure that all accounts are present in the heap and no extras
+ seen := make(map[common.Address]struct{})
+ for i, addr := range evict.addrs {
+ seen[addr] = struct{}{}
+ if _, ok := (*evict.metas)[addr]; !ok {
+ t.Errorf("heap contains unexpected address at slot %d: %v", i, addr)
+ }
+ }
+ for addr := range *evict.metas {
+ if _, ok := seen[addr]; !ok {
+ t.Errorf("heap is missing required address %v", addr)
+ }
+ }
+ if len(evict.addrs) != len(*evict.metas) {
+ t.Errorf("heap size %d mismatches metadata size %d", len(evict.addrs), len(*evict.metas))
+ }
+ // Ensure that all accounts are present in the heap order index and no extras
+ have := make([]common.Address, len(evict.index)) // reconstruct heap order from the index
+ for addr, i := range evict.index {
+ have[i] = addr
+ }
+ if len(have) != len(evict.addrs) {
+ t.Errorf("heap index size %d mismatches heap size %d", len(have), len(evict.addrs))
+ }
+ for i := 0; i < len(have) && i < len(evict.addrs); i++ {
+ if have[i] != evict.addrs[i] {
+ t.Errorf("heap index for slot %d mismatches: have %v, want %v", i, have[i], evict.addrs[i])
+ }
+ }
+}
+
+// Tests that the price heap can correctly sort its set of transactions based on
+// an input base- and blob fee.
+func TestPriceHeapSorting(t *testing.T) {
+ tests := []struct {
+ execTips []uint64
+ execFees []uint64
+ blobFees []uint64
+
+ basefee uint64
+ blobfee uint64
+
+ order []int
+ }{
+ // If everything is above the basefee and blobfee, order by miner tip
+ {
+ execTips: []uint64{1, 0, 2},
+ execFees: []uint64{1, 2, 3},
+ blobFees: []uint64{3, 2, 1},
+ basefee: 0,
+ blobfee: 0,
+ order: []int{1, 0, 2},
+ },
+ // If only basefees are used (blob fee matches with network), return the
+ // ones the furthest below the current basefee, splitting same ones with
+ // the tip. Anything above the basefee should be split by tip.
+ {
+ execTips: []uint64{100, 50, 100, 50, 1, 2, 3},
+ execFees: []uint64{1000, 1000, 500, 500, 2000, 2000, 2000},
+ blobFees: []uint64{0, 0, 0, 0, 0, 0, 0},
+ basefee: 1999,
+ blobfee: 0,
+ order: []int{3, 2, 1, 0, 4, 5, 6},
+ },
+ // If only blobfees are used (base fee matches with network), return the
+ // ones the furthest below the current blobfee, splitting same ones with
+ // the tip. Anything above the blobfee should be split by tip.
+ {
+ execTips: []uint64{100, 50, 100, 50, 1, 2, 3},
+ execFees: []uint64{0, 0, 0, 0, 0, 0, 0},
+ blobFees: []uint64{1000, 1000, 500, 500, 2000, 2000, 2000},
+ basefee: 0,
+ blobfee: 1999,
+ order: []int{3, 2, 1, 0, 4, 5, 6},
+ },
+ // If both basefee and blobfee is specified, sort by the larger distance
+ // of the two from the current network conditions, splitting same (loglog)
+ // ones via the tip.
+ //
+ // Basefee: 1000
+ // Blobfee: 100
+ //
+ // Tx #0: (800, 80) - 2 jumps below both => priority -1
+ // Tx #1: (630, 63) - 4 jumps below both => priority -2
+ // Tx #2: (800, 63) - 2 jumps below basefee, 4 jumps below blobfee => priority -2 (blob penalty dominates)
+ // Tx #3: (630, 80) - 4 jumps below basefee, 2 jumps below blobfee => priority -2 (base penalty dominates)
+ //
+ // Txs 1, 2, 3 share the same priority, split via tip, prefer 0 as the best
+ {
+ execTips: []uint64{1, 2, 3, 4},
+ execFees: []uint64{800, 630, 800, 630},
+ blobFees: []uint64{80, 63, 63, 80},
+ basefee: 1000,
+ blobfee: 100,
+ order: []int{1, 2, 3, 0},
+ },
+ }
+ for i, tt := range tests {
+ // Create an index of the transactions
+ index := make(map[common.Address][]*blobTxMeta)
+ for j := byte(0); j < byte(len(tt.execTips)); j++ {
+ addr := common.Address{j}
+
+ var (
+ execTip = uint256.NewInt(tt.execTips[j])
+ execFee = uint256.NewInt(tt.execFees[j])
+ blobFee = uint256.NewInt(tt.blobFees[j])
+
+ basefeeJumps = dynamicFeeJumps(execFee)
+ blobfeeJumps = dynamicFeeJumps(blobFee)
+ )
+ index[addr] = []*blobTxMeta{{
+ id: uint64(j),
+ size: 128 * 1024,
+ nonce: 0,
+ execTipCap: execTip,
+ execFeeCap: execFee,
+ blobFeeCap: blobFee,
+ basefeeJumps: basefeeJumps,
+ blobfeeJumps: blobfeeJumps,
+ evictionExecTip: execTip,
+ evictionExecFeeJumps: basefeeJumps,
+ evictionBlobFeeJumps: blobfeeJumps,
+ }}
+ }
+ // Create a price heap and check the pop order
+ priceheap := newPriceHeap(uint256.NewInt(tt.basefee), uint256.NewInt(tt.blobfee), &index)
+ verifyHeapInternals(t, priceheap)
+
+ for j := 0; j < len(tt.order); j++ {
+ if next := heap.Pop(priceheap); int(next.(common.Address)[0]) != tt.order[j] {
+ t.Errorf("test %d, item %d: order mismatch: have %d, want %d", i, j, next.(common.Address)[0], tt.order[j])
+ } else {
+ delete(index, next.(common.Address)) // remove to simulate a correct pool for the test
+ }
+ verifyHeapInternals(t, priceheap)
+ }
+ }
+}
+
+// Benchmarks reheaping the entire set of accounts in the blob pool at various datacaps.
+func BenchmarkPriceHeapReinit1MB(b *testing.B) { benchmarkPriceHeapReinit(b, 1024*1024) }
+func BenchmarkPriceHeapReinit10MB(b *testing.B) { benchmarkPriceHeapReinit(b, 10*1024*1024) }
+func BenchmarkPriceHeapReinit100MB(b *testing.B) { benchmarkPriceHeapReinit(b, 100*1024*1024) }
+func BenchmarkPriceHeapReinit1GB(b *testing.B) { benchmarkPriceHeapReinit(b, 1024*1024*1024) }
+func BenchmarkPriceHeapReinit10GB(b *testing.B) { benchmarkPriceHeapReinit(b, 10*1024*1024*1024) }
+func BenchmarkPriceHeapReinit25GB(b *testing.B) { benchmarkPriceHeapReinit(b, 25*1024*1024*1024) }
+func BenchmarkPriceHeapReinit50GB(b *testing.B) { benchmarkPriceHeapReinit(b, 50*1024*1024*1024) }
+func BenchmarkPriceHeapReinit100GB(b *testing.B) { benchmarkPriceHeapReinit(b, 100*1024*1024*1024) }
+
+func benchmarkPriceHeapReinit(b *testing.B, datacap uint64) {
+ // Calculate how many unique transactions we can fit into the provided disk
+ // data cap
+ blobs := datacap / (params.BlobTxBytesPerFieldElement * params.BlobTxFieldElementsPerBlob)
+
+ // Create a random set of transactions with random fees. Use a separate account
+ // for each transaction to make it worst case.
+ index := make(map[common.Address][]*blobTxMeta)
+ for i := 0; i < int(blobs); i++ {
+ var addr common.Address
+ rand.Read(addr[:])
+
+ var (
+ execTip = uint256.NewInt(rand.Uint64())
+ execFee = uint256.NewInt(rand.Uint64())
+ blobFee = uint256.NewInt(rand.Uint64())
+
+ basefeeJumps = dynamicFeeJumps(execFee)
+ blobfeeJumps = dynamicFeeJumps(blobFee)
+ )
+ index[addr] = []*blobTxMeta{{
+ id: uint64(i),
+ size: 128 * 1024,
+ nonce: 0,
+ execTipCap: execTip,
+ execFeeCap: execFee,
+ blobFeeCap: blobFee,
+ basefeeJumps: basefeeJumps,
+ blobfeeJumps: blobfeeJumps,
+ evictionExecTip: execTip,
+ evictionExecFeeJumps: basefeeJumps,
+ evictionBlobFeeJumps: blobfeeJumps,
+ }}
+ }
+ // Create a price heap and reinit it over and over
+ heap := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), &index)
+
+ basefees := make([]*uint256.Int, b.N)
+ blobfees := make([]*uint256.Int, b.N)
+ for i := 0; i < b.N; i++ {
+ basefees[i] = uint256.NewInt(rand.Uint64())
+ blobfees[i] = uint256.NewInt(rand.Uint64())
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ heap.reinit(basefees[i], blobfees[i], true)
+ }
+}
+
+// Benchmarks overflowing the heap over and over (add and then drop) at various datacaps.
+func BenchmarkPriceHeapOverflow1MB(b *testing.B) { benchmarkPriceHeapOverflow(b, 1024*1024) }
+func BenchmarkPriceHeapOverflow10MB(b *testing.B) { benchmarkPriceHeapOverflow(b, 10*1024*1024) }
+func BenchmarkPriceHeapOverflow100MB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024) }
+func BenchmarkPriceHeapOverflow1GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 1024*1024*1024) }
+func BenchmarkPriceHeapOverflow10GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 10*1024*1024*1024) }
+func BenchmarkPriceHeapOverflow25GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 25*1024*1024*1024) }
+func BenchmarkPriceHeapOverflow50GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 50*1024*1024*1024) }
+func BenchmarkPriceHeapOverflow100GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024*1024) }
+
+func benchmarkPriceHeapOverflow(b *testing.B, datacap uint64) {
+ // Calculate how many unique transactions we can fit into the provided disk
+ // data cap
+ blobs := datacap / (params.BlobTxBytesPerFieldElement * params.BlobTxFieldElementsPerBlob)
+
+ // Create a random set of transactions with random fees. Use a separate account
+ // for each transaction to make it worst case.
+ index := make(map[common.Address][]*blobTxMeta)
+ for i := 0; i < int(blobs); i++ {
+ var addr common.Address
+ rand.Read(addr[:])
+
+ var (
+ execTip = uint256.NewInt(rand.Uint64())
+ execFee = uint256.NewInt(rand.Uint64())
+ blobFee = uint256.NewInt(rand.Uint64())
+
+ basefeeJumps = dynamicFeeJumps(execFee)
+ blobfeeJumps = dynamicFeeJumps(blobFee)
+ )
+ index[addr] = []*blobTxMeta{{
+ id: uint64(i),
+ size: 128 * 1024,
+ nonce: 0,
+ execTipCap: execTip,
+ execFeeCap: execFee,
+ blobFeeCap: blobFee,
+ basefeeJumps: basefeeJumps,
+ blobfeeJumps: blobfeeJumps,
+ evictionExecTip: execTip,
+ evictionExecFeeJumps: basefeeJumps,
+ evictionBlobFeeJumps: blobfeeJumps,
+ }}
+ }
+ // Create a price heap and overflow it over and over
+ evict := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), &index)
+ var (
+ addrs = make([]common.Address, b.N)
+ metas = make([]*blobTxMeta, b.N)
+ )
+ for i := 0; i < b.N; i++ {
+ rand.Read(addrs[i][:])
+
+ var (
+ execTip = uint256.NewInt(rand.Uint64())
+ execFee = uint256.NewInt(rand.Uint64())
+ blobFee = uint256.NewInt(rand.Uint64())
+
+ basefeeJumps = dynamicFeeJumps(execFee)
+ blobfeeJumps = dynamicFeeJumps(blobFee)
+ )
+ metas[i] = &blobTxMeta{
+ id: uint64(int(blobs) + i),
+ size: 128 * 1024,
+ nonce: 0,
+ execTipCap: execTip,
+ execFeeCap: execFee,
+ blobFeeCap: blobFee,
+ basefeeJumps: basefeeJumps,
+ blobfeeJumps: blobfeeJumps,
+ evictionExecTip: execTip,
+ evictionExecFeeJumps: basefeeJumps,
+ evictionBlobFeeJumps: blobfeeJumps,
+ }
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ index[addrs[i]] = []*blobTxMeta{metas[i]}
+ heap.Push(evict, addrs[i])
+
+ drop := heap.Pop(evict)
+ delete(index, drop.(common.Address))
+ }
+}
diff --git a/core/txpool/blobpool/interface.go b/core/txpool/blobpool/interface.go
new file mode 100644
index 0000000000000..198d5245071ac
--- /dev/null
+++ b/core/txpool/blobpool/interface.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/params"
+)
+
+// BlockChain defines the minimal set of methods needed to back a blob pool with
+// a chain. Exists to allow mocking the live chain out of tests.
+type BlockChain interface {
+ // Config retrieves the chain's fork configuration.
+ Config() *params.ChainConfig
+
+ // CurrentBlock returns the current head of the chain.
+ CurrentBlock() *types.Header
+
+ // CurrentFinalBlock returns the current block below which blobs should not
+ // be maintained anymore for reorg purposes.
+ CurrentFinalBlock() *types.Header
+
+ // GetBlock retrieves a specific block, used during pool resets.
+ GetBlock(hash common.Hash, number uint64) *types.Block
+
+ // StateAt returns a state database for a given root hash (generally the head).
+ StateAt(root common.Hash) (*state.StateDB, error)
+}
diff --git a/core/txpool/blobpool/limbo.go b/core/txpool/blobpool/limbo.go
new file mode 100644
index 0000000000000..74af3ac416ac5
--- /dev/null
+++ b/core/txpool/blobpool/limbo.go
@@ -0,0 +1,261 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "errors"
+ "os"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/billy"
+)
+
+// limboBlob is a wrapper around an opaque blob set that also contains the tx hash
+// to which it belongs as well as the block number in which it was included for
+// finality eviction.
+type limboBlob struct {
+ Owner common.Hash // Owner transaction's hash to support resurrecting reorged txs
+ Block uint64 // Block in which the blob transaction was included
+ Blobs [][]byte // The opaque blobs originally part of the transaction
+}
+
+type limboBlobMeta struct { // NOTE(review): appears unused within this file — confirm usage elsewhere before keeping
+ id uint64 // Storage ID in the limbo's persistent store
+ block uint64 // Block in which the blob transaction was included
+}
+
+// limbo is a light, indexed database to temporarily store recently included
+// blobs until they are finalized. The purpose is to support small reorgs, which
+// would require pulling back up old blobs (which aren't part of the chain).
+//
+// TODO(karalabe): Currently updating the inclusion block of a blob needs a full db rewrite. Can we do without?
+type limbo struct {
+ store billy.Database // Persistent data store for limboed blobs
+
+ index map[common.Hash]uint64 // Mappings from tx hashes to datastore ids
+ groups map[uint64]map[uint64]common.Hash // Set of txs included in past blocks
+}
+
+// newLimbo opens and indexes a set of limboed blob transactions.
+func newLimbo(datadir string) (*limbo, error) {
+ l := &limbo{
+ index: make(map[common.Hash]uint64),
+ groups: make(map[uint64]map[uint64]common.Hash),
+ }
+ // Index all limboed blobs on disk and delete anything unprocessable
+ var fails []uint64
+ index := func(id uint64, size uint32, data []byte) {
+ if l.parseBlob(id, data) != nil {
+ fails = append(fails, id)
+ }
+ }
+ if err := os.MkdirAll(datadir, 0700); err != nil {
+ return nil, err
+ }
+ store, err := billy.Open(billy.Options{Path: datadir}, newSlotter(), index)
+ if err != nil {
+ return nil, err
+ }
+ l.store = store
+
+ if len(fails) > 0 {
+ log.Warn("Dropping invalidated limboed blobs", "ids", fails)
+ for _, id := range fails {
+ if err := l.store.Delete(id); err != nil {
+ l.Close()
+ return nil, err
+ }
+ }
+ }
+ return l, nil
+}
+
+// Close closes the underlying persistent store.
+func (l *limbo) Close() error {
+ return l.store.Close()
+}
+
+// parseBlob is a callback method on limbo creation that gets called for each
+// limboed blob on disk to create the in-memory metadata index.
+func (l *limbo) parseBlob(id uint64, data []byte) error {
+ item := new(limboBlob)
+ if err := rlp.DecodeBytes(data, item); err != nil {
+ // This path is impossible unless the disk data representation changes
+ // across restarts. For that ever-improbable case, recover gracefully
+ // by ignoring this data entry.
+ log.Error("Failed to decode blob limbo entry", "id", id, "err", err)
+ return err
+ }
+ if _, ok := l.index[item.Owner]; ok {
+ // This path is impossible, unless due to a programming error a blob gets
+ // inserted into the limbo which was already part of it. Recover gracefully
+ // by ignoring this data entry.
+ log.Error("Dropping duplicate blob limbo entry", "owner", item.Owner, "id", id)
+ return errors.New("duplicate blob")
+ }
+ l.index[item.Owner] = id
+
+ if _, ok := l.groups[item.Block]; !ok {
+ l.groups[item.Block] = make(map[uint64]common.Hash)
+ }
+ l.groups[item.Block][id] = item.Owner
+
+ return nil
+}
+
+// finalize evicts all blobs belonging to a recently finalized block or older.
+func (l *limbo) finalize(final *types.Header) {
+ // Just in case there's no final block yet (network not yet merged, weird
+ // restart, sethead, etc), fail gracefully.
+ if final == nil {
+ log.Error("Nil finalized block cannot evict old blobs")
+ return
+ }
+ for block, ids := range l.groups {
+ if block > final.Number.Uint64() { // strictly newer than finalized, keep
+ continue
+ }
+ for id, owner := range ids {
+ if err := l.store.Delete(id); err != nil {
+ log.Error("Failed to drop finalized blob", "block", block, "id", id, "err", err)
+ }
+ delete(l.index, owner)
+ }
+ delete(l.groups, block)
+ }
+}
+
+// push stores a new blob transaction into the limbo, waiting until finality for
+// it to be automatically evicted.
+func (l *limbo) push(tx common.Hash, block uint64, blobs [][]byte) error {
+ // If the blobs are already tracked by the limbo, consider it a programming
+ // error. There's not much to do against it, but be loud.
+ if _, ok := l.index[tx]; ok {
+ log.Error("Limbo cannot push already tracked blobs", "tx", tx)
+ return errors.New("already tracked blob transaction")
+ }
+ if err := l.setAndIndex(tx, block, blobs); err != nil {
+ log.Error("Failed to set and index liboed blobs", "tx", tx, "err", err) // TODO(review): "liboed" typo in the runtime log message, left as-is here
+ return err
+ }
+ return nil
+}
+
+// pull retrieves a previously pushed set of blobs back from the limbo, removing
+// them at the same time. This method should be used when a previously included
+// blob transaction gets reorged out.
+func (l *limbo) pull(tx common.Hash) ([][]byte, error) {
+ // If the blobs are not tracked by the limbo, there's not much to do. This
+ // can happen for example if a blob transaction is mined without pushing it
+ // into the network first.
+ id, ok := l.index[tx]
+ if !ok {
+ log.Trace("Limbo cannot pull non-tracked blobs", "tx", tx)
+ return nil, errors.New("unseen blob transaction")
+ }
+ item, err := l.getAndDrop(id)
+ if err != nil {
+ log.Error("Failed to get and drop limboed blobs", "tx", tx, "id", id, "err", err)
+ return nil, err
+ }
+ return item.Blobs, nil
+}
+
+// update changes the block number under which a blob transaction is tracked. This
+// method should be used when a reorg changes a transaction's inclusion block.
+//
+// The method may log errors for various unexpected scenarios but will not return
+// any of it since there's no clear error case. Some errors may be due to coding
+// issues, others caused by signers mining MEV stuff or swapping transactions. In
+// all cases, the pool needs to continue operating.
+func (l *limbo) update(tx common.Hash, block uint64) {
+ // If the blobs are not tracked by the limbo, there's not much to do. This
+ // can happen for example if a blob transaction is mined without pushing it
+ // into the network first.
+ id, ok := l.index[tx]
+ if !ok {
+ log.Trace("Limbo cannot update non-tracked blobs", "tx", tx)
+ return
+ }
+ // If there was no change in the blob's inclusion block, don't mess around
+ // with heavy database operations.
+ if _, ok := l.groups[block][id]; ok {
+ log.Trace("Blob transaction unchanged in limbo", "tx", tx, "block", block)
+ return
+ }
+ // Retrieve the old blobs from the data store and write them back with a new
+ // block number. If anything fails, there's not much to do, go on.
+ item, err := l.getAndDrop(id)
+ if err != nil {
+ log.Error("Failed to get and drop limboed blobs", "tx", tx, "id", id, "err", err)
+ return
+ }
+ if err := l.setAndIndex(tx, block, item.Blobs); err != nil {
+ log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err)
+ return
+ }
+ log.Trace("Blob transaction updated in limbo", "tx", tx, "old-block", item.Block, "new-block", block)
+}
+
+// getAndDrop retrieves a blob item from the limbo store and deletes it both from
+// the store and indices.
+func (l *limbo) getAndDrop(id uint64) (*limboBlob, error) {
+ data, err := l.store.Get(id)
+ if err != nil {
+ return nil, err
+ }
+ item := new(limboBlob)
+ if err = rlp.DecodeBytes(data, item); err != nil {
+ return nil, err
+ }
+ delete(l.index, item.Owner) // drop the in-memory indices before touching disk
+ delete(l.groups[item.Block], id)
+ if len(l.groups[item.Block]) == 0 {
+ delete(l.groups, item.Block)
+ }
+ if err := l.store.Delete(id); err != nil {
+ return nil, err
+ }
+ return item, nil
+}
+
+// setAndIndex assembles a limbo blob database entry and stores it, also updating
+// the in-memory indices.
+func (l *limbo) setAndIndex(tx common.Hash, block uint64, blobs [][]byte) error {
+ item := &limboBlob{
+ Owner: tx,
+ Block: block,
+ Blobs: blobs,
+ }
+ data, err := rlp.EncodeToBytes(item)
+ if err != nil {
+ panic(err) // cannot happen at runtime, dev error
+ }
+ id, err := l.store.Put(data)
+ if err != nil {
+ return err
+ }
+ l.index[tx] = id
+ if _, ok := l.groups[block]; !ok {
+ l.groups[block] = make(map[uint64]common.Hash)
+ }
+ l.groups[block][id] = tx
+ return nil
+}
diff --git a/core/txpool/blobpool/metrics.go b/core/txpool/blobpool/metrics.go
new file mode 100644
index 0000000000000..cb19dc3133bc6
--- /dev/null
+++ b/core/txpool/blobpool/metrics.go
@@ -0,0 +1,76 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import "github.com/ethereum/go-ethereum/metrics"
+
+var (
+ // datacapGauge tracks the user's configured capacity for the blob pool. It
+ // is mostly a way to expose/debug issues.
+ datacapGauge = metrics.NewRegisteredGauge("blobpool/datacap", nil)
+
+ // The below metrics track the per-datastore metrics for the primary blob
+ // store and the temporary limbo store.
+ datausedGauge = metrics.NewRegisteredGauge("blobpool/dataused", nil)
+ datarealGauge = metrics.NewRegisteredGauge("blobpool/datareal", nil)
+ slotusedGauge = metrics.NewRegisteredGauge("blobpool/slotused", nil)
+
+ limboDatausedGauge = metrics.NewRegisteredGauge("blobpool/limbo/dataused", nil)
+ limboDatarealGauge = metrics.NewRegisteredGauge("blobpool/limbo/datareal", nil)
+ limboSlotusedGauge = metrics.NewRegisteredGauge("blobpool/limbo/slotused", nil)
+
+ // The below metrics track the per-shelf metrics for the primary blob store
+ // and the temporary limbo store.
+ shelfDatausedGaugeName = "blobpool/shelf-%d/dataused"
+ shelfDatagapsGaugeName = "blobpool/shelf-%d/datagaps"
+ shelfSlotusedGaugeName = "blobpool/shelf-%d/slotused"
+ shelfSlotgapsGaugeName = "blobpool/shelf-%d/slotgaps"
+
+ limboShelfDatausedGaugeName = "blobpool/limbo/shelf-%d/dataused"
+ limboShelfDatagapsGaugeName = "blobpool/limbo/shelf-%d/datagaps"
+ limboShelfSlotusedGaugeName = "blobpool/limbo/shelf-%d/slotused"
+ limboShelfSlotgapsGaugeName = "blobpool/limbo/shelf-%d/slotgaps"
+
+ // The oversized metrics aggregate the shelf stats above the max blob count
+ // limits to track transactions that are just huge, but don't contain blobs.
+ //
+ // There are no oversized data in the limbo, it only contains blobs and some
+ // constant metadata.
+ oversizedDatausedGauge = metrics.NewRegisteredGauge("blobpool/oversized/dataused", nil)
+ oversizedDatagapsGauge = metrics.NewRegisteredGauge("blobpool/oversized/datagaps", nil)
+ oversizedSlotusedGauge = metrics.NewRegisteredGauge("blobpool/oversized/slotused", nil)
+ oversizedSlotgapsGauge = metrics.NewRegisteredGauge("blobpool/oversized/slotgaps", nil)
+
+ // basefeeGauge and blobfeeGauge track the current network 1559 base fee and
+ // 4844 blob fee respectively.
+ basefeeGauge = metrics.NewRegisteredGauge("blobpool/basefee", nil)
+ blobfeeGauge = metrics.NewRegisteredGauge("blobpool/blobfee", nil)
+
+ // pooltipGague is the configurable miner tip to permit a transaction into
+ // the pool.
+ pooltipGague = metrics.NewRegisteredGauge("blobpool/pooltip", nil) // NOTE(review): identifier misspells "Gauge"; renaming would touch every user in the package
+
+ // addwait/time, resetwait/time and pendwait/time track the rough health of
+ // the pool and whether or not it's capable of keeping up with the load from
+ // the network.
+ addwaitHist = metrics.NewRegisteredHistogram("blobpool/addwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+ addtimeHist = metrics.NewRegisteredHistogram("blobpool/addtime", nil, metrics.NewExpDecaySample(1028, 0.015))
+ resetwaitHist = metrics.NewRegisteredHistogram("blobpool/resetwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+ resettimeHist = metrics.NewRegisteredHistogram("blobpool/resettime", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pendwaitHist = metrics.NewRegisteredHistogram("blobpool/pendwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+ pendtimeHist = metrics.NewRegisteredHistogram("blobpool/pendtime", nil, metrics.NewExpDecaySample(1028, 0.015))
+)
diff --git a/core/txpool/blobpool/priority.go b/core/txpool/blobpool/priority.go
new file mode 100644
index 0000000000000..c58518aac1b85
--- /dev/null
+++ b/core/txpool/blobpool/priority.go
@@ -0,0 +1,109 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "math"
+ "math/big"
+
+ "github.com/holiman/uint256"
+)
+
+// log2_1_125 is used in the eviction priority calculation.
+var log2_1_125 = math.Log2(1.125)
+
+// evictionPriority calculates the eviction priority based on the algorithm
+// described in the BlobPool docs for both fee components.
+//
+// This method takes about 8ns on a very recent laptop CPU, recalculating about
+// 125 million transaction priority values per second.
+func evictionPriority(basefeeJumps float64, txBasefeeJumps, blobfeeJumps, txBlobfeeJumps float64) int {
+ var (
+ basefeePriority = evictionPriority1D(basefeeJumps, txBasefeeJumps)
+ blobfeePriority = evictionPriority1D(blobfeeJumps, txBlobfeeJumps)
+ )
+ if basefeePriority < blobfeePriority {
+ return basefeePriority
+ }
+ return blobfeePriority
+}
+
+// evictionPriority1D calculates the eviction priority based on the algorithm
+// described in the BlobPool docs for a single fee component.
+func evictionPriority1D(basefeeJumps float64, txfeeJumps float64) int {
+ jumps := txfeeJumps - basefeeJumps
+ if int(jumps) == 0 {
+ return 0 // can't log2 0
+ }
+ if jumps < 0 {
+ return -intLog2(int(-math.Floor(jumps)))
+ }
+ return intLog2(int(math.Ceil(jumps)))
+}
+
+// dynamicFeeJumps calculates the log1.125(fee), namely the number of fee jumps
+// needed to reach the requested one. We only use it when calculating the jumps
+// between 2 fees, so it doesn't matter from what exact number it returns.
+// It returns the result from (0, 1, 1.125).
+//
+// This method is very expensive, taking about 75ns on a very recent laptop CPU,
+// but the result does not change with the lifetime of a transaction, so it can
+// be cached.
+func dynamicFeeJumps(fee *uint256.Int) float64 {
+ if fee.IsZero() {
+ return 0 // can't log2 zero, should never happen outside tests, but don't choke
+ }
+ n, _ := new(big.Float).SetInt(fee.ToBig()).Float64()
+ return math.Log2(n) / log2_1_125
+}
+
+// intLog2 is a helper to calculate the integral part of a log2 of an unsigned
+// integer. It is a very specific calculation that's not particularly useful in
+// general, but it's what we need here (it's fast).
+func intLog2(n int) int {
+ switch {
+ case n == 0:
+ panic("log2(0) is undefined")
+ case n == 1:
+ return 0
+ case n < 4:
+ return 1
+ case n < 8:
+ return 2
+ case n < 16:
+ return 3
+ case n < 32:
+ return 4
+ case n < 64:
+ return 5
+ case n < 128:
+ return 6
+ case n < 256:
+ return 7
+ case n < 512:
+ return 8
+ case n < 1024:
+ return 9
+ case n < 2048:
+ return 10
+ default:
+ // The input is log1.125(uint256) = log2(uint256) / log2(1.125). At the
+ // most extreme, log2(uint256) will be a bit below 257, and the constant
+ // log2(1.125) ~= 0.17. The largest input thus is ~257 / ~0.17 ~= ~1511.
+ panic("dynamic fee jump diffs cannot reach this")
+ }
+}
diff --git a/core/txpool/blobpool/priority_test.go b/core/txpool/blobpool/priority_test.go
new file mode 100644
index 0000000000000..14a9e83b3fe4b
--- /dev/null
+++ b/core/txpool/blobpool/priority_test.go
@@ -0,0 +1,88 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "math/rand"
+ "testing"
+
+ "github.com/holiman/uint256"
+)
+
+// Tests that the priority fees are calculated correctly as the log2 of the fee
+// jumps needed to go from the base fee to the tx's fee cap.
+func TestPriorityCalculation(t *testing.T) {
+ tests := []struct {
+ basefee uint64
+ txfee uint64
+ result int
+ }{
+ {basefee: 7, txfee: 10, result: 2}, // 3.02 jumps, 4 ceil, 2 log2
+ {basefee: 17_200_000_000, txfee: 17_200_000_000, result: 0}, // 0 jumps, special case 0 log2
+ {basefee: 9_853_941_692, txfee: 11_085_092_510, result: 0}, // 0.99 jumps, 1 ceil, 0 log2
+ {basefee: 11_544_106_391, txfee: 10_356_781_100, result: 0}, // -0.92 jumps, -1 floor, 0 log2
+ {basefee: 17_200_000_000, txfee: 7, result: -7}, // -183.57 jumps, -184 floor, -7 log2
+ {basefee: 7, txfee: 17_200_000_000, result: 7}, // 183.57 jumps, 184 ceil, 7 log2
+ }
+ for i, tt := range tests {
+ var (
+ baseJumps = dynamicFeeJumps(uint256.NewInt(tt.basefee))
+ feeJumps = dynamicFeeJumps(uint256.NewInt(tt.txfee))
+ )
+ if prio := evictionPriority1D(baseJumps, feeJumps); prio != tt.result {
+ t.Errorf("test %d priority mismatch: have %d, want %d", i, prio, tt.result)
+ }
+ }
+}
+
+// Benchmarks how many dynamic fee jump values can be done.
+func BenchmarkDynamicFeeJumpCalculation(b *testing.B) {
+ fees := make([]*uint256.Int, b.N)
+ for i := 0; i < b.N; i++ {
+ fees[i] = uint256.NewInt(rand.Uint64())
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ dynamicFeeJumps(fees[i])
+ }
+}
+
+// Benchmarks how many priority recalculations can be done.
+func BenchmarkPriorityCalculation(b *testing.B) {
+ // The basefee and blob fee is constant for all transactions across a block,
+ // so we can assume their absolute jump counts can be pre-computed.
+ basefee := uint256.NewInt(17_200_000_000) // 17.2 Gwei is the 22.03.2023 zero-emission basefee, random number
+ blobfee := uint256.NewInt(123_456_789_000) // Completely random, no idea what this will be
+
+ basefeeJumps := dynamicFeeJumps(basefee)
+ blobfeeJumps := dynamicFeeJumps(blobfee)
+
+ // The transaction's fee cap and blob fee cap are constant across the life
+ // of the transaction, so we can pre-calculate and cache them.
+ txBasefeeJumps := make([]float64, b.N)
+ txBlobfeeJumps := make([]float64, b.N)
+ for i := 0; i < b.N; i++ {
+ txBasefeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64()))
+ txBlobfeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64()))
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ evictionPriority(basefeeJumps, txBasefeeJumps[i], blobfeeJumps, txBlobfeeJumps[i])
+ }
+}
diff --git a/core/txpool/blobpool/slotter.go b/core/txpool/blobpool/slotter.go
new file mode 100644
index 0000000000000..35349c3445cf5
--- /dev/null
+++ b/core/txpool/blobpool/slotter.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+// newSlotter creates a helper method for the Billy datastore that returns the
+// individual shelf sizes used to store transactions in.
+//
+// The slotter will create shelves for each possible blob count + some tx metadata
+// wiggle room, up to the max permitted limits.
+//
+// The slotter also creates a shelf for 0-blob transactions. Whilst those are not
+// allowed in the current protocol, having an empty shelf is not a relevant use
+// of resources, but it makes stress testing with junk transactions simpler.
+func newSlotter() func() (uint32, bool) {
+ slotsize := uint32(txAvgSize)
+ slotsize -= uint32(blobSize) // underflows, it's ok, will overflow back in the first return
+
+ return func() (size uint32, done bool) {
+ slotsize += blobSize
+ finished := slotsize > maxBlobsPerTransaction*blobSize+txMaxSize
+
+ return slotsize, finished
+ }
+}
diff --git a/core/txpool/blobpool/slotter_test.go b/core/txpool/blobpool/slotter_test.go
new file mode 100644
index 0000000000000..2751a1872541f
--- /dev/null
+++ b/core/txpool/blobpool/slotter_test.go
@@ -0,0 +1,58 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import "testing"
+
+// Tests that the slotter creates the expected database shelves.
+func TestNewSlotter(t *testing.T) {
+ // Generate the database shelve sizes
+ slotter := newSlotter()
+
+ var shelves []uint32
+ for {
+ shelf, done := slotter()
+ shelves = append(shelves, shelf)
+ if done {
+ break
+ }
+ }
+ // Compare the database shelves to the expected ones
+ want := []uint32{
+ 0*blobSize + txAvgSize, // 0 blob + some expected tx infos
+ 1*blobSize + txAvgSize, // 1 blob + some expected tx infos
+ 2*blobSize + txAvgSize, // 2 blob + some expected tx infos (could be fewer blobs and more tx data)
+ 3*blobSize + txAvgSize, // 3 blob + some expected tx infos (could be fewer blobs and more tx data)
+ 4*blobSize + txAvgSize, // 4 blob + some expected tx infos (could be fewer blobs and more tx data)
+ 5*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 6*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 7*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 8*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 9*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 10*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 11*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 12*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos >= 4 blobs + max tx metadata size
+ }
+ if len(shelves) != len(want) {
+ t.Errorf("shelves count mismatch: have %d, want %d", len(shelves), len(want))
+ }
+ for i := 0; i < len(shelves) && i < len(want); i++ {
+ if shelves[i] != want[i] {
+ t.Errorf("shelf %d mismatch: have %d, want %d", i, shelves[i], want[i])
+ }
+ }
+}
diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go
new file mode 100644
index 0000000000000..723050ef237cc
--- /dev/null
+++ b/core/txpool/subpool.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package txpool
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// SubPool represents a specialized transaction pool that lives on its own (e.g.
+// blob pool). Since independent of how many specialized pools we have, they do
+// need to be updated in lockstep and assemble into one coherent view for block
+// production, this interface defines the common methods that allow the primary
+// transaction pool to manage the subpools.
+type SubPool interface {
+ // Reset retrieves the current state of the blockchain and ensures the content
+ // of the transaction pool is valid with regard to the chain state.
+ Reset(oldHead, newHead *types.Header)
+
+ // SetGasTip updates the minimum price required by the subpool for a new
+ // transaction, and drops all transactions below this threshold.
+ SetGasTip(tip *big.Int)
+}
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index 056b1ebe87490..6d1d131db70b9 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -178,8 +178,7 @@ type Config struct {
Lifetime time.Duration // Maximum amount of time non-executable transaction are queued
}
-// DefaultConfig contains the default configurations for the transaction
-// pool.
+// DefaultConfig contains the default configurations for the transaction pool.
var DefaultConfig = Config{
Journal: "transactions.rlp",
Rejournal: time.Hour,
@@ -245,7 +244,7 @@ type TxPool struct {
config Config
chainconfig *params.ChainConfig
chain blockChain
- gasPrice *big.Int
+ gasTip *big.Int
txFeed event.Feed
scope event.SubscriptionScope
signer types.Signer
@@ -256,9 +255,9 @@ type TxPool struct {
eip1559 atomic.Bool // Fork indicator whether we are using EIP-1559 type transactions.
shanghai atomic.Bool // Fork indicator whether we are in the Shanghai stage.
+ currentHead *types.Header // Current head of the blockchain
currentState *state.StateDB // Current state in the blockchain head
pendingNonces *noncer // Pending state tracking virtual nonces
- currentMaxGas atomic.Uint64 // Current gas limit for transaction caps
locals *accountSet // Set of local transaction to exempt from eviction rules
journal *journal // Journal of local transaction to back up to disk
@@ -280,15 +279,17 @@ type TxPool struct {
initDoneCh chan struct{} // is closed once the pool is initialized (for tests)
changesSinceReorg int // A counter for how many drops we've performed in-between reorg.
+
+ subpools []SubPool // List of subpools for specialized transaction handling
}
type txpoolResetRequest struct {
oldHead, newHead *types.Header
}
-// NewTxPool creates a new transaction pool to gather, sort and filter inbound
+// New creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
-func NewTxPool(config Config, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
+func New(config Config, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
// Sanitize the input to ensure no vulnerable gas prices are set
config = (&config).sanitize()
@@ -309,7 +310,7 @@ func NewTxPool(config Config, chainconfig *params.ChainConfig, chain blockChain)
reorgDoneCh: make(chan chan struct{}),
reorgShutdownCh: make(chan struct{}),
initDoneCh: make(chan struct{}),
- gasPrice: new(big.Int).SetUint64(config.PriceLimit),
+ gasTip: new(big.Int).SetUint64(config.PriceLimit),
}
pool.locals = newAccountSet(pool.signer)
for _, addr := range config.Locals {
@@ -343,6 +344,16 @@ func NewTxPool(config Config, chainconfig *params.ChainConfig, chain blockChain)
return pool
}
+// AddSubPool injects a specialized pool into the main transaction pool to have
+// a consistent view of the chain state across both of them.
+func (pool *TxPool) AddSubPool(subpool SubPool) {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+
+ pool.subpools = append(pool.subpools, subpool)
+ subpool.Reset(nil, pool.currentHead)
+}
+
// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
@@ -443,33 +454,29 @@ func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subsc
return pool.scope.Track(pool.txFeed.Subscribe(ch))
}
-// GasPrice returns the current gas price enforced by the transaction pool.
-func (pool *TxPool) GasPrice() *big.Int {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
-
- return new(big.Int).Set(pool.gasPrice)
-}
-
-// SetGasPrice updates the minimum price required by the transaction pool for a
+// SetGasTip updates the minimum gas tip required by the transaction pool for a
// new transaction, and drops all transactions below this threshold.
-func (pool *TxPool) SetGasPrice(price *big.Int) {
+func (pool *TxPool) SetGasTip(tip *big.Int) {
pool.mu.Lock()
defer pool.mu.Unlock()
- old := pool.gasPrice
- pool.gasPrice = price
- // if the min miner fee increased, remove transactions below the new threshold
- if price.Cmp(old) > 0 {
+ old := pool.gasTip
+ pool.gasTip = tip
+
+ // If the min miner fee increased, remove transactions below the new threshold
+ if tip.Cmp(old) > 0 {
// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
- drop := pool.all.RemotesBelowTip(price)
+ drop := pool.all.RemotesBelowTip(tip)
for _, tx := range drop {
pool.removeTx(tx.Hash(), false)
}
pool.priced.Removed(len(drop))
}
-
- log.Info("Transaction pool price threshold updated", "price", price)
+ // Propagate the new gas tip requirement to all the subpools too
+ for _, subpool := range pool.subpools {
+ subpool.SetGasTip(tip)
+ }
+ log.Info("Transaction pool tip threshold updated", "tip", tip)
}
// Nonce returns the next nonce of an account, with all transactions executable
@@ -556,7 +563,7 @@ func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transacti
// If the miner requests tip enforcement, cap the lists now
if enforceTips && !pool.locals.contains(addr) {
for i, tx := range txs {
- if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 {
+ if tx.EffectiveGasTipIntCmp(pool.gasTip, pool.priced.urgent.baseFee) < 0 {
txs = txs[:i]
break
}
@@ -598,61 +605,21 @@ func (pool *TxPool) local() map[common.Address]types.Transactions {
// This check is meant as an early check which only needs to be performed once,
// and does not require the pool mutex to be held.
func (pool *TxPool) validateTxBasics(tx *types.Transaction, local bool) error {
- // Accept only legacy transactions until EIP-2718/2930 activates.
- if !pool.eip2718.Load() && tx.Type() != types.LegacyTxType {
- return core.ErrTxTypeNotSupported
- }
- // Reject dynamic fee transactions until EIP-1559 activates.
- if !pool.eip1559.Load() && tx.Type() == types.DynamicFeeTxType {
- return core.ErrTxTypeNotSupported
- }
- // Reject blob transactions forever, those will have their own pool.
+ // Sanity check that only appropriate transactions and of a given data limit
+ // are even considered acceptance into the pool.
if tx.Type() == types.BlobTxType {
return core.ErrTxTypeNotSupported
}
- // Reject transactions over defined size to prevent DOS attacks
if tx.Size() > txMaxSize {
- return ErrOversizedData
- }
- // Check whether the init code size has been exceeded.
- if pool.shanghai.Load() && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
- return fmt.Errorf("%w: code size %v limit %v", core.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize)
- }
- // Transactions can't be negative. This may never happen using RLP decoded
- // transactions but may occur if you create a transaction using the RPC.
- if tx.Value().Sign() < 0 {
- return ErrNegativeValue
- }
- // Ensure the transaction doesn't exceed the current block limit gas.
- if pool.currentMaxGas.Load() < tx.Gas() {
- return ErrGasLimit
- }
- // Sanity check for extremely large numbers
- if tx.GasFeeCap().BitLen() > 256 {
- return core.ErrFeeCapVeryHigh
- }
- if tx.GasTipCap().BitLen() > 256 {
- return core.ErrTipVeryHigh
- }
- // Ensure gasFeeCap is greater than or equal to gasTipCap.
- if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
- return core.ErrTipAboveFeeCap
+ return fmt.Errorf("%w: transaction size %v, limit %v", ErrOversizedData, tx.Size(), txMaxSize)
}
- // Make sure the transaction is signed properly.
- if _, err := types.Sender(pool.signer, tx); err != nil {
- return ErrInvalidSender
- }
- // Drop non-local transactions under our own minimal accepted gas price or tip
- if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 {
- return ErrUnderpriced
- }
- // Ensure the transaction has more gas than the basic tx fee.
- intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul.Load(), pool.shanghai.Load())
- if err != nil {
+ // Verify the consensus rules to weed out invalid transactions
+ if err := ValidateTransaction(tx, pool.currentHead, pool.chainconfig, pool.signer); err != nil {
return err
}
- if tx.Gas() < intrGas {
- return core.ErrIntrinsicGas
+ // Verify pool specific limits for valid transactions
+ if !local && tx.GasTipCapIntCmp(pool.gasTip) < 0 {
+ return fmt.Errorf("%w: tip needed %v, tip permitted %v", ErrUnderpriced, pool.gasTip, tx.GasTipCap())
}
return nil
}
@@ -663,16 +630,18 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
// Signature has been checked already, this cannot error.
from, _ := types.Sender(pool.signer, tx)
// Ensure the transaction adheres to nonce ordering
- if pool.currentState.GetNonce(from) > tx.Nonce() {
- return core.ErrNonceTooLow
+ if next := pool.currentState.GetNonce(from); next > tx.Nonce() {
+ return fmt.Errorf("%w: next nonce %v, tx nonce %v", core.ErrNonceTooLow, next, tx.Nonce())
}
// Transactor should have enough funds to cover the costs
// cost == V + GP * GL
- balance := pool.currentState.GetBalance(from)
+ var (
+ balance = pool.currentState.GetBalance(from)
+ cost = tx.Cost()
+ )
if balance.Cmp(tx.Cost()) < 0 {
- return core.ErrInsufficientFunds
+ return fmt.Errorf("%w: balance %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, cost, new(big.Int).Sub(cost, balance))
}
-
// Verify that replacing transactions will not result in overdraft
list := pool.pending[from]
if list != nil { // Sender already has pending txs
@@ -1249,7 +1218,9 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt
if reset != nil {
// Reset from the old head to the new, rescheduling any reorged transactions
pool.reset(reset.oldHead, reset.newHead)
-
+ for _, subpool := range pool.subpools {
+ subpool.Reset(reset.oldHead, reset.newHead)
+ }
// Nonces were reset, discard any events that became stale
for addr := range events {
events[addr].Forward(pool.pendingNonces.get(addr))
@@ -1383,9 +1354,9 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
log.Error("Failed to reset txpool state", "err", err)
return
}
+ pool.currentHead = newHead
pool.currentState = statedb
pool.pendingNonces = newNoncer(statedb)
- pool.currentMaxGas.Store(newHead.GasLimit)
// Inject any transactions discarded due to reorgs
log.Debug("Reinjecting stale transactions", "count", len(reinject))
@@ -1421,7 +1392,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
}
log.Trace("Removed old queued transactions", "count", len(forwards))
// Drop all transactions that are too costly (low balance or out of gas)
- drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load())
+ drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentHead.GasLimit)
for _, tx := range drops {
hash := tx.Hash()
pool.all.Remove(hash)
@@ -1618,7 +1589,7 @@ func (pool *TxPool) demoteUnexecutables() {
log.Trace("Removed old pending transaction", "hash", hash)
}
// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
- drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load())
+ drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentHead.GasLimit)
for _, tx := range drops {
hash := tx.Hash()
log.Trace("Removed unpayable pending transaction", "hash", hash)
diff --git a/core/txpool/txpool2_test.go b/core/txpool/txpool2_test.go
index 7e2a9eb908d39..4b4e6c4b645ae 100644
--- a/core/txpool/txpool2_test.go
+++ b/core/txpool/txpool2_test.go
@@ -83,7 +83,7 @@ func TestTransactionFutureAttack(t *testing.T) {
config := testTxPoolConfig
config.GlobalQueue = 100
config.GlobalSlots = 100
- pool := NewTxPool(config, eip1559Config, blockchain)
+ pool := New(config, eip1559Config, blockchain)
defer pool.Stop()
fillPool(t, pool)
pending, _ := pool.Stats()
@@ -116,7 +116,7 @@ func TestTransactionFuture1559(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
+ pool := New(testTxPoolConfig, eip1559Config, blockchain)
defer pool.Stop()
// Create a number of test accounts, fund them and make transactions
@@ -148,7 +148,7 @@ func TestTransactionZAttack(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
+ pool := New(testTxPoolConfig, eip1559Config, blockchain)
defer pool.Stop()
// Create a number of test accounts, fund them and make transactions
fillPool(t, pool)
@@ -218,7 +218,7 @@ func BenchmarkFutureAttack(b *testing.B) {
config := testTxPoolConfig
config.GlobalQueue = 100
config.GlobalSlots = 100
- pool := NewTxPool(config, eip1559Config, blockchain)
+ pool := New(config, eip1559Config, blockchain)
defer pool.Stop()
fillPool(b, pool)
diff --git a/core/txpool/txpool_test.go b/core/txpool/txpool_test.go
index a4889fa62f59c..87b1bd70568cf 100644
--- a/core/txpool/txpool_test.go
+++ b/core/txpool/txpool_test.go
@@ -130,7 +130,7 @@ func setupPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey
blockchain := newTestBlockChain(10000000, statedb, new(event.Feed))
key, _ := crypto.GenerateKey()
- pool := NewTxPool(testTxPoolConfig, config, blockchain)
+ pool := New(testTxPoolConfig, config, blockchain)
// wait for the pool to initialize
<-pool.initDoneCh
@@ -247,7 +247,7 @@ func TestStateChangeDuringReset(t *testing.T) {
tx0 := transaction(0, 100000, key)
tx1 := transaction(1, 100000, key)
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
+ pool := New(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()
nonce := pool.Nonce(address)
@@ -313,7 +313,7 @@ func TestInvalidTransactions(t *testing.T) {
}
tx = transaction(1, 100000, key)
- pool.gasPrice = big.NewInt(1000)
+ pool.gasTip = big.NewInt(1000)
if err, want := pool.AddRemote(tx), ErrUnderpriced; !errors.Is(err, want) {
t.Errorf("want %v have %v", want, err)
}
@@ -666,7 +666,7 @@ func TestPostponing(t *testing.T) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
+ pool := New(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()
// Create two test accounts to produce different gap profiles with
@@ -882,7 +882,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
config.NoLocals = nolocals
config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible)
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
+ pool := New(config, params.TestChainConfig, blockchain)
defer pool.Stop()
// Create a number of test accounts and fund them (last one will be the local)
@@ -974,7 +974,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
config.Lifetime = time.Second
config.NoLocals = nolocals
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
+ pool := New(config, params.TestChainConfig, blockchain)
defer pool.Stop()
// Create two test accounts to ensure remotes expire but locals do not
@@ -1158,7 +1158,7 @@ func TestPendingGlobalLimiting(t *testing.T) {
config := testTxPoolConfig
config.GlobalSlots = config.AccountSlots * 10
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
+ pool := New(config, params.TestChainConfig, blockchain)
defer pool.Stop()
// Create a number of test accounts and fund them
@@ -1210,7 +1210,7 @@ func TestAllowedTxSize(t *testing.T) {
//
// It is assumed the fields in the transaction (except of the data) are:
// - nonce <= 32 bytes
- // - gasPrice <= 32 bytes
+ // - gasTip <= 32 bytes
// - gasLimit <= 32 bytes
// - recipient == 20 bytes
// - value <= 32 bytes
@@ -1218,22 +1218,21 @@ func TestAllowedTxSize(t *testing.T) {
// All those fields are summed up to at most 213 bytes.
baseSize := uint64(213)
dataSize := txMaxSize - baseSize
- maxGas := pool.currentMaxGas.Load()
// Try adding a transaction with maximal allowed size
- tx := pricedDataTransaction(0, maxGas, big.NewInt(1), key, dataSize)
+ tx := pricedDataTransaction(0, pool.currentHead.GasLimit, big.NewInt(1), key, dataSize)
if err := pool.addRemoteSync(tx); err != nil {
t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err)
}
// Try adding a transaction with random allowed size
- if err := pool.addRemoteSync(pricedDataTransaction(1, maxGas, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
+ if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentHead.GasLimit, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
t.Fatalf("failed to add transaction of random allowed size: %v", err)
}
// Try adding a transaction of minimal not allowed size
- if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, txMaxSize)); err == nil {
+ if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.GasLimit, big.NewInt(1), key, txMaxSize)); err == nil {
t.Fatalf("expected rejection on slightly oversize transaction")
}
// Try adding a transaction of random not allowed size
- if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
+ if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.GasLimit, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
t.Fatalf("expected rejection on oversize transaction")
}
// Run some sanity checks on the pool internals
@@ -1262,7 +1261,7 @@ func TestCapClearsFromAll(t *testing.T) {
config.AccountQueue = 2
config.GlobalSlots = 8
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
+ pool := New(config, params.TestChainConfig, blockchain)
defer pool.Stop()
// Create a number of test accounts and fund them
@@ -1294,7 +1293,7 @@ func TestPendingMinimumAllowance(t *testing.T) {
config := testTxPoolConfig
config.GlobalSlots = 1
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
+ pool := New(config, params.TestChainConfig, blockchain)
defer pool.Stop()
// Create a number of test accounts and fund them
@@ -1339,7 +1338,7 @@ func TestRepricing(t *testing.T) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
+ pool := New(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()
// Keep track of transaction events to ensure all executables get announced
@@ -1388,7 +1387,7 @@ func TestRepricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Reprice the pool and check that underpriced transactions get dropped
- pool.SetGasPrice(big.NewInt(2))
+ pool.SetGasTip(big.NewInt(2))
pending, queued = pool.Stats()
if pending != 2 {
@@ -1404,13 +1403,13 @@ func TestRepricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Check that we can't add the old transactions back
- if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); err != ErrUnderpriced {
+ if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced {
+ if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); err != ErrUnderpriced {
+ if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
if err := validateEvents(events, 0); err != nil {
@@ -1509,7 +1508,7 @@ func TestRepricingDynamicFee(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Reprice the pool and check that underpriced transactions get dropped
- pool.SetGasPrice(big.NewInt(2))
+ pool.SetGasTip(big.NewInt(2))
pending, queued = pool.Stats()
if pending != 2 {
@@ -1526,15 +1525,15 @@ func TestRepricingDynamicFee(t *testing.T) {
}
// Check that we can't add the old transactions back
tx := pricedTransaction(1, 100000, big.NewInt(1), keys[0])
- if err := pool.AddRemote(tx); err != ErrUnderpriced {
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
tx = dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])
- if err := pool.AddRemote(tx); err != ErrUnderpriced {
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
tx = dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2])
- if err := pool.AddRemote(tx); err != ErrUnderpriced {
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
if err := validateEvents(events, 0); err != nil {
@@ -1587,7 +1586,7 @@ func TestRepricingKeepsLocals(t *testing.T) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
+ pool := New(testTxPoolConfig, eip1559Config, blockchain)
defer pool.Stop()
// Create a number of test accounts and fund them
@@ -1638,13 +1637,13 @@ func TestRepricingKeepsLocals(t *testing.T) {
validate()
// Reprice the pool and check that nothing is dropped
- pool.SetGasPrice(big.NewInt(2))
+ pool.SetGasTip(big.NewInt(2))
validate()
- pool.SetGasPrice(big.NewInt(2))
- pool.SetGasPrice(big.NewInt(4))
- pool.SetGasPrice(big.NewInt(8))
- pool.SetGasPrice(big.NewInt(100))
+ pool.SetGasTip(big.NewInt(2))
+ pool.SetGasTip(big.NewInt(4))
+ pool.SetGasTip(big.NewInt(8))
+ pool.SetGasTip(big.NewInt(100))
validate()
}
@@ -1664,7 +1663,7 @@ func TestUnderpricing(t *testing.T) {
config.GlobalSlots = 2
config.GlobalQueue = 2
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
+ pool := New(config, params.TestChainConfig, blockchain)
defer pool.Stop()
// Keep track of transaction events to ensure all executables get announced
@@ -1706,7 +1705,7 @@ func TestUnderpricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Ensure that adding an underpriced transaction on block limit fails
- if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced {
+ if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
// Replace a future transaction with a future transaction
@@ -1778,7 +1777,7 @@ func TestStableUnderpricing(t *testing.T) {
config.GlobalSlots = 128
config.GlobalQueue = 0
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
+ pool := New(config, params.TestChainConfig, blockchain)
defer pool.Stop()
// Keep track of transaction events to ensure all executables get announced
@@ -1886,7 +1885,7 @@ func TestUnderpricingDynamicFee(t *testing.T) {
// Ensure that adding an underpriced transaction fails
tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])
- if err := pool.AddRemote(tx); err != ErrUnderpriced { // Pend K0:0, K0:1, K2:0; Que K1:1
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
@@ -2006,7 +2005,7 @@ func TestDeduplication(t *testing.T) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
+ pool := New(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()
// Create a test account to add transactions with
@@ -2072,7 +2071,7 @@ func TestReplacement(t *testing.T) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
+ pool := New(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()
// Keep track of transaction events to ensure all executables get announced
@@ -2282,7 +2281,7 @@ func testJournaling(t *testing.T, nolocals bool) {
config.Journal = journal
config.Rejournal = time.Second
- pool := NewTxPool(config, params.TestChainConfig, blockchain)
+ pool := New(config, params.TestChainConfig, blockchain)
// Create two test accounts to ensure remotes expire but locals do not
local, _ := crypto.GenerateKey()
@@ -2319,7 +2318,7 @@ func testJournaling(t *testing.T, nolocals bool) {
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
blockchain = newTestBlockChain(1000000, statedb, new(event.Feed))
- pool = NewTxPool(config, params.TestChainConfig, blockchain)
+ pool = New(config, params.TestChainConfig, blockchain)
pending, queued = pool.Stats()
if queued != 0 {
@@ -2345,7 +2344,7 @@ func testJournaling(t *testing.T, nolocals bool) {
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
blockchain = newTestBlockChain(1000000, statedb, new(event.Feed))
- pool = NewTxPool(config, params.TestChainConfig, blockchain)
+ pool = New(config, params.TestChainConfig, blockchain)
pending, queued = pool.Stats()
if pending != 0 {
@@ -2375,7 +2374,7 @@ func TestStatusCheck(t *testing.T) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
+ pool := New(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()
// Create the test accounts to check various transaction statuses with
diff --git a/core/types/gen_access_tuple.go b/core/types/gen_access_tuple.go
index fc48a84cc0c0e..cc211623caf65 100644
--- a/core/types/gen_access_tuple.go
+++ b/core/types/gen_access_tuple.go
@@ -12,8 +12,8 @@ import (
// MarshalJSON marshals as JSON.
func (a AccessTuple) MarshalJSON() ([]byte, error) {
type AccessTuple struct {
- Address common.Address `json:"address" gencodec:"required"`
- StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"`
+ Address common.Address `json:"address" gencodec:"required"`
+ StorageKeys []common.Hash `json:"storageKeys" gencodec:"required" ssz-max:"16777216"`
}
var enc AccessTuple
enc.Address = a.Address
@@ -24,8 +24,8 @@ func (a AccessTuple) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (a *AccessTuple) UnmarshalJSON(input []byte) error {
type AccessTuple struct {
- Address *common.Address `json:"address" gencodec:"required"`
- StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"`
+ Address *common.Address `json:"address" gencodec:"required"`
+ StorageKeys []common.Hash `json:"storageKeys" gencodec:"required" ssz-max:"16777216"`
}
var dec AccessTuple
if err := json.Unmarshal(input, &dec); err != nil {
diff --git a/core/types/gen_receipt_json.go b/core/types/gen_receipt_json.go
index d83be1447744a..b924e5872ca6c 100644
--- a/core/types/gen_receipt_json.go
+++ b/core/types/gen_receipt_json.go
@@ -26,7 +26,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
ContractAddress common.Address `json:"contractAddress"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"`
- BlockHash common.Hash `json:"blockHash,omitempty"`
+ BlockHash common.Hash `json:"blockHash,omitempty"`
BlockNumber *hexutil.Big `json:"blockNumber,omitempty"`
TransactionIndex hexutil.Uint `json:"transactionIndex"`
}
diff --git a/core/types/transaction.go b/core/types/transaction.go
index f0b674f344330..b7cb36b6026f4 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -198,7 +198,7 @@ func (tx *Transaction) decodeTyped(b []byte) (TxData, error) {
return &inner, err
case BlobTxType:
var inner BlobTx
- err := rlp.DecodeBytes(b[1:], &inner) // TODO(karalabe): This needs to be ssz
+ err := rlp.DecodeBytes(b[1:], &inner)
return &inner, err
default:
return nil, ErrTxTypeNotSupported
@@ -417,11 +417,7 @@ func (tx *Transaction) Size() uint64 {
return size.(uint64)
}
c := writeCounter(0)
- if tx.Type() == BlobTxType {
- rlp.Encode(&c, &tx.inner) // TODO(karalabe): Replace with SSZ encoding
- } else {
- rlp.Encode(&c, &tx.inner)
- }
+ rlp.Encode(&c, &tx.inner)
size := uint64(c)
if tx.Type() != LegacyTxType {
diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go
index 2566d0b8d6566..6c6c50d498d72 100644
--- a/core/types/transaction_marshalling.go
+++ b/core/types/transaction_marshalling.go
@@ -23,28 +23,28 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/holiman/uint256"
)
// txJSON is the JSON representation of transactions.
type txJSON struct {
Type hexutil.Uint64 `json:"type"`
- // Common transaction fields:
+ ChainID *hexutil.Big `json:"chainId,omitempty"`
Nonce *hexutil.Uint64 `json:"nonce"`
+ To *common.Address `json:"to"`
+ Gas *hexutil.Uint64 `json:"gas"`
GasPrice *hexutil.Big `json:"gasPrice"`
MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"`
MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"`
- Gas *hexutil.Uint64 `json:"gas"`
+ MaxFeePerDataGas *hexutil.Big `json:"maxFeePerDataGas,omitempty"`
Value *hexutil.Big `json:"value"`
- Data *hexutil.Bytes `json:"input"`
+ Input *hexutil.Bytes `json:"input"`
+ AccessList *AccessList `json:"accessList,omitempty"`
+ BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"`
V *hexutil.Big `json:"v"`
R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"`
- To *common.Address `json:"to"`
-
- // Access list transaction fields:
- ChainID *hexutil.Big `json:"chainId,omitempty"`
- AccessList *AccessList `json:"accessList,omitempty"`
// Only used for encoding:
Hash common.Hash `json:"hash"`
@@ -61,39 +61,57 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
switch itx := tx.inner.(type) {
case *LegacyTx:
enc.Nonce = (*hexutil.Uint64)(&itx.Nonce)
+ enc.To = tx.To()
enc.Gas = (*hexutil.Uint64)(&itx.Gas)
enc.GasPrice = (*hexutil.Big)(itx.GasPrice)
enc.Value = (*hexutil.Big)(itx.Value)
- enc.Data = (*hexutil.Bytes)(&itx.Data)
- enc.To = tx.To()
+ enc.Input = (*hexutil.Bytes)(&itx.Data)
enc.V = (*hexutil.Big)(itx.V)
enc.R = (*hexutil.Big)(itx.R)
enc.S = (*hexutil.Big)(itx.S)
+
case *AccessListTx:
enc.ChainID = (*hexutil.Big)(itx.ChainID)
- enc.AccessList = &itx.AccessList
enc.Nonce = (*hexutil.Uint64)(&itx.Nonce)
+ enc.To = tx.To()
enc.Gas = (*hexutil.Uint64)(&itx.Gas)
enc.GasPrice = (*hexutil.Big)(itx.GasPrice)
enc.Value = (*hexutil.Big)(itx.Value)
- enc.Data = (*hexutil.Bytes)(&itx.Data)
- enc.To = tx.To()
+ enc.Input = (*hexutil.Bytes)(&itx.Data)
+ enc.AccessList = &itx.AccessList
enc.V = (*hexutil.Big)(itx.V)
enc.R = (*hexutil.Big)(itx.R)
enc.S = (*hexutil.Big)(itx.S)
+
case *DynamicFeeTx:
enc.ChainID = (*hexutil.Big)(itx.ChainID)
- enc.AccessList = &itx.AccessList
enc.Nonce = (*hexutil.Uint64)(&itx.Nonce)
+ enc.To = tx.To()
enc.Gas = (*hexutil.Uint64)(&itx.Gas)
enc.MaxFeePerGas = (*hexutil.Big)(itx.GasFeeCap)
enc.MaxPriorityFeePerGas = (*hexutil.Big)(itx.GasTipCap)
enc.Value = (*hexutil.Big)(itx.Value)
- enc.Data = (*hexutil.Bytes)(&itx.Data)
- enc.To = tx.To()
+ enc.Input = (*hexutil.Bytes)(&itx.Data)
+ enc.AccessList = &itx.AccessList
enc.V = (*hexutil.Big)(itx.V)
enc.R = (*hexutil.Big)(itx.R)
enc.S = (*hexutil.Big)(itx.S)
+
+ case *BlobTx:
+ enc.ChainID = (*hexutil.Big)(itx.ChainID.ToBig())
+ enc.Nonce = (*hexutil.Uint64)(&itx.Nonce)
+ enc.Gas = (*hexutil.Uint64)(&itx.Gas)
+ enc.MaxFeePerGas = (*hexutil.Big)(itx.GasFeeCap.ToBig())
+ enc.MaxPriorityFeePerGas = (*hexutil.Big)(itx.GasTipCap.ToBig())
+ enc.MaxFeePerDataGas = (*hexutil.Big)(itx.BlobFeeCap.ToBig())
+ enc.Value = (*hexutil.Big)(itx.Value.ToBig())
+ enc.Input = (*hexutil.Bytes)(&itx.Data)
+ enc.AccessList = &itx.AccessList
+ enc.BlobVersionedHashes = itx.BlobHashes
+ enc.To = tx.To()
+ enc.V = (*hexutil.Big)(itx.V.ToBig())
+ enc.R = (*hexutil.Big)(itx.R.ToBig())
+ enc.S = (*hexutil.Big)(itx.S.ToBig())
}
return json.Marshal(&enc)
}
@@ -111,29 +129,29 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
case LegacyTxType:
var itx LegacyTx
inner = &itx
- if dec.To != nil {
- itx.To = dec.To
- }
if dec.Nonce == nil {
return errors.New("missing required field 'nonce' in transaction")
}
itx.Nonce = uint64(*dec.Nonce)
- if dec.GasPrice == nil {
- return errors.New("missing required field 'gasPrice' in transaction")
+ if dec.To != nil {
+ itx.To = dec.To
}
- itx.GasPrice = (*big.Int)(dec.GasPrice)
if dec.Gas == nil {
return errors.New("missing required field 'gas' in transaction")
}
itx.Gas = uint64(*dec.Gas)
+ if dec.GasPrice == nil {
+ return errors.New("missing required field 'gasPrice' in transaction")
+ }
+ itx.GasPrice = (*big.Int)(dec.GasPrice)
if dec.Value == nil {
return errors.New("missing required field 'value' in transaction")
}
itx.Value = (*big.Int)(dec.Value)
- if dec.Data == nil {
+ if dec.Input == nil {
return errors.New("missing required field 'input' in transaction")
}
- itx.Data = *dec.Data
+ itx.Data = *dec.Input
if dec.V == nil {
return errors.New("missing required field 'v' in transaction")
}
@@ -156,40 +174,39 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
case AccessListTxType:
var itx AccessListTx
inner = &itx
- // Access list is optional for now.
- if dec.AccessList != nil {
- itx.AccessList = *dec.AccessList
- }
if dec.ChainID == nil {
return errors.New("missing required field 'chainId' in transaction")
}
itx.ChainID = (*big.Int)(dec.ChainID)
- if dec.To != nil {
- itx.To = dec.To
- }
if dec.Nonce == nil {
return errors.New("missing required field 'nonce' in transaction")
}
itx.Nonce = uint64(*dec.Nonce)
- if dec.GasPrice == nil {
- return errors.New("missing required field 'gasPrice' in transaction")
+ if dec.To != nil {
+ itx.To = dec.To
}
- itx.GasPrice = (*big.Int)(dec.GasPrice)
if dec.Gas == nil {
return errors.New("missing required field 'gas' in transaction")
}
itx.Gas = uint64(*dec.Gas)
+ if dec.GasPrice == nil {
+ return errors.New("missing required field 'gasPrice' in transaction")
+ }
+ itx.GasPrice = (*big.Int)(dec.GasPrice)
if dec.Value == nil {
return errors.New("missing required field 'value' in transaction")
}
itx.Value = (*big.Int)(dec.Value)
- if dec.Data == nil {
+ if dec.Input == nil {
return errors.New("missing required field 'input' in transaction")
}
- itx.Data = *dec.Data
+ itx.Data = *dec.Input
if dec.V == nil {
return errors.New("missing required field 'v' in transaction")
}
+ if dec.AccessList != nil {
+ itx.AccessList = *dec.AccessList
+ }
itx.V = (*big.Int)(dec.V)
if dec.R == nil {
return errors.New("missing required field 'r' in transaction")
@@ -209,21 +226,21 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
case DynamicFeeTxType:
var itx DynamicFeeTx
inner = &itx
- // Access list is optional for now.
- if dec.AccessList != nil {
- itx.AccessList = *dec.AccessList
- }
if dec.ChainID == nil {
return errors.New("missing required field 'chainId' in transaction")
}
itx.ChainID = (*big.Int)(dec.ChainID)
- if dec.To != nil {
- itx.To = dec.To
- }
if dec.Nonce == nil {
return errors.New("missing required field 'nonce' in transaction")
}
itx.Nonce = uint64(*dec.Nonce)
+ if dec.To != nil {
+ itx.To = dec.To
+ }
+ if dec.Gas == nil {
+ return errors.New("missing required field 'gas' for txdata")
+ }
+ itx.Gas = uint64(*dec.Gas)
if dec.MaxPriorityFeePerGas == nil {
return errors.New("missing required field 'maxPriorityFeePerGas' for txdata")
}
@@ -232,21 +249,20 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'maxFeePerGas' for txdata")
}
itx.GasFeeCap = (*big.Int)(dec.MaxFeePerGas)
- if dec.Gas == nil {
- return errors.New("missing required field 'gas' for txdata")
- }
- itx.Gas = uint64(*dec.Gas)
if dec.Value == nil {
return errors.New("missing required field 'value' in transaction")
}
itx.Value = (*big.Int)(dec.Value)
- if dec.Data == nil {
+ if dec.Input == nil {
return errors.New("missing required field 'input' in transaction")
}
- itx.Data = *dec.Data
+ itx.Data = *dec.Input
if dec.V == nil {
return errors.New("missing required field 'v' in transaction")
}
+ if dec.AccessList != nil {
+ itx.AccessList = *dec.AccessList
+ }
itx.V = (*big.Int)(dec.V)
if dec.R == nil {
return errors.New("missing required field 'r' in transaction")
@@ -263,6 +279,70 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
}
}
+ case BlobTxType:
+ var itx BlobTx
+ inner = &itx
+ if dec.ChainID == nil {
+ return errors.New("missing required field 'chainId' in transaction")
+ }
+ itx.ChainID = uint256.MustFromBig((*big.Int)(dec.ChainID))
+ if dec.Nonce == nil {
+ return errors.New("missing required field 'nonce' in transaction")
+ }
+ itx.Nonce = uint64(*dec.Nonce)
+ if dec.To != nil {
+ itx.To = dec.To
+ }
+ if dec.Gas == nil {
+ return errors.New("missing required field 'gas' for txdata")
+ }
+ itx.Gas = uint64(*dec.Gas)
+ if dec.MaxPriorityFeePerGas == nil {
+ return errors.New("missing required field 'maxPriorityFeePerGas' for txdata")
+ }
+ itx.GasTipCap = uint256.MustFromBig((*big.Int)(dec.MaxPriorityFeePerGas))
+ if dec.MaxFeePerGas == nil {
+ return errors.New("missing required field 'maxFeePerGas' for txdata")
+ }
+ itx.GasFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerGas))
+ if dec.MaxFeePerDataGas == nil {
+ return errors.New("missing required field 'maxFeePerDataGas' for txdata")
+ }
+ itx.BlobFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerDataGas))
+ if dec.Value == nil {
+ return errors.New("missing required field 'value' in transaction")
+ }
+ itx.Value = uint256.MustFromBig((*big.Int)(dec.Value))
+ if dec.Input == nil {
+ return errors.New("missing required field 'input' in transaction")
+ }
+ itx.Data = *dec.Input
+ if dec.V == nil {
+ return errors.New("missing required field 'v' in transaction")
+ }
+ if dec.AccessList != nil {
+ itx.AccessList = *dec.AccessList
+ }
+ if dec.BlobVersionedHashes == nil {
+ return errors.New("missing required field 'blobVersionedHashes' in transaction")
+ }
+ itx.BlobHashes = dec.BlobVersionedHashes
+ itx.V = uint256.MustFromBig((*big.Int)(dec.V))
+ if dec.R == nil {
+ return errors.New("missing required field 'r' in transaction")
+ }
+ itx.R = uint256.MustFromBig((*big.Int)(dec.R))
+ if dec.S == nil {
+ return errors.New("missing required field 's' in transaction")
+ }
+ itx.S = uint256.MustFromBig((*big.Int)(dec.S))
+ withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0
+ if withSignature {
+ if err := sanityCheckSignature(itx.V.ToBig(), itx.R.ToBig(), itx.S.ToBig(), false); err != nil {
+ return err
+ }
+ }
+
default:
return ErrTxTypeNotSupported
}
diff --git a/core/types/tx_access_list.go b/core/types/tx_access_list.go
index 825942a41d45e..797abd9923441 100644
--- a/core/types/tx_access_list.go
+++ b/core/types/tx_access_list.go
@@ -29,8 +29,8 @@ type AccessList []AccessTuple
// AccessTuple is the element type of an access list.
type AccessTuple struct {
- Address common.Address `json:"address" gencodec:"required"`
- StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"`
+ Address common.Address `json:"address" gencodec:"required"`
+ StorageKeys []common.Hash `json:"storageKeys" gencodec:"required" ssz-max:"16777216"`
}
// StorageKeys returns the total number of storage keys in the access list.
diff --git a/core/types/tx_blob.go b/core/types/tx_blob.go
index 1f64a9871b876..58065d0017d31 100644
--- a/core/types/tx_blob.go
+++ b/core/types/tx_blob.go
@@ -31,7 +31,7 @@ type BlobTx struct {
GasTipCap *uint256.Int // a.k.a. maxPriorityFeePerGas
GasFeeCap *uint256.Int // a.k.a. maxFeePerGas
Gas uint64
- To *common.Address // `rlp:"nil"` // nil means contract creation
+ To *common.Address `rlp:"nil"` // nil means contract creation
Value *uint256.Int
Data []byte
AccessList AccessList
diff --git a/eth/api.go b/eth/api.go
index 5fdd0117dda3b..dd5932b8b912c 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -109,7 +109,7 @@ func (api *MinerAPI) SetGasPrice(gasPrice hexutil.Big) bool {
api.e.gasPrice = (*big.Int)(&gasPrice)
api.e.lock.Unlock()
- api.e.txPool.SetGasPrice((*big.Int)(&gasPrice))
+ api.e.txPool.SetGasTip((*big.Int)(&gasPrice))
return true
}
diff --git a/eth/backend.go b/eth/backend.go
index 4caab9bad6059..4342b642d17df 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -35,6 +35,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/pruner"
"github.com/ethereum/go-ethereum/core/txpool"
+ "github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/downloader"
@@ -66,7 +67,9 @@ type Ethereum struct {
config *ethconfig.Config
// Handlers
- txPool *txpool.TxPool
+ txPool *txpool.TxPool
+ blobPool *blobpool.BlobPool
+
blockchain *core.BlockChain
handler *handler
ethDialCandidates enode.Iterator
@@ -203,10 +206,18 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
eth.bloomIndexer.Start(eth.blockchain)
+ if config.BlobPool.Datadir != "" {
+ config.BlobPool.Datadir = stack.ResolvePath(config.BlobPool.Datadir)
+ }
+ eth.blobPool, err = blobpool.New(config.BlobPool, eth.blockchain)
+ if err != nil {
+ return nil, err
+ }
if config.TxPool.Journal != "" {
config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal)
}
- eth.txPool = txpool.NewTxPool(config.TxPool, eth.blockchain.Config(), eth.blockchain)
+ eth.txPool = txpool.New(config.TxPool, eth.blockchain.Config(), eth.blockchain)
+ eth.txPool.AddSubPool(eth.blobPool)
// Permit the downloader to use the trie cache allowance during fast sync
cacheLimit := cacheConfig.TrieCleanLimit + cacheConfig.TrieDirtyLimit + cacheConfig.SnapshotLimit
@@ -214,6 +225,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
Database: chainDb,
Chain: eth.blockchain,
TxPool: eth.txPool,
+ BlobPool: eth.blobPool,
Merger: eth.merger,
Network: config.NetworkId,
Sync: config.SyncMode,
@@ -399,7 +411,7 @@ func (s *Ethereum) StartMining() error {
s.lock.RLock()
price := s.gasPrice
s.lock.RUnlock()
- s.txPool.SetGasPrice(price)
+ s.txPool.SetGasTip(price)
// Configure the local mining address
eb, err := s.Etherbase()
@@ -452,6 +464,7 @@ func (s *Ethereum) Miner() *miner.Miner { return s.miner }
func (s *Ethereum) AccountManager() *accounts.Manager { return s.accountManager }
func (s *Ethereum) BlockChain() *core.BlockChain { return s.blockchain }
func (s *Ethereum) TxPool() *txpool.TxPool { return s.txPool }
+func (s *Ethereum) BlobPool() *blobpool.BlobPool { return s.blobPool }
func (s *Ethereum) EventMux() *event.TypeMux { return s.eventMux }
func (s *Ethereum) Engine() consensus.Engine { return s.engine }
func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb }
@@ -513,6 +526,7 @@ func (s *Ethereum) Stop() error {
s.bloomIndexer.Close()
close(s.closeBloomHandler)
s.txPool.Stop()
+ s.blobPool.Close()
s.miner.Close()
s.blockchain.Stop()
s.engine.Close()
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index a98d9ee4aaffd..482bd76cef547 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/txpool"
+ "github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb"
@@ -72,6 +73,7 @@ var Defaults = Config{
FilterLogCacheSize: 32,
Miner: miner.DefaultConfig,
TxPool: txpool.DefaultConfig,
+ BlobPool: blobpool.DefaultConfig,
RPCGasCap: 50000000,
RPCEVMTimeout: 5 * time.Second,
GPO: FullNodeGPO,
@@ -139,7 +141,8 @@ type Config struct {
Miner miner.Config
// Transaction pool options
- TxPool txpool.Config
+ TxPool txpool.Config
+ BlobPool blobpool.Config
// Gas Price Oracle options
GPO gasprice.Config
diff --git a/eth/handler.go b/eth/handler.go
index f0b043166efbf..a350dd90adb99 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -20,6 +20,7 @@ import (
"errors"
"math"
"math/big"
+ "math/rand"
"sync"
"sync/atomic"
"time"
@@ -73,12 +74,20 @@ type txPool interface {
SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
}
+// blobPool defines the methods needed from a blob transaction pool implementation
+// to support all the operations needed by the Ethereum chain protocols.
+type blobPool interface {
+ // Add inserts a new blob transaction into the pool.
+ Add(tx *types.Transaction, blobs [][]byte) error
+}
+
// handlerConfig is the collection of initialization parameters to create a full
// node network handler.
type handlerConfig struct {
Database ethdb.Database // Database for direct sync insertions
Chain *core.BlockChain // Blockchain to serve data from
TxPool txPool // Transaction pool to propagate from
+ BlobPool blobPool // Blob transaction pool to propagate from
Merger *consensus.Merger // The manager for eth1/2 transition
Network uint64 // Network identifier to adfvertise
Sync downloader.SyncMode // Whether to snap or full sync
@@ -96,6 +105,7 @@ type handler struct {
database ethdb.Database
txpool txPool
+ blobpool blobPool
chain *core.BlockChain
maxPeers int
@@ -132,6 +142,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
eventMux: config.EventMux,
database: config.Database,
txpool: config.TxPool,
+ blobpool: config.BlobPool,
chain: config.Chain,
peers: newPeerSet(),
merger: config.Merger,
@@ -274,7 +285,21 @@ func newHandler(config *handlerConfig) (*handler, error) {
}
return p.RequestTxs(hashes)
}
- h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, h.txpool.AddRemotes, fetchTx)
+ // TODO(karalabe): delete this after done playing
+ tempAddTxs := func(txs []*types.Transaction) []error {
+ errs := h.txpool.AddRemotes(txs)
+ for i, err := range errs {
+ if err == nil {
+ blobs := make([][]byte, rand.Int()%5)
+ for i := 0; i < len(blobs); i++ {
+ blobs[i] = make([]byte, 128*1024)
+ }
+ h.blobpool.Add(txs[i], blobs)
+ }
+ }
+ return errs
+ }
+ h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, tempAddTxs /*h.txpool.AddRemotes*/, fetchTx)
h.chainSync = newChainSyncer(h)
return h, nil
}
diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go
index bbb9866bd3dfe..21782103ca6a8 100644
--- a/eth/protocols/eth/handler_test.go
+++ b/eth/protocols/eth/handler_test.go
@@ -119,7 +119,7 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int,
return &testBackend{
db: db,
chain: chain,
- txpool: txpool.NewTxPool(txconfig, params.TestChainConfig, chain),
+ txpool: txpool.New(txconfig, params.TestChainConfig, chain),
}
}
diff --git a/go.mod b/go.mod
index 68929fb8af926..4a5087a0909d3 100644
--- a/go.mod
+++ b/go.mod
@@ -2,6 +2,8 @@ module github.com/ethereum/go-ethereum
go 1.19
+replace github.com/holiman/billy => github.com/karalabe/billy v0.0.0-20230331141121-732fa9109cff
+
require (
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0
github.com/VictoriaMetrics/fastcache v1.6.0
@@ -9,7 +11,7 @@ require (
github.com/aws/aws-sdk-go-v2/config v1.1.1
github.com/aws/aws-sdk-go-v2/credentials v1.1.1
github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1
- github.com/btcsuite/btcd/btcec/v2 v2.2.0
+ github.com/btcsuite/btcd/btcec/v2 v2.3.2
github.com/cespare/cp v0.1.0
github.com/cloudflare/cloudflare-go v0.14.0
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811
@@ -19,7 +21,6 @@ require (
github.com/deckarep/golang-set/v2 v2.1.0
github.com/docker/docker v1.6.2
github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7
- github.com/edsrzf/mmap-go v1.0.0
github.com/ethereum/c-kzg-4844 v0.1.0
github.com/fatih/color v1.7.0
github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c
@@ -37,6 +38,7 @@ require (
github.com/gorilla/websocket v1.4.2
github.com/graph-gophers/graphql-go v1.3.0
github.com/hashicorp/go-bexpr v0.1.10
+ github.com/holiman/billy v0.0.0-20230317085645-f164d9ae3da9
github.com/holiman/bloomfilter/v2 v2.0.3
github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c
github.com/huin/goupnp v1.0.3
@@ -59,8 +61,8 @@ require (
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/tyler-smith/go-bip39 v1.1.0
- github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
- golang.org/x/crypto v0.1.0
+ github.com/urfave/cli/v2 v2.24.1
+ golang.org/x/crypto v0.5.0
golang.org/x/exp v0.0.0-20230206171751-46f607a40771
golang.org/x/sync v0.1.0
golang.org/x/sys v0.7.0
@@ -90,7 +92,7 @@ require (
github.com/consensys/bavard v0.1.13 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 // indirect
- github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect
diff --git a/go.sum b/go.sum
index 7f463184be851..125a34425c4f3 100644
--- a/go.sum
+++ b/go.sum
@@ -7,7 +7,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSu
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
@@ -43,12 +43,10 @@ github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bits-and-blooms/bitset v1.5.0 h1:NpE8frKRLGHIcEzkR+gZhiioW1+WbYV6fKwD6ZIpQT8=
-github.com/bits-and-blooms/bitset v1.5.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
-github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
-github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
+github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
+github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
@@ -85,8 +83,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHH
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 h1:6IrxszG5G+O7zhtkWxq6+unVvnrm1fqV2Pe+T95DUzw=
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI=
-github.com/crate-crypto/go-kzg-4844 v0.1.0 h1:2PXr2wKBNTmSsoYLCmaNg5Z6uQUf7LiUAsnDbTfq+0M=
-github.com/crate-crypto/go-kzg-4844 v0.1.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4=
github.com/crate-crypto/go-kzg-4844 v0.2.0 h1:UVuHOE+5tIWrim4zf/Xaa43+MIsDCPyW76QhUpiMGj4=
github.com/crate-crypto/go-kzg-4844 v0.2.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -97,9 +93,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI=
github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
-github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
@@ -120,8 +115,6 @@ github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7/go.mod h1:yRkwfj0CBpOG
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
-github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -269,6 +262,8 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
+github.com/karalabe/billy v0.0.0-20230331141121-732fa9109cff h1:JQWCWWnLDtM1wPKtXpsx2cZrwZswgefUdXBN0JsJBYo=
+github.com/karalabe/billy v0.0.0-20230331141121-732fa9109cff/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8=
@@ -434,8 +429,8 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
-github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
-github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
+github.com/urfave/cli/v2 v2.24.1 h1:/QYYr7g0EhwXEML8jO+8OYt5trPnLHS0p3mrgExJ5NU=
+github.com/urfave/cli/v2 v2.24.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
@@ -466,8 +461,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
-golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
+golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
@@ -554,8 +549,6 @@ golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
-golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
diff --git a/internal/flags/categories.go b/internal/flags/categories.go
index c2db6c6c1d25c..ac71931879b4e 100644
--- a/internal/flags/categories.go
+++ b/internal/flags/categories.go
@@ -23,7 +23,8 @@ const (
LightCategory = "LIGHT CLIENT"
DevCategory = "DEVELOPER CHAIN"
EthashCategory = "ETHASH"
- TxPoolCategory = "TRANSACTION POOL"
+ TxPoolCategory = "TRANSACTION POOL (EVM)"
+ BlobPoolCategory = "TRANSACTION POOL (BLOB)"
PerfCategory = "PERFORMANCE TUNING"
AccountCategory = "ACCOUNT"
APICategory = "API AND CONSOLE"
diff --git a/les/test_helper.go b/les/test_helper.go
index ead97ddd172d3..44a454eaecee8 100644
--- a/les/test_helper.go
+++ b/les/test_helper.go
@@ -234,7 +234,7 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da
txpoolConfig := txpool.DefaultConfig
txpoolConfig.Journal = ""
- txpool := txpool.NewTxPool(txpoolConfig, gspec.Config, simulation.Blockchain())
+ txpool := txpool.New(txpoolConfig, gspec.Config, simulation.Blockchain())
server := &LesServer{
lesCommons: lesCommons{
diff --git a/miner/miner.go b/miner/miner.go
index b1d1f7c4cbfe2..82aafbe52a8af 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool"
+ "github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/event"
@@ -41,6 +42,7 @@ import (
type Backend interface {
BlockChain() *core.BlockChain
TxPool() *txpool.TxPool
+ BlobPool() *blobpool.BlobPool
}
// Config is the configuration parameters of mining.
diff --git a/miner/miner_test.go b/miner/miner_test.go
index 6bf3edae5dbb7..1678c0e98eba6 100644
--- a/miner/miner_test.go
+++ b/miner/miner_test.go
@@ -267,7 +267,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(chainDB), nil)
blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)}
- pool := txpool.NewTxPool(testTxPoolConfig, chainConfig, blockchain)
+ pool := txpool.New(testTxPoolConfig, chainConfig, blockchain)
backend := NewMockBackend(bc, pool)
// Create event Mux
mux := new(event.TypeMux)
diff --git a/miner/worker.go b/miner/worker.go
index 936a9e74a5b6d..98e07c25706eb 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -35,6 +35,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/holiman/uint256"
)
const (
@@ -1056,6 +1057,12 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err
// Split the pending transactions into locals and remotes
// Fill the block with all available pending transactions.
pending := w.eth.TxPool().Pending(true)
+ blobtxs := w.eth.BlobPool().Pending(
+ uint256.MustFromBig(env.header.BaseFee),
+ uint256.MustFromBig(misc.CalcBlobFee(env.header.ExcessDataGas)),
+ )
+ log.Trace("Side-effect log, much wow", "blobs", len(blobtxs))
+
localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending
for _, account := range w.eth.TxPool().Locals() {
if txs := remoteTxs[account]; len(txs) > 0 {
diff --git a/miner/worker_test.go b/miner/worker_test.go
index 683d019d2d551..4de893d338897 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -136,7 +136,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
if err != nil {
t.Fatalf("core.NewBlockChain failed: %v", err)
}
- txpool := txpool.NewTxPool(testTxPoolConfig, chainConfig, chain)
+ txpool := txpool.New(testTxPoolConfig, chainConfig, chain)
// Generate a small n-block chain and an uncle block for it
var uncle *types.Block
diff --git a/params/protocol_params.go b/params/protocol_params.go
index 1fb258c1fc1ef..27bc32bb6df16 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -160,6 +160,10 @@ const (
RefundQuotient uint64 = 2
RefundQuotientEIP3529 uint64 = 5
+ BlobTxBytesPerFieldElement = 32 // Size in bytes of a field element
+ BlobTxFieldElementsPerBlob = 4096 // Number of field elements stored in a single data blob
+ BlobTxMaxDataGasPerBlock = 1 << 19 // Maximum consumable data gas for data blobs per block
+ BlobTxTargetDataGasPerBlock = 1 << 18 // Target consumable data gas for data blobs per block (for 1559-like pricing)
BlobTxDataGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size)
BlobTxMinDataGasprice = 1 // Minimum gas price for data blobs
BlobTxDataGaspriceUpdateFraction = 2225652 // Controls the maximum rate of change for data gas price
diff --git a/tests/fuzzers/les/les-fuzzer.go b/tests/fuzzers/les/les-fuzzer.go
index 926de04585720..c203c87f8169d 100644
--- a/tests/fuzzers/les/les-fuzzer.go
+++ b/tests/fuzzers/les/les-fuzzer.go
@@ -138,7 +138,7 @@ func newFuzzer(input []byte) *fuzzer {
chtKeys: chtKeys,
bloomKeys: bloomKeys,
nonce: uint64(len(txHashes)),
- pool: txpool.NewTxPool(txpool.DefaultConfig, params.TestChainConfig, chain),
+ pool: txpool.New(txpool.DefaultConfig, params.TestChainConfig, chain),
input: bytes.NewReader(input),
}
}