From 702e0caa2947b1271e7168ee755b2fbffc95407d Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Tue, 30 Apr 2024 17:14:39 +0800 Subject: [PATCH 01/23] core: txpool stable underprice drop order, perf fixes (#16494) --- core/tx_list.go | 17 +++++++-- core/tx_pool.go | 23 ++++++------ core/tx_pool_test.go | 85 ++++++++++++++++++++++++++++++++++++++------ 3 files changed, 101 insertions(+), 24 deletions(-) diff --git a/core/tx_list.go b/core/tx_list.go index 030c4cd30012..27cb4632c414 100644 --- a/core/tx_list.go +++ b/core/tx_list.go @@ -378,9 +378,20 @@ func (l *txList) Flatten() types.Transactions { // price-sorted transactions to discard when the pool fills up. type priceHeap []*types.Transaction -func (h priceHeap) Len() int { return len(h) } -func (h priceHeap) Less(i, j int) bool { return h[i].GasPrice().Cmp(h[j].GasPrice()) < 0 } -func (h priceHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h priceHeap) Len() int { return len(h) } +func (h priceHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +func (h priceHeap) Less(i, j int) bool { + // Sort primarily by price, returning the cheaper one + switch h[i].GasPrice().Cmp(h[j].GasPrice()) { + case -1: + return true + case 1: + return false + } + // If the prices match, stabilize via nonces (high nonce is worse) + return h[i].Nonce() > h[j].Nonce() +} func (h *priceHeap) Push(x interface{}) { *h = append(*h, x.(*types.Transaction)) diff --git a/core/tx_pool.go b/core/tx_pool.go index 324946a196ca..d7a37f4f1413 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -342,7 +342,7 @@ func (pool *TxPool) loop() { // Any non-locals old enough should be removed if time.Since(pool.beats[addr]) > pool.config.Lifetime { for _, tx := range pool.queue[addr].Flatten() { - pool.removeTx(tx.Hash()) + pool.removeTx(tx.Hash(), true) } } } @@ -491,7 +491,7 @@ func (pool *TxPool) SetGasPrice(price *big.Int) { pool.gasPrice = price for _, tx := range pool.priced.Cap(price, pool.locals) { - pool.removeTx(tx.Hash()) + pool.removeTx(tx.Hash(), false) } log.Info("Transaction pool price threshold updated", "price", price) } @@ -732,7 +732,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { for _, tx := range drop { log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice()) underpricedTxCounter.Inc(1) - pool.removeTx(tx.Hash()) + pool.removeTx(tx.Hash(), false) } } // If the transaction is replacing an already pending one, do directly @@ -796,8 +796,10 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er pool.priced.Removed() queuedReplaceCounter.Inc(1) } - pool.all[hash] = tx - pool.priced.Put(tx) + if pool.all[hash] == nil { + pool.all[hash] = tx + pool.priced.Put(tx) + } return old != nil, nil } @@ -1000,7 +1002,7 @@ func (pool *TxPool) Get(hash common.Hash) *types.Transaction { // removeTx removes a single transaction from the queue, moving all subsequent // transactions back to the future queue. 
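The nonce tiebreak introduced in priceHeap.Less above is easiest to see in isolation. The following minimal, runnable sketch (demoTx and demoHeap are illustrative stand-ins, not part of this patch) reproduces the ordering: the cheapest transaction pops first, and among equal prices the highest nonce pops first, so repeated evictions trim an account's sequence from the tail instead of punching a gap into it.

package main

import (
	"container/heap"
	"fmt"
)

// demoTx and demoHeap stand in for types.Transaction and priceHeap,
// reduced to the two fields the comparison looks at.
type demoTx struct{ price, nonce uint64 }

type demoHeap []demoTx

func (h demoHeap) Len() int      { return len(h) }
func (h demoHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h demoHeap) Less(i, j int) bool {
	if h[i].price != h[j].price {
		return h[i].price < h[j].price // primary: cheaper pops first
	}
	return h[i].nonce > h[j].nonce // tiebreak: higher nonce pops first
}
func (h *demoHeap) Push(x interface{}) { *h = append(*h, x.(demoTx)) }
func (h *demoHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

func main() {
	h := &demoHeap{{price: 1, nonce: 0}, {price: 1, nonce: 5}, {price: 2, nonce: 3}}
	heap.Init(h)
	for h.Len() > 0 {
		fmt.Printf("%+v\n", heap.Pop(h)) // {price:1 nonce:5}, then {price:1 nonce:0}, then {price:2 nonce:3}
	}
}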
-func (pool *TxPool) removeTx(hash common.Hash) { +func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { // Fetch the transaction we wish to delete tx, ok := pool.all[hash] if !ok { @@ -1010,8 +1012,9 @@ func (pool *TxPool) removeTx(hash common.Hash) { // Remove it from the list of known transactions delete(pool.all, hash) - pool.priced.Removed() - + if outofbound { + pool.priced.Removed() + } // Remove the transaction from the pending lists and reset the account nonce if pending := pool.pending[addr]; pending != nil { if removed, invalids := pending.Remove(tx); removed { @@ -1208,7 +1211,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { // Drop all transactions if they are less than the overflow if size := uint64(list.Len()); size <= drop { for _, tx := range list.Flatten() { - pool.removeTx(tx.Hash()) + pool.removeTx(tx.Hash(), true) } drop -= size queuedRateLimitCounter.Inc(int64(size)) @@ -1217,7 +1220,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { // Otherwise drop only last few transactions txs := list.Flatten() for i := len(txs) - 1; i >= 0 && drop > 0; i-- { - pool.removeTx(txs[i].Hash()) + pool.removeTx(txs[i].Hash(), true) drop-- queuedRateLimitCounter.Inc(1) } diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index e65741a7ad0e..99e14547d2c2 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -232,15 +232,10 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) { pool.lockedReset(nil, nil) - pendingTx, err := pool.Pending() + _, err := pool.Pending() if err != nil { t.Fatalf("Could not fetch pending transactions: %v", err) } - - for addr, txs := range pendingTx { - t.Logf("%0x: %d\n", addr, len(txs)) - } - nonce = pool.State().GetNonce(address) if nonce != 2 { t.Fatalf("Invalid nonce, want 2, got %d", nonce) @@ -373,7 +368,7 @@ func TestTransactionChainFork(t *testing.T) { if _, err := pool.add(tx, false); err != nil { t.Error("didn't expect error", err) } - pool.removeTx(tx.Hash()) + pool.removeTx(tx.Hash(), true) // reset the pool's internal state resetState() @@ -1418,13 +1413,13 @@ func TestTransactionPoolUnderpricing(t *testing.T) { t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } // Ensure that adding high priced transactions drops cheap ones, but not own - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - t.Fatalf("failed to add well priced transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2 t.Fatalf("failed to add well priced transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { + if err := pool.AddRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 t.Fatalf("failed to add well priced transaction: %v", err) } pending, queued = pool.Stats() @@ -1434,7 +1429,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validateEvents(events, 2); err != nil { + if err := 
validateEvents(events, 1); err != nil { t.Fatalf("additional event firing failed: %v", err) } if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1460,6 +1455,74 @@ func TestTransactionPoolUnderpricing(t *testing.T) { } } +// Tests that more expensive transactions push out cheap ones from the pool, but +// without producing instability by creating gaps that start jumping transactions +// back and forth between queued/pending. +func TestTransactionPoolStableUnderpricing(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + db := rawdb.NewMemoryDatabase() + statedb, _ := state.New(common.Hash{}, state.NewDatabase(db)) + blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} + + config := testTxPoolConfig + config.GlobalSlots = common.LimitThresholdNonceInQueue + config.GlobalQueue = 0 + + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() + + // Keep track of transaction events to ensure all executables get announced + events := make(chan NewTxsEvent, 32) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 2) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + // Fill up the entire queue with the same transaction price points + txs := types.Transactions{} + for i := uint64(0); i < config.GlobalSlots; i++ { + txs = append(txs, pricedTransaction(i, 100000, big.NewInt(1), keys[0])) + } + pool.AddRemotes(txs) + + pending, queued := pool.Stats() + if pending != int(config.GlobalSlots) { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, config.GlobalSlots) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validateEvents(events, int(config.GlobalSlots)); err != nil { + t.Fatalf("original event firing failed: %v", err) + } + if err := validateTxPoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Ensure that adding high priced transactions drops a cheap one, but doesn't produce a gap + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { + t.Fatalf("failed to add well priced transaction: %v", err) + } + pending, queued = pool.Stats() + if pending != int(config.GlobalSlots) { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, config.GlobalSlots) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validateEvents(events, 1); err != nil { + t.Fatalf("additional event firing failed: %v", err) + } + if err := validateTxPoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + // Tests that the pool rejects replacement transactions that don't meet the minimum // price bump required.
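The minimum bump is a straight percentage threshold. Here is a small, runnable sketch of the arithmetic the test below relies on (variable names are illustrative; the formula mirrors the pool's PriceBump replacement check for a same-nonce transaction):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// With PriceBump = 10 (percent), replacing a pending transaction that
	// pays 100 wei/gas requires at least 100 * (100+10) / 100 = 110 wei/gas.
	oldPrice := big.NewInt(100)
	priceBump := int64(10) // percent, as in TxPoolConfig.PriceBump

	threshold := new(big.Int).Div(
		new(big.Int).Mul(oldPrice, big.NewInt(100+priceBump)),
		big.NewInt(100),
	)
	fmt.Println(threshold) // 110: a replacement bidding below this is rejected
}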
func TestTransactionReplacement(t *testing.T) { From 2060ff7eeb4277b6ad91b3f6afe4e5e09e1bccc5 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Tue, 7 May 2024 18:30:50 +0800 Subject: [PATCH 02/23] core: Ensure that local transactions aren't discarded as underpriced (#16576) --- core/tx_pool.go | 2 +- core/tx_pool_test.go | 22 +++++++++++++--------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index d7a37f4f1413..6eb48ba692c2 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -722,7 +722,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { if uint64(len(pool.all)) >= pool.config.GlobalSlots+pool.config.GlobalQueue { log.Debug("Add transaction to pool full", "hash", hash, "nonce", tx.Nonce()) // If the new transaction is underpriced, don't accept it - if pool.priced.Underpriced(tx, pool.locals) { + if !local && pool.priced.Underpriced(tx, pool.locals) { log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice()) underpricedTxCounter.Inc(1) return false, ErrUnderpriced diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index 99e14547d2c2..7fff282cf608 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -1376,7 +1376,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) { defer sub.Unsubscribe() // Create a number of test accounts and fund them - keys := make([]*ecdsa.PrivateKey, 3) + keys := make([]*ecdsa.PrivateKey, 4) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() pool.currentState.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) @@ -1436,18 +1436,22 @@ func TestTransactionPoolUnderpricing(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding local transactions can push out even higher priced ones - tx := pricedTransaction(1, 100000, big.NewInt(1), keys[2]) - if err := pool.AddLocal(tx); err != nil { - t.Fatalf("failed to add underpriced local transaction: %v", err) + ltx = pricedTransaction(1, 100000, big.NewInt(1), keys[2]) + if err := pool.AddLocal(ltx); err != nil { + t.Fatalf("failed to append underpriced local transaction: %v", err) + } + ltx = pricedTransaction(0, 100000, big.NewInt(1), keys[3]) + if err := pool.AddLocal(ltx); err != nil { + t.Fatalf("failed to add new underpriced local transaction: %v", err) } pending, queued = pool.Stats() - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + if pending != 3 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) } - if queued != 2 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) } - if err := validateEvents(events, 1); err != nil { + if err := validateEvents(events, 2); err != nil { t.Fatalf("local event firing failed: %v", err) } if err := validateTxPoolInternals(pool); err != nil { From 88695caf8f0e31ecfa8cf2613628a0699e97fad3 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Tue, 30 Apr 2024 18:10:21 +0800 Subject: [PATCH 03/23] core: use a wrapped map w/ sync.RWMutex for TxPool.all to remove contention in TxPool.Get. 
(#16670) --- core/tx_list.go | 21 +++---- core/tx_pool.go | 136 +++++++++++++++++++++++++++++++------------ core/tx_pool_test.go | 50 ++++++++-------- 3 files changed, 135 insertions(+), 72 deletions(-) diff --git a/core/tx_list.go b/core/tx_list.go index 27cb4632c414..b240fc2b972d 100644 --- a/core/tx_list.go +++ b/core/tx_list.go @@ -408,13 +408,13 @@ func (h *priceHeap) Pop() interface{} { // txPricedList is a price-sorted heap to allow operating on transactions pool // contents in a price-incrementing way. type txPricedList struct { - all *map[common.Hash]*types.Transaction // Pointer to the map of all transactions - items *priceHeap // Heap of prices of all the stored transactions - stales int // Number of stale price points to (re-heap trigger) + all *txLookup // Pointer to the map of all transactions + items *priceHeap // Heap of prices of all the stored transactions + stales int // Number of stale price points to (re-heap trigger) } // newTxPricedList creates a new price-sorted transaction heap. -func newTxPricedList(all *map[common.Hash]*types.Transaction) *txPricedList { +func newTxPricedList(all *txLookup) *txPricedList { return &txPricedList{ all: all, items: new(priceHeap), @@ -436,12 +436,13 @@ func (l *txPricedList) Removed() { return } // Seems we've reached a critical number of stale transactions, reheap - reheap := make(priceHeap, 0, len(*l.all)) + reheap := make(priceHeap, 0, l.all.Count()) l.stales, l.items = 0, &reheap - for _, tx := range *l.all { + l.all.Range(func(hash common.Hash, tx *types.Transaction) bool { *l.items = append(*l.items, tx) - } + return true + }) heap.Init(l.items) } @@ -454,7 +455,7 @@ func (l *txPricedList) Cap(threshold *big.Int, local *accountSet) types.Transact for len(*l.items) > 0 { // Discard stale transactions if found during cleanup tx := heap.Pop(l.items).(*types.Transaction) - if _, ok := (*l.all)[tx.Hash()]; !ok { + if l.all.Get(tx.Hash()) == nil { l.stales-- continue } @@ -486,7 +487,7 @@ func (l *txPricedList) Underpriced(tx *types.Transaction, local *accountSet) boo // Discard stale price points if found at the heap start for len(*l.items) > 0 { head := []*types.Transaction(*l.items)[0] - if _, ok := (*l.all)[head.Hash()]; !ok { + if l.all.Get(head.Hash()) == nil { l.stales-- heap.Pop(l.items) continue @@ -511,7 +512,7 @@ func (l *txPricedList) Discard(count int, local *accountSet) types.Transactions for len(*l.items) > 0 && count > 0 { // Discard stale transactions if found during cleanup tx := heap.Pop(l.items).(*types.Transaction) - if _, ok := (*l.all)[tx.Hash()]; !ok { + if l.all.Get(tx.Hash()) == nil { l.stales-- continue } diff --git a/core/tx_pool.go b/core/tx_pool.go index 6eb48ba692c2..20f9ad8bcb05 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -221,11 +221,11 @@ type TxPool struct { locals *accountSet // Set of local transaction to exempt from eviction rules journal *txJournal // Journal of local transaction to back up to disk - pending map[common.Address]*txList // All currently processable transactions - queue map[common.Address]*txList // Queued but non-processable transactions - beats map[common.Address]time.Time // Last heartbeat from each known account - all map[common.Hash]*types.Transaction // All transactions to allow lookups - priced *txPricedList // All transactions sorted by price + pending map[common.Address]*txList // All currently processable transactions + queue map[common.Address]*txList // Queued but non-processable transactions + beats map[common.Address]time.Time // Last heartbeat from each 
known account + all *txLookup // All transactions to allow lookups + priced *txPricedList // All transactions sorted by price wg sync.WaitGroup // for shutdown sync @@ -249,13 +249,13 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block pending: make(map[common.Address]*txList), queue: make(map[common.Address]*txList), beats: make(map[common.Address]time.Time), - all: make(map[common.Hash]*types.Transaction), + all: newTxLookup(), chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize), gasPrice: new(big.Int).SetUint64(config.PriceLimit), trc21FeeCapacity: map[common.Address]*big.Int{}, } pool.locals = newAccountSet(pool.signer) - pool.priced = newTxPricedList(&pool.all) + pool.priced = newTxPricedList(pool.all) pool.reset(nil, chain.CurrentBlock().Header()) // If local transactions and journaling is enabled, load from disk @@ -703,7 +703,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { // If the transaction is already known, discard it hash := tx.Hash() - if pool.all[hash] != nil { + if pool.all.Get(hash) != nil { log.Trace("Discarding already known transaction", "hash", hash) return false, fmt.Errorf("known transaction: %x", hash) } @@ -719,7 +719,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { return pool.promoteSpecialTx(from, tx) } // If the transaction pool is full, discard underpriced transactions - if uint64(len(pool.all)) >= pool.config.GlobalSlots+pool.config.GlobalQueue { + if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue { log.Debug("Add transaction to pool full", "hash", hash, "nonce", tx.Nonce()) // If the new transaction is underpriced, don't accept it if !local && pool.priced.Underpriced(tx, pool.locals) { @@ -728,7 +728,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { return false, ErrUnderpriced } // New transaction is better than our worse ones, make room for it - drop := pool.priced.Discard(len(pool.all)-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals) + drop := pool.priced.Discard(pool.all.Count()-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals) for _, tx := range drop { log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice()) underpricedTxCounter.Inc(1) @@ -745,11 +745,11 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { } // New transaction is better, replace old one if old != nil { - delete(pool.all, old.Hash()) + pool.all.Remove(old.Hash()) pool.priced.Removed() pendingReplaceCounter.Inc(1) } - pool.all[tx.Hash()] = tx + pool.all.Add(tx) pool.priced.Put(tx) pool.journalTx(from, tx) @@ -792,12 +792,12 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er } // Discard any previous transaction and mark this if old != nil { - delete(pool.all, old.Hash()) + pool.all.Remove(old.Hash()) pool.priced.Removed() queuedReplaceCounter.Inc(1) } - if pool.all[hash] == nil { - pool.all[hash] = tx + if pool.all.Get(hash) == nil { + pool.all.Add(tx) pool.priced.Put(tx) } return old != nil, nil @@ -829,7 +829,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T inserted, old := list.Add(tx, pool.config.PriceBump) if !inserted { // An older transaction was better, discard this - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() pendingDiscardCounter.Inc(1) @@ -837,14 
+837,14 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T } // Otherwise discard any previous transaction and mark this if old != nil { - delete(pool.all, old.Hash()) + pool.all.Remove(old.Hash()) pool.priced.Removed() pendingReplaceCounter.Inc(1) } // Failsafe to work around direct pending inserts (tests) - if pool.all[hash] == nil { - pool.all[hash] = tx + if pool.all.Get(hash) == nil { + pool.all.Add(tx) pool.priced.Put(tx) } // Set the potentially new pending nonce and notify any subsystems of the new tx @@ -866,7 +866,7 @@ func (pool *TxPool) promoteSpecialTx(addr common.Address, tx *types.Transaction) } // Otherwise discard any previous transaction and mark this if old != nil { - delete(pool.all, old.Hash()) + pool.all.Remove(old.Hash()) pool.priced.Removed() pendingReplaceCounter.Inc(1) } @@ -878,8 +878,8 @@ func (pool *TxPool) promoteSpecialTx(addr common.Address, tx *types.Transaction) list.gascap = gas } // Failsafe to work around direct pending inserts (tests) - if pool.all[tx.Hash()] == nil { - pool.all[tx.Hash()] = tx + if pool.all.Get(tx.Hash()) == nil { + pool.all.Add(tx) } // Set the potentially new pending nonce and notify any subsystems of the new tx pool.beats[addr] = time.Now() @@ -979,7 +979,7 @@ func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { status := make([]TxStatus, len(hashes)) for i, hash := range hashes { - if tx := pool.all[hash]; tx != nil { + if tx := pool.all.Get(hash); tx != nil { from, _ := types.Sender(pool.signer, tx) // already validated if pool.pending[from] != nil && pool.pending[from].txs.items[tx.Nonce()] != nil { status[i] = TxStatusPending @@ -994,24 +994,21 @@ func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { // Get returns a transaction if it is contained in the pool // and nil otherwise. func (pool *TxPool) Get(hash common.Hash) *types.Transaction { - pool.mu.RLock() - defer pool.mu.RUnlock() - - return pool.all[hash] + return pool.all.Get(hash) } // removeTx removes a single transaction from the queue, moving all subsequent // transactions back to the future queue. 
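The outofbound flag in the removeTx signature below decides whether the priced heap is told about the removal. The heap never searches for entries to delete; Removed only bumps a stales counter, and stale entries are discarded as they surface during pops. A standalone sketch of that lazy-deletion pattern (demo types, not the pool's code):

package main

import (
	"container/heap"
	"fmt"
)

type intHeap []int

func (h intHeap) Len() int            { return len(h) }
func (h intHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *intHeap) Push(x interface{}) { *h = append(*h, x.(int)) }
func (h *intHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

func main() {
	// The live set is the source of truth (like TxPool.all); the heap may
	// lag behind and still contain entries already deleted from the set.
	live := map[int]bool{1: true, 3: true}
	h := &intHeap{1, 2, 3} // 2 was removed out of band, heap not told
	heap.Init(h)
	stales := 1 // one lazy deletion recorded, mirroring priced.Removed()
	for h.Len() > 0 {
		v := heap.Pop(h).(int)
		if !live[v] {
			stales-- // discard the stale entry as it surfaces
			continue
		}
		fmt.Println(v) // prints 1, then 3
	}
}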
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { // Fetch the transaction we wish to delete - tx, ok := pool.all[hash] - if !ok { + tx := pool.all.Get(hash) + if tx == nil { return } addr, _ := types.Sender(pool.signer, tx) // already validated during insertion // Remove it from the list of known transactions - delete(pool.all, hash) + pool.all.Remove(hash) if outofbound { pool.priced.Removed() } @@ -1072,7 +1069,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { for _, tx := range list.Forward(pool.currentState.GetNonce(addr)) { hash := tx.Hash() log.Trace("Removed old queued transaction", "hash", hash) - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() } // Drop all transactions that are too costly (low balance or out of gas) @@ -1084,7 +1081,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { for _, tx := range drops { hash := tx.Hash() log.Trace("Removed unpayable queued transaction", "hash", hash) - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() queuedNofundsCounter.Inc(1) } @@ -1100,7 +1097,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { if !pool.locals.contains(addr) { for _, tx := range list.Cap(int(pool.config.AccountQueue)) { hash := tx.Hash() - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() queuedRateLimitCounter.Inc(1) log.Trace("Removed cap-exceeding queued transaction", "hash", hash) @@ -1149,7 +1146,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { for _, tx := range list.Cap(list.Len() - 1) { // Drop the transaction from the global pools too hash := tx.Hash() - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() // Update the account nonce to the dropped transaction @@ -1171,7 +1168,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { for _, tx := range list.Cap(list.Len() - 1) { // Drop the transaction from the global pools too hash := tx.Hash() - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() // Update the account nonce to the dropped transaction @@ -1240,7 +1237,7 @@ func (pool *TxPool) demoteUnexecutables() { for _, tx := range list.Forward(nonce) { hash := tx.Hash() log.Trace("Removed old pending transaction", "hash", hash) - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() } // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later @@ -1252,7 +1249,7 @@ func (pool *TxPool) demoteUnexecutables() { for _, tx := range drops { hash := tx.Hash() log.Trace("Removed unpayable pending transaction", "hash", hash) - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() pendingNofundsCounter.Inc(1) } @@ -1324,3 +1321,68 @@ func (as *accountSet) containsTx(tx *types.Transaction) bool { func (as *accountSet) add(addr common.Address) { as.accounts[addr] = struct{}{} } + +// txLookup is used internally by TxPool to track transactions while allowing lookup without +// mutex contention. +// +// Note, although this type is properly protected against concurrent access, it +// is **not** a type that should ever be mutated or even exposed outside of the +// transaction pool, since its internal state is tightly coupled with the pools +// internal mechanisms. The sole purpose of the type is to permit out-of-bound +// peeking into the pool in TxPool.Get without having to acquire the widely scoped +// TxPool.mu mutex. 
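As a rough standalone illustration of that discipline (plain strings stand in for common.Hash and *types.Transaction), concurrent readers of the txLookup-style wrapper defined next proceed in parallel under the read lock while writers serialize:

package main

import (
	"fmt"
	"sync"
)

// lookup mirrors txLookup's shape with demo string keys and values.
type lookup struct {
	all  map[string]string
	lock sync.RWMutex
}

func (l *lookup) Get(hash string) string {
	l.lock.RLock() // readers only contend with writers, not each other
	defer l.lock.RUnlock()
	return l.all[hash]
}

func (l *lookup) Add(hash, tx string) {
	l.lock.Lock() // writers are exclusive
	defer l.lock.Unlock()
	l.all[hash] = tx
}

func main() {
	l := &lookup{all: make(map[string]string)}
	l.Add("0xaa", "tx(nonce=0)")

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { // all four Gets may hold the RLock simultaneously
			defer wg.Done()
			fmt.Println(l.Get("0xaa"))
		}()
	}
	wg.Wait()
}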
+type txLookup struct { + all map[common.Hash]*types.Transaction + lock sync.RWMutex +} + +// newTxLookup returns a new txLookup structure. +func newTxLookup() *txLookup { + return &txLookup{ + all: make(map[common.Hash]*types.Transaction), + } +} + +// Range calls f on each key and value present in the map. +func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) { + t.lock.RLock() + defer t.lock.RUnlock() + + for key, value := range t.all { + if !f(key, value) { + break + } + } +} + +// Get returns a transaction if it exists in the lookup, or nil if not found. +func (t *txLookup) Get(hash common.Hash) *types.Transaction { + t.lock.RLock() + defer t.lock.RUnlock() + + return t.all[hash] +} + +// Count returns the current number of items in the lookup. +func (t *txLookup) Count() int { + t.lock.RLock() + defer t.lock.RUnlock() + + return len(t.all) +} + +// Add adds a transaction to the lookup. +func (t *txLookup) Add(tx *types.Transaction) { + t.lock.Lock() + defer t.lock.Unlock() + + t.all[tx.Hash()] = tx +} + +// Remove removes a transaction from the lookup. +func (t *txLookup) Remove(hash common.Hash) { + t.lock.Lock() + defer t.lock.Unlock() + + delete(t.all, hash) +} diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index 7fff282cf608..28b1c7c83af2 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -112,7 +112,7 @@ func validateTxPoolInternals(pool *TxPool) error { // Ensure the total transaction set is consistent with pending + queued pending, queued := pool.stats() - if total := len(pool.all); total != pending+queued { + if total := pool.all.Count(); total != pending+queued { return fmt.Errorf("total transaction count %d != %d pending + %d queued", total, pending, queued) } if priced := pool.priced.items.Len() - pool.priced.stales; priced != pending+queued { @@ -423,8 +423,8 @@ func TestTransactionDoubleNonce(t *testing.T) { t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash()) } // Ensure the total transaction count is correct - if len(pool.all) != 1 { - t.Error("expected 1 total transactions, got", len(pool.all)) + if pool.all.Count() != 1 { + t.Error("expected 1 total transactions, got", pool.all.Count()) } } @@ -446,8 +446,8 @@ func TestTransactionMissingNonce(t *testing.T) { if pool.queue[addr].Len() != 1 { t.Error("expected 1 queued transaction, got", pool.queue[addr].Len()) } - if len(pool.all) != 1 { - t.Error("expected 1 total transactions, got", len(pool.all)) + if pool.all.Count() != 1 { + t.Error("expected 1 total transactions, got", pool.all.Count()) } } @@ -510,8 +510,8 @@ func TestTransactionDropping(t *testing.T) { if pool.queue[account].Len() != 3 { t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) } - if len(pool.all) != 6 { - t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 6) + if pool.all.Count() != 6 { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) } pool.lockedReset(nil, nil) if pool.pending[account].Len() != 3 { @@ -520,8 +520,8 @@ func TestTransactionDropping(t *testing.T) { if pool.queue[account].Len() != 3 { t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) } - if len(pool.all) != 6 { - t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 6) + if pool.all.Count() != 6 { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) } // Reduce the balance of the account, and check that invalidated transactions are dropped 
pool.currentState.AddBalance(account, big.NewInt(-650)) @@ -545,8 +545,8 @@ func TestTransactionDropping(t *testing.T) { if _, ok := pool.queue[account].txs.items[tx12.Nonce()]; ok { t.Errorf("out-of-fund queued transaction present: %v", tx11) } - if len(pool.all) != 4 { - t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 4) + if pool.all.Count() != 4 { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4) } // Reduce the block gas limit, check that invalidated transactions are dropped pool.chain.(*testBlockChain).gasLimit = 100 @@ -564,8 +564,8 @@ func TestTransactionDropping(t *testing.T) { if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok { t.Errorf("over-gased queued transaction present: %v", tx11) } - if len(pool.all) != 2 { - t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 2) + if pool.all.Count() != 2 { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 2) } } @@ -619,8 +619,8 @@ func TestTransactionPostponing(t *testing.T) { if len(pool.queue) != 0 { t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0) } - if len(pool.all) != len(txs) { - t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txs)) + if pool.all.Count() != len(txs) { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) } pool.lockedReset(nil, nil) if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) { @@ -629,8 +629,8 @@ func TestTransactionPostponing(t *testing.T) { if len(pool.queue) != 0 { t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0) } - if len(pool.all) != len(txs) { - t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txs)) + if pool.all.Count() != len(txs) { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) } // Reduce the balance of the account, and check that transactions are reorganised for _, addr := range accs { @@ -679,8 +679,8 @@ func TestTransactionPostponing(t *testing.T) { } } } - if len(pool.all) != len(txs)/2 { - t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txs)/2) + if pool.all.Count() != len(txs)/2 { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)/2) } } @@ -771,8 +771,8 @@ func TestTransactionQueueAccountLimiting(t *testing.T) { } } } - if len(pool.all) != int(testTxPoolConfig.AccountQueue) { - t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), testTxPoolConfig.AccountQueue) + if pool.all.Count() != int(testTxPoolConfig.AccountQueue) { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), testTxPoolConfig.AccountQueue) } } @@ -971,8 +971,8 @@ func TestTransactionPendingLimiting(t *testing.T) { t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0) } } - if len(pool.all) != int(testTxPoolConfig.AccountQueue) { - t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), testTxPoolConfig.AccountQueue+5) + if pool.all.Count() != int(testTxPoolConfig.AccountQueue) { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), testTxPoolConfig.AccountQueue+5) } if err := validateEvents(events, int(testTxPoolConfig.AccountQueue)); err != nil { t.Fatalf("event firing failed: %v", err) @@ -1024,8 +1024,8 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) { if len(pool1.queue) != len(pool2.queue) { 
t.Errorf("queued transaction count mismatch: one-by-one algo: %d, batch algo: %d", len(pool1.queue), len(pool2.queue)) } - if len(pool1.all) != len(pool2.all) { - t.Errorf("total transaction count mismatch: one-by-one algo %d, batch algo %d", len(pool1.all), len(pool2.all)) + if pool1.all.Count() != pool2.all.Count() { + t.Errorf("total transaction count mismatch: one-by-one algo %d, batch algo %d", pool1.all.Count(), pool2.all.Count()) } if err := validateTxPoolInternals(pool1); err != nil { t.Errorf("pool 1 internal state corrupted: %v", err) From fb89a5406b5d923d2bc00b72e34dc2c1ea22ac2c Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Tue, 7 May 2024 23:17:55 +0800 Subject: [PATCH 04/23] core: fix transaction event asynchronicity (#16843) --- core/tx_pool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index 20f9ad8bcb05..c8c1f0bb1025 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -1110,7 +1110,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { } // Notify subsystem for new promoted transactions. if len(promoted) > 0 { - pool.txFeed.Send(NewTxsEvent{promoted}) + go pool.txFeed.Send(NewTxsEvent{promoted}) } // If the pending limit is overflown, start equalizing allowances pending := uint64(0) From 4f0f6e08c708e8a0c3931a7fba25a8c01f1bb2b8 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Wed, 8 May 2024 00:00:43 +0800 Subject: [PATCH 05/23] core: concurrent background transaction sender ecrecover (#16882) --- core/blockchain.go | 7 +++ core/tx_cacher.go | 105 +++++++++++++++++++++++++++++++++++++++++++++ core/tx_pool.go | 1 + 3 files changed, 113 insertions(+) create mode 100644 core/tx_cacher.go diff --git a/core/blockchain.go b/core/blockchain.go index a73bcd0c43f7..6b55beb878b5 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1450,6 +1450,10 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { // only reason this method exists as a separate one is to make locking cleaner // with deferred statements. func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) { + // Sanity check that we have something meaningful to import + if len(chain) == 0 { + return 0, nil, nil, nil + } engine, _ := bc.Engine().(*XDPoS.XDPoS) // Do a sanity check that the provided chain is actually ordered and linked @@ -1491,6 +1495,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, [] abort, results := bc.engine.VerifyHeaders(bc, headers, seals) defer close(abort) + // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) + senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain) + // Iterate over the blocks and insert when the verifier permits for i, block := range chain { // If the chain is terminating, stop processing blocks diff --git a/core/tx_cacher.go b/core/tx_cacher.go new file mode 100644 index 000000000000..ea4ab6cc07f6 --- /dev/null +++ b/core/tx_cacher.go @@ -0,0 +1,105 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package core + +import ( + "runtime" + + "github.com/XinFinOrg/XDPoSChain/core/types" +) + +// senderCacher is a concurrent transaction sender recoverer and cacher. +var senderCacher = newTxSenderCacher(runtime.NumCPU()) + +// txSenderCacherRequest is a request for recovering transaction senders with a +// specific signature scheme and caching it into the transactions themselves. +// +// The inc field defines the number of transactions to skip after each recovery, +// which is used to feed the same underlying input array to different threads but +// ensure they process the early transactions fast. +type txSenderCacherRequest struct { + signer types.Signer + txs []*types.Transaction + inc int +} + +// txSenderCacher is a helper structure to concurrently ecrecover transaction +// senders from digital signatures on background threads. +type txSenderCacher struct { + threads int + tasks chan *txSenderCacherRequest +} + +// newTxSenderCacher creates a new transaction sender background cacher and starts +// as many processing goroutines as allowed by the GOMAXPROCS on construction. +func newTxSenderCacher(threads int) *txSenderCacher { + cacher := &txSenderCacher{ + tasks: make(chan *txSenderCacherRequest, threads), + threads: threads, + } + for i := 0; i < threads; i++ { + go cacher.cache() + } + return cacher +} + +// cache is an infinite loop, caching transaction senders from various forms of +// data structures. +func (cacher *txSenderCacher) cache() { + for task := range cacher.tasks { + for i := 0; i < len(task.txs); i += task.inc { + types.Sender(task.signer, task.txs[i]) + } + } +} + +// recover recovers the senders from a batch of transactions and caches them +// back into the same data structures. There is no validation being done, nor +// any reaction to invalid signatures. That is up to calling code later. +func (cacher *txSenderCacher) recover(signer types.Signer, txs []*types.Transaction) { + // If there's nothing to recover, abort + if len(txs) == 0 { + return + } + // Ensure we have meaningful task sizes and schedule the recoveries + tasks := cacher.threads + if len(txs) < tasks*4 { + tasks = (len(txs) + 3) / 4 + } + for i := 0; i < tasks; i++ { + cacher.tasks <- &txSenderCacherRequest{ + signer: signer, + txs: txs[i:], + inc: tasks, + } + } +} + +// recoverFromBlocks recovers the senders from a batch of blocks and caches them +// back into the same data structures. There is no validation being done, nor +// any reaction to invalid signatures. That is up to calling code later. +func (cacher *txSenderCacher) recoverFromBlocks(signer types.Signer, blocks []*types.Block) { + count := 0 + for _, block := range blocks { + count += len(block.Transactions()) + } + txs := make([]*types.Transaction, 0, count) + for _, block := range blocks { + txs = append(txs, block.Transactions()...)
+ } + cacher.recover(signer, txs) +} diff --git a/core/tx_pool.go b/core/tx_pool.go index c8c1f0bb1025..180d245f9492 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -436,6 +436,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { // Inject any transactions discarded due to reorgs log.Debug("Reinjecting stale transactions", "count", len(reinject)) + senderCacher.recover(pool.signer, reinject) pool.addTxsLocked(reinject, false) // validate the pool of pending transactions, this will remove From 859308c03fef52b927e0ac52a868e7c5f8fb7535 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Wed, 8 May 2024 12:53:19 +0800 Subject: [PATCH 06/23] core: change comment to match code more closely (#16963) --- core/tx_pool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index 180d245f9492..d7c4a7ffd4ed 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -1259,7 +1259,7 @@ func (pool *TxPool) demoteUnexecutables() { log.Trace("Demoting pending transaction", "hash", hash) pool.enqueueTx(hash, tx) } - // If there's a gap in front, warn (should never happen) and postpone all transactions + // If there's a gap in front, alert (should never happen) and postpone all transactions if list.Len() > 0 && list.txs.Get(nonce) == nil { for _, tx := range list.Cap(0) { hash := tx.Hash() From 5b883dee302f65dd812f1bf52d78d517592471a9 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Wed, 8 May 2024 13:50:49 +0800 Subject: [PATCH 07/23] core/tx_pool: reduce judgement levels (#16980) --- core/tx_pool.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index d7c4a7ffd4ed..92b1b3cce61d 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -954,11 +954,9 @@ func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error { for i, tx := range txs { var replace bool - if replace, errs[i] = pool.add(tx, local); errs[i] == nil { - if !replace { - from, _ := types.Sender(pool.signer, tx) // already validated - dirty[from] = struct{}{} - } + if replace, errs[i] = pool.add(tx, local); errs[i] == nil && !replace { + from, _ := types.Sender(pool.signer, tx) // already validated + dirty[from] = struct{}{} } } // Only reprocess the internal state if something was actually added From 241201cf0eebe1abfd00d25ecfd432d8a693fae6 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Wed, 8 May 2024 13:59:51 +0800 Subject: [PATCH 08/23] core: fixed typo (#17214) --- core/tx_pool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index 92b1b3cce61d..46313dbb0ab1 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -151,7 +151,7 @@ type TxPoolConfig struct { PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) - AccountSlots uint64 // Minimum number of executable transaction slots guaranteed per account + AccountSlots uint64 // Number of executable transaction slots guaranteed per account GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts From 41b29a8cf84497fdea1fde9b2bde116f91801b8d Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Wed, 8 May 2024 14:10:08 +0800 Subject: [PATCH 09/23] Fixed typo addresssByHeartbeat -> 
addressesByHeartbeat (#17243) --- core/lending_pool.go | 2 +- core/order_pool.go | 2 +- core/tx_pool.go | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/core/lending_pool.go b/core/lending_pool.go index fdeeb9066cda..64c34ab63e69 100644 --- a/core/lending_pool.go +++ b/core/lending_pool.go @@ -1066,7 +1066,7 @@ func (pool *LendingPool) promoteExecutables(accounts []common.Address) { } if queued > pool.config.GlobalQueue { // Sort all accounts with queued transactions by heartbeat - addresses := make(addresssByHeartbeat, 0, len(pool.queue)) + addresses := make(addressesByHeartbeat, 0, len(pool.queue)) for addr := range pool.queue { if !pool.locals.contains(addr) { // don't drop locals addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) diff --git a/core/order_pool.go b/core/order_pool.go index a4b8b3827a54..6f65bc3fb1a8 100644 --- a/core/order_pool.go +++ b/core/order_pool.go @@ -982,7 +982,7 @@ func (pool *OrderPool) promoteExecutables(accounts []common.Address) { } if queued > pool.config.GlobalQueue { // Sort all accounts with queued transactions by heartbeat - addresses := make(addresssByHeartbeat, 0, len(pool.queue)) + addresses := make(addressesByHeartbeat, 0, len(pool.queue)) for addr := range pool.queue { if !pool.locals.contains(addr) { // don't drop locals addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) diff --git a/core/tx_pool.go b/core/tx_pool.go index 46313dbb0ab1..99df0373b78d 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -1189,7 +1189,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { } if queued > pool.config.GlobalQueue { // Sort all accounts with queued transactions by heartbeat - addresses := make(addresssByHeartbeat, 0, len(pool.queue)) + addresses := make(addressesByHeartbeat, 0, len(pool.queue)) for addr := range pool.queue { if !pool.locals.contains(addr) { // don't drop locals addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) @@ -1279,11 +1279,11 @@ type addressByHeartbeat struct { heartbeat time.Time } -type addresssByHeartbeat []addressByHeartbeat +type addressesByHeartbeat []addressByHeartbeat -func (a addresssByHeartbeat) Len() int { return len(a) } -func (a addresssByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } -func (a addresssByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a addressesByHeartbeat) Len() int { return len(a) } +func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } +func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // accountSet is simply a set of addresses to check for existence, and a signer // capable of deriving addresses from transactions. From 6c657ef6af8d2c274e106e08db56abc30793c494 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Wed, 8 May 2024 19:27:41 +0800 Subject: [PATCH 10/23] core: priority mining (#17472) --- core/tx_pool.go | 39 +++++++++++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index 99df0373b78d..7a1f6f0a83d1 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -144,9 +144,10 @@ type blockChain interface { // TxPoolConfig are the configuration parameters of the transaction pool. 
type TxPoolConfig struct { - NoLocals bool // Whether local transaction handling should be disabled - Journal string // Journal of local transactions to survive node restarts - Rejournal time.Duration // Time interval to regenerate the local transaction journal + Locals []common.Address // Addresses that should be treated by default as local + NoLocals bool // Whether local transaction handling should be disabled + Journal string // Journal of local transactions to survive node restarts + Rejournal time.Duration // Time interval to regenerate the local transaction journal PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) @@ -255,6 +256,10 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block trc21FeeCapacity: map[common.Address]*big.Int{}, } pool.locals = newAccountSet(pool.signer) + for _, addr := range config.Locals { + log.Info("Setting new local account", "address", addr) + pool.locals.add(addr) + } pool.priced = newTxPricedList(pool.all) pool.reset(nil, chain.CurrentBlock().Header()) @@ -559,6 +564,14 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) { return pending, nil } +// Locals retrieves the accounts currently considered local by the pool. +func (pool *TxPool) Locals() []common.Address { + pool.mu.Lock() + defer pool.mu.Unlock() + + return pool.locals.flatten() +} + // local retrieves all currently known local transactions, grouped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. @@ -768,7 +781,10 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { } // Mark local addresses and journal local transactions if local { - pool.locals.add(from) + if !pool.locals.contains(from) { + log.Info("Setting new local account", "address", from) + pool.locals.add(from) + } } pool.journalTx(from, tx) @@ -1290,6 +1306,7 @@ func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } type accountSet struct { accounts map[common.Address]struct{} signer types.Signer + cache *[]common.Address } // newAccountSet creates a new address set with an associated signer for sender @@ -1319,6 +1336,20 @@ func (as *accountSet) containsTx(tx *types.Transaction) bool { // add inserts a new address into the set to track. func (as *accountSet) add(addr common.Address) { as.accounts[addr] = struct{}{} + as.cache = nil +} + +// flatten returns the list of addresses within this set, also caching it for later +// reuse. The returned slice should not be changed!
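The cache field invalidated in add above and the flatten method just below form a small invalidate-on-write cache: a mutation clears the cached slice, and flatten rebuilds it at most once per mutation. The same pattern in a self-contained sketch (string keys, demo names):

package main

import "fmt"

// set mirrors accountSet's caching trick with string keys.
type set struct {
	items map[string]struct{}
	cache *[]string
}

func (s *set) add(k string) {
	s.items[k] = struct{}{}
	s.cache = nil // any cached flatten result is stale now
}

func (s *set) flatten() []string {
	if s.cache == nil { // rebuild at most once per mutation
		flat := make([]string, 0, len(s.items))
		for k := range s.items {
			flat = append(flat, k)
		}
		s.cache = &flat
	}
	return *s.cache
}

func main() {
	s := &set{items: make(map[string]struct{})}
	s.add("alice")
	fmt.Println(s.flatten()) // builds the cache: [alice]
	fmt.Println(s.flatten()) // cache hit, no rebuild
	s.add("bob")
	fmt.Println(s.flatten()) // rebuilt; map iteration order is unspecified
}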
+func (as *accountSet) flatten() []common.Address { + if as.cache == nil { + accounts := make([]common.Address, 0, len(as.accounts)) + for account := range as.accounts { + accounts = append(accounts, account) + } + as.cache = &accounts + } + return *as.cache } // txLookup is used internally by TxPool to track transactions while allowing lookup without From ec50ca36d9b83d7d9fb92c351c8c7c46fefc4f2a Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Thu, 9 May 2024 11:46:19 +0800 Subject: [PATCH 11/23] core, eth, trie: use common/prque (#17508) --- XDCx/XDCx.go | 11 +++++------ XDCxlending/XDCxlending.go | 11 +++++------ consensus/XDPoS/utils/types.go | 2 +- core/blockchain.go | 14 ++++++-------- core/lending_pool.go | 12 +++++------- core/order_pool.go | 9 ++++----- core/tx_pool.go | 9 ++++----- eth/downloader/queue.go | 34 +++++++++++++++++----------------- eth/fetcher/fetcher.go | 8 ++++---- go.mod | 1 - go.sum | 2 -- 11 files changed, 51 insertions(+), 62 deletions(-) diff --git a/XDCx/XDCx.go b/XDCx/XDCx.go index bae3d00374c0..6506929984d7 100644 --- a/XDCx/XDCx.go +++ b/XDCx/XDCx.go @@ -9,14 +9,13 @@ import ( "github.com/XinFinOrg/XDPoSChain/XDCx/tradingstate" "github.com/XinFinOrg/XDPoSChain/XDCxDAO" - "github.com/XinFinOrg/XDPoSChain/consensus" - "github.com/XinFinOrg/XDPoSChain/core/types" - "github.com/XinFinOrg/XDPoSChain/p2p" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" - "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/prque" + "github.com/XinFinOrg/XDPoSChain/consensus" "github.com/XinFinOrg/XDPoSChain/core/state" + "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/log" + "github.com/XinFinOrg/XDPoSChain/p2p" "github.com/XinFinOrg/XDPoSChain/rpc" lru "github.com/hashicorp/golang-lru" "golang.org/x/sync/syncmap" @@ -105,7 +104,7 @@ func New(cfg *Config) *XDCX { } XDCX := &XDCX{ orderNonce: make(map[common.Address]*big.Int), - Triegc: prque.New(), + Triegc: prque.New(nil), tokenDecimalCache: tokenDecimalCache, orderCache: orderCache, } diff --git a/XDCxlending/XDCxlending.go b/XDCxlending/XDCxlending.go index 6818b375a20d..352c224d0a20 100644 --- a/XDCxlending/XDCxlending.go +++ b/XDCxlending/XDCxlending.go @@ -12,14 +12,13 @@ import ( "github.com/XinFinOrg/XDPoSChain/XDCx/tradingstate" "github.com/XinFinOrg/XDPoSChain/XDCxDAO" "github.com/XinFinOrg/XDPoSChain/XDCxlending/lendingstate" - "github.com/XinFinOrg/XDPoSChain/consensus" - "github.com/XinFinOrg/XDPoSChain/core/types" - "github.com/XinFinOrg/XDPoSChain/p2p" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" - "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/prque" + "github.com/XinFinOrg/XDPoSChain/consensus" "github.com/XinFinOrg/XDPoSChain/core/state" + "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/log" + "github.com/XinFinOrg/XDPoSChain/p2p" "github.com/XinFinOrg/XDPoSChain/rpc" lru "github.com/hashicorp/golang-lru" ) @@ -67,7 +66,7 @@ func New(XDCx *XDCx.XDCX) *Lending { lendingTradeCache, _ := lru.New(defaultCacheLimit) lending := &Lending{ orderNonce: make(map[common.Address]*big.Int), - Triegc: prque.New(), + Triegc: prque.New(nil), lendingItemHistory: itemCache, lendingTradeHistory: lendingTradeCache, } diff --git a/consensus/XDPoS/utils/types.go b/consensus/XDPoS/utils/types.go index 4073fb522bd9..897e984b4811 100644 --- a/consensus/XDPoS/utils/types.go +++ b/consensus/XDPoS/utils/types.go @@ -7,11 +7,11 @@ import ( "github.com/XinFinOrg/XDPoSChain/XDCx/tradingstate" 
"github.com/XinFinOrg/XDPoSChain/XDCxlending/lendingstate" "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/prque" "github.com/XinFinOrg/XDPoSChain/consensus" "github.com/XinFinOrg/XDPoSChain/consensus/clique" "github.com/XinFinOrg/XDPoSChain/core/state" "github.com/XinFinOrg/XDPoSChain/core/types" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) type Masternode struct { diff --git a/core/blockchain.go b/core/blockchain.go index 6b55beb878b5..090a52b4bff7 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -28,13 +28,12 @@ import ( "sync/atomic" "time" - "github.com/XinFinOrg/XDPoSChain/XDCxlending/lendingstate" - "github.com/XinFinOrg/XDPoSChain/XDCx/tradingstate" + "github.com/XinFinOrg/XDPoSChain/XDCxlending/lendingstate" "github.com/XinFinOrg/XDPoSChain/accounts/abi/bind" - "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/common/mclock" + "github.com/XinFinOrg/XDPoSChain/common/prque" "github.com/XinFinOrg/XDPoSChain/common/sort" "github.com/XinFinOrg/XDPoSChain/consensus" "github.com/XinFinOrg/XDPoSChain/consensus/XDPoS" @@ -53,7 +52,6 @@ import ( "github.com/XinFinOrg/XDPoSChain/rlp" "github.com/XinFinOrg/XDPoSChain/trie" lru "github.com/hashicorp/golang-lru" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) var ( @@ -201,7 +199,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par chainConfig: chainConfig, cacheConfig: cacheConfig, db: db, - triegc: prque.New(), + triegc: prque.New(nil), stateCache: state.NewDatabase(db), quit: make(chan struct{}), bodyCache: bodyCache, @@ -1268,18 +1266,18 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. } else { // Full but not archive node, do proper garbage collection triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive - bc.triegc.Push(root, -float32(block.NumberU64())) + bc.triegc.Push(root, -int64(block.NumberU64())) if tradingTrieDb != nil { tradingTrieDb.Reference(tradingRoot, common.Hash{}) } if tradingService != nil { - tradingService.GetTriegc().Push(tradingRoot, -float32(block.NumberU64())) + tradingService.GetTriegc().Push(tradingRoot, -int64(block.NumberU64())) } if lendingTrieDb != nil { lendingTrieDb.Reference(lendingRoot, common.Hash{}) } if lendingService != nil { - lendingService.GetTriegc().Push(lendingRoot, -float32(block.NumberU64())) + lendingService.GetTriegc().Push(lendingRoot, -int64(block.NumberU64())) } if current := block.NumberU64(); current > triesInMemory { // Find the next state trie we need to commit diff --git a/core/lending_pool.go b/core/lending_pool.go index 64c34ab63e69..69b89d83a937 100644 --- a/core/lending_pool.go +++ b/core/lending_pool.go @@ -24,18 +24,16 @@ import ( "sync" "time" - "github.com/XinFinOrg/XDPoSChain/consensus/XDPoS" - "github.com/XinFinOrg/XDPoSChain/XDCxlending/lendingstate" - "github.com/XinFinOrg/XDPoSChain/consensus" - "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/prque" + "github.com/XinFinOrg/XDPoSChain/consensus" + "github.com/XinFinOrg/XDPoSChain/consensus/XDPoS" "github.com/XinFinOrg/XDPoSChain/core/state" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/event" "github.com/XinFinOrg/XDPoSChain/log" "github.com/XinFinOrg/XDPoSChain/params" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) var ( @@ -998,11 +996,11 @@ func (pool *LendingPool) promoteExecutables(accounts []common.Address) { if pending > pool.config.GlobalSlots { pendingBeforeCap 
:= pending // Assemble a spam order to penalize large transactors first - spammers := prque.New() + spammers := prque.New(nil) for addr, list := range pool.pending { // Only evict transactions from high rollers if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { - spammers.Push(addr, float32(list.Len())) + spammers.Push(addr, int64(list.Len())) } } // Gradually drop transactions from offenders diff --git a/core/order_pool.go b/core/order_pool.go index 6f65bc3fb1a8..c72e3aeeb101 100644 --- a/core/order_pool.go +++ b/core/order_pool.go @@ -25,16 +25,15 @@ import ( "time" "github.com/XinFinOrg/XDPoSChain/XDCx/tradingstate" + "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/prque" "github.com/XinFinOrg/XDPoSChain/consensus" "github.com/XinFinOrg/XDPoSChain/consensus/XDPoS" - - "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core/state" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/event" "github.com/XinFinOrg/XDPoSChain/log" "github.com/XinFinOrg/XDPoSChain/params" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) var ( @@ -914,11 +913,11 @@ func (pool *OrderPool) promoteExecutables(accounts []common.Address) { if pending > pool.config.GlobalSlots { pendingBeforeCap := pending // Assemble a spam order to penalize large transactors first - spammers := prque.New() + spammers := prque.New(nil) for addr, list := range pool.pending { // Only evict transactions from high rollers if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { - spammers.Push(addr, float32(list.Len())) + spammers.Push(addr, int64(list.Len())) } } // Gradually drop transactions from offenders diff --git a/core/tx_pool.go b/core/tx_pool.go index 7a1f6f0a83d1..badd3e72c18d 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -25,16 +25,15 @@ import ( "sync" "time" - "github.com/XinFinOrg/XDPoSChain/consensus" - "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/prque" + "github.com/XinFinOrg/XDPoSChain/consensus" "github.com/XinFinOrg/XDPoSChain/core/state" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/event" "github.com/XinFinOrg/XDPoSChain/log" "github.com/XinFinOrg/XDPoSChain/metrics" "github.com/XinFinOrg/XDPoSChain/params" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) const ( @@ -1135,11 +1134,11 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { if pending > pool.config.GlobalSlots { pendingBeforeCap := pending // Assemble a spam order to penalize large transactors first - spammers := prque.New() + spammers := prque.New(nil) for addr, list := range pool.pending { // Only evict transactions from high rollers if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { - spammers.Push(addr, float32(list.Len())) + spammers.Push(addr, int64(list.Len())) } } // Gradually drop transactions from offenders diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index a67f12c47765..b9f16e673f94 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -26,10 +26,10 @@ import ( "time" "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/prque" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/log" "github.com/XinFinOrg/XDPoSChain/metrics" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) var ( @@ -105,11 +105,11 @@ func newQueue() *queue { headerPendPool: make(map[string]*fetchRequest), 
headerContCh: make(chan bool), blockTaskPool: make(map[common.Hash]*types.Header), - blockTaskQueue: prque.New(), + blockTaskQueue: prque.New(nil), blockPendPool: make(map[string]*fetchRequest), blockDonePool: make(map[common.Hash]struct{}), receiptTaskPool: make(map[common.Hash]*types.Header), - receiptTaskQueue: prque.New(), + receiptTaskQueue: prque.New(nil), receiptPendPool: make(map[string]*fetchRequest), receiptDonePool: make(map[common.Hash]struct{}), resultCache: make([]*fetchResult, blockCacheItems), @@ -278,7 +278,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) { } // Shedule all the header retrieval tasks for the skeleton assembly q.headerTaskPool = make(map[uint64]*types.Header) - q.headerTaskQueue = prque.New() + q.headerTaskQueue = prque.New(nil) q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch) q.headerProced = 0 @@ -289,7 +289,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) { index := from + uint64(i*MaxHeaderFetch) q.headerTaskPool[index] = header - q.headerTaskQueue.Push(index, -float32(index)) + q.headerTaskQueue.Push(index, -int64(index)) } } @@ -335,11 +335,11 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { } // Queue the header for content retrieval q.blockTaskPool[hash] = header - q.blockTaskQueue.Push(header, -float32(header.Number.Uint64())) + q.blockTaskQueue.Push(header, -int64(header.Number.Uint64())) if q.mode == FastSync { q.receiptTaskPool[hash] = header - q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64())) + q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64())) } inserts = append(inserts, header) q.headerHead = hash @@ -437,7 +437,7 @@ func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest { } // Merge all the skipped batches back for _, from := range skip { - q.headerTaskQueue.Push(from, -float32(from)) + q.headerTaskQueue.Push(from, -int64(from)) } // Assemble and return the block download request if send == 0 { @@ -544,7 +544,7 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common } // Merge all the skipped headers back for _, header := range skip { - taskQueue.Push(header, -float32(header.Number.Uint64())) + taskQueue.Push(header, -int64(header.Number.Uint64())) } if progress { // Wake WaitResults, resultCache was modified @@ -587,10 +587,10 @@ func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool m defer q.lock.Unlock() if request.From > 0 { - taskQueue.Push(request.From, -float32(request.From)) + taskQueue.Push(request.From, -int64(request.From)) } for _, header := range request.Headers { - taskQueue.Push(header, -float32(header.Number.Uint64())) + taskQueue.Push(header, -int64(header.Number.Uint64())) } delete(pendPool, request.Peer.id) } @@ -604,13 +604,13 @@ func (q *queue) Revoke(peerId string) { if request, ok := q.blockPendPool[peerId]; ok { for _, header := range request.Headers { - q.blockTaskQueue.Push(header, -float32(header.Number.Uint64())) + q.blockTaskQueue.Push(header, -int64(header.Number.Uint64())) } delete(q.blockPendPool, peerId) } if request, ok := q.receiptPendPool[peerId]; ok { for _, header := range request.Headers { - q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64())) + q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64())) } delete(q.receiptPendPool, peerId) } @@ -659,10 +659,10 @@ func (q 
*queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, // Return any non satisfied requests to the pool if request.From > 0 { - taskQueue.Push(request.From, -float32(request.From)) + taskQueue.Push(request.From, -int64(request.From)) } for _, header := range request.Headers { - taskQueue.Push(header, -float32(header.Number.Uint64())) + taskQueue.Push(header, -int64(header.Number.Uint64())) } // Add the peer to the expiry report along the the number of failed requests expiries[id] = len(request.Headers) @@ -733,7 +733,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh } miss[request.From] = struct{}{} - q.headerTaskQueue.Push(request.From, -float32(request.From)) + q.headerTaskQueue.Push(request.From, -int64(request.From)) return 0, errors.New("delivery not accepted") } // Clean up a successful fetch and try to deliver any sub-results @@ -856,7 +856,7 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ // Return all failed or missing fetches to the queue for _, header := range request.Headers { if header != nil { - taskQueue.Push(header, -float32(header.Number.Uint64())) + taskQueue.Push(header, -int64(header.Number.Uint64())) } } // Wake up WaitResults diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/fetcher.go index c31e05961d9f..7d1e15fd4dea 100644 --- a/eth/fetcher/fetcher.go +++ b/eth/fetcher/fetcher.go @@ -25,10 +25,10 @@ import ( lru "github.com/hashicorp/golang-lru" "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/prque" "github.com/XinFinOrg/XDPoSChain/consensus" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/log" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) const ( @@ -171,7 +171,7 @@ func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, handlePropose fetching: make(map[common.Hash]*announce), fetched: make(map[common.Hash][]*announce), completing: make(map[common.Hash]*announce), - queue: prque.New(), + queue: prque.New(nil), queues: make(map[string]int), queued: make(map[common.Hash]*inject), knowns: knownBlocks, @@ -312,7 +312,7 @@ func (f *Fetcher) loop() { // If too high up the chain or phase, continue later number := op.block.NumberU64() if number > height+1 { - f.queue.Push(op, -float32(op.block.NumberU64())) + f.queue.Push(op, -int64(op.block.NumberU64())) if f.queueChangeHook != nil { f.queueChangeHook(op.block.Hash(), true) } @@ -642,7 +642,7 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) { f.queues[peer] = count f.queued[hash] = op f.knowns.Add(hash, true) - f.queue.Push(op, -float32(block.NumberU64())) + f.queue.Push(op, -int64(block.NumberU64())) if f.queueChangeHook != nil { f.queueChangeHook(op.block.Hash(), true) } diff --git a/go.mod b/go.mod index 8d19bc4956fc..050c400ba875 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,6 @@ require ( golang.org/x/sys v0.14.0 golang.org/x/tools v0.14.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c - gopkg.in/karalabe/cookiejar.v2 v2.0.0-20150724131613-8dcd6a7f4951 gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772 gopkg.in/urfave/cli.v1 v1.20.0 diff --git a/go.sum b/go.sum index 19df00ede131..3d1931b62001 100644 --- a/go.sum +++ b/go.sum @@ -371,8 +371,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 
v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/karalabe/cookiejar.v2 v2.0.0-20150724131613-8dcd6a7f4951 h1:DMTcQRFbEH62YPRWwOI647s2e5mHda3oBPMHfrLs2bw= -gopkg.in/karalabe/cookiejar.v2 v2.0.0-20150724131613-8dcd6a7f4951/go.mod h1:owOxCRGGeAx1uugABik6K9oeNu1cgxP/R9ItzLDxNWA= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772 h1:hhsSf/5z74Ck/DJYc+R8zpq8KGm7uJvpdLRQED/IedA= From a6664651c728926b5b0e074cbb283853fe5e5e76 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Thu, 9 May 2024 12:16:52 +0800 Subject: [PATCH 12/23] core: fix a typo (#17733) --- core/tx_pool.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index badd3e72c18d..a851ff276ed0 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -549,7 +549,7 @@ func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common return pending, queued } -// Pending retrieves all currently processable transactions, groupped by origin +// Pending retrieves all currently processable transactions, grouped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) { @@ -571,7 +571,7 @@ func (pool *TxPool) Locals() []common.Address { return pool.locals.flatten() } -// local retrieves all currently known local transactions, groupped by origin +// local retrieves all currently known local transactions, grouped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. func (pool *TxPool) local() map[common.Address]types.Transactions { From 6b87c07876f01f1321c9beb4d120794819076e3c Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Thu, 9 May 2024 12:37:49 +0800 Subject: [PATCH 13/23] core: fix comment typo (#18144) --- core/tx_pool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index a851ff276ed0..563dcf5d57be 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -963,7 +963,7 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) []error { // addTxsLocked attempts to queue a batch of transactions if they are valid, // whilst assuming the transaction pool lock is already held. 
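Note on the prque migration completed above (patch 13's addTxsLocked hunk continues right after this sketch): swapping the unmaintained cookiejar collection for the in-tree common/prque also widens priorities from float32 to int64. A float32 has a 24-bit significand, so not every integer above 16,777,216 is representable and two distinct block numbers could collapse onto the same priority; pushing -int64(number) keeps the ordering exact at any chain height. A minimal sketch of the intended ordering, assuming the vendored common/prque keeps the upstream go-ethereum surface (New(setIndexCallback), Push, Pop, Empty):

package main

import (
	"fmt"

	"github.com/XinFinOrg/XDPoSChain/common/prque"
)

func main() {
	// Pop returns the highest priority first, so pushing -int64(n)
	// makes the *lowest* block number come out first.
	q := prque.New(nil) // nil: no index callback needed for this use
	for _, n := range []uint64{16777217, 17000000, 16777216} {
		q.Push(n, -int64(n))
	}
	for !q.Empty() {
		item, prio := q.Pop()
		fmt.Println(item.(uint64), prio)
	}
	// Prints 16777216, then 16777217, then 17000000. With float32
	// priorities, -16777216 and -16777217 round to the same value, so
	// the relative order of those two entries would be undefined.
}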
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error { - // Add the batch of transaction, tracking the accepted ones + // Add the batch of transactions, tracking the accepted ones dirty := make(map[common.Address]struct{}) errs := make([]error, len(txs)) From 676c4e8ec763bcf4e02ac382e263bc514d742009 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Thu, 9 May 2024 13:37:15 +0800 Subject: [PATCH 14/23] core: sanitize more TxPoolConfig fields (#17210) --- core/tx_pool.go | 20 ++++++++++++++++++++ core/tx_pool_test.go | 4 ++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index 563dcf5d57be..b0e43fd468b0 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -192,6 +192,26 @@ func (config *TxPoolConfig) sanitize() TxPoolConfig { log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) conf.PriceBump = DefaultTxPoolConfig.PriceBump } + if conf.AccountSlots < 1 { + log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots) + conf.AccountSlots = DefaultTxPoolConfig.AccountSlots + } + if conf.GlobalSlots < 1 { + log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots) + conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots + } + if conf.AccountQueue < 1 { + log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue) + conf.AccountQueue = DefaultTxPoolConfig.AccountQueue + } + if conf.GlobalQueue < 1 { + log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue) + conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue + } + if conf.Lifetime < 1 { + log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime) + conf.Lifetime = DefaultTxPoolConfig.Lifetime + } return conf } diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index 28b1c7c83af2..f4f7758a6ec2 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -1129,9 +1129,8 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) { blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} config := testTxPoolConfig - config.AccountSlots = 10 - config.GlobalSlots = 0 config.AccountSlots = 5 + config.GlobalSlots = 1 pool := NewTxPool(config, params.TestChainConfig, blockchain) defer pool.Stop() @@ -1473,6 +1472,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) { config := testTxPoolConfig config.GlobalSlots = common.LimitThresholdNonceInQueue config.GlobalQueue = 0 + config.AccountSlots = config.GlobalSlots - 1 pool := NewTxPool(config, params.TestChainConfig, blockchain) defer pool.Stop() From c5b22fbc324b8465199a7db472b1e57feb362da6 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Thu, 9 May 2024 19:00:41 +0800 Subject: [PATCH 15/23] core: make txpool handle reorg due to setHead (#19308) --- core/tx_pool.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/core/tx_pool.go b/core/tx_pool.go index b0e43fd468b0..ac3074ce3476 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -415,6 +415,22 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) ) + if rem == nil { + // This can happen if a setHead is 
performed, where we simply discard the old + // head from the chain. + // If that is the case, we don't have the lost transactions any more, and + // there's nothing to add + if newNum < oldNum { + // If the reorg ended up on a lower number, it's indicative of setHead being the cause + log.Debug("Skipping transaction reset caused by setHead", + "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) + } else { + // If we reorged to a same or higher number, then it's not a case of setHead + log.Warn("Transaction pool reset with missing oldhead", + "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) + } + return + } for rem.NumberU64() > add.NumberU64() { discarded = append(discarded, rem.Transactions()...) if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { From 65baaaaf809df08080633349d1f097fa79bcd4c8 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Fri, 10 May 2024 10:46:08 +0800 Subject: [PATCH 16/23] core: cache tx signature before obtaining lock (#19351) --- core/tx_pool.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index ac3074ce3476..02677fbc2b8e 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -970,8 +970,9 @@ func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error { // addTx enqueues a single transaction into the pool if it is valid. func (pool *TxPool) addTx(tx *types.Transaction, local bool) error { - tx.CacheHash() - types.CacheSigner(pool.signer, tx) + // Cache sender in transaction before obtaining lock (pool.signer is immutable) + types.Sender(pool.signer, tx) + pool.mu.Lock() defer pool.mu.Unlock() @@ -990,6 +991,10 @@ func (pool *TxPool) addTx(tx *types.Transaction, local bool) error { // addTxs attempts to queue a batch of transactions if they are valid. 
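The sender-caching patch above works because types.Sender memoizes: the first call per transaction pays for ECDSA recovery and stores the recovered address inside the transaction object, so the later call made while holding pool.mu is a cheap cache hit and the expensive crypto no longer runs inside the critical section. A runnable sketch of that locking pattern (the addTxs hunk it applies to continues below); tx.Sender here is a hypothetical sync.Once-based stand-in for types.Sender plus its in-object cache:

package main

import (
	"fmt"
	"sync"
	"time"
)

// tx caches its derived sender the way types.Transaction caches the
// recovered address: the first Sender call pays, later calls are free.
type tx struct {
	payload string
	once    sync.Once
	sender  string
}

func (t *tx) Sender() string {
	t.once.Do(func() {
		time.Sleep(time.Millisecond) // stand-in for ECDSA recovery
		t.sender = "addr(" + t.payload + ")"
	})
	return t.sender
}

func main() {
	var mu sync.Mutex // stands in for pool.mu
	txs := []*tx{{payload: "a"}, {payload: "b"}}

	// Pre-warm the caches before taking the lock...
	for _, t := range txs {
		t.Sender()
	}

	// ...so the critical section only performs cache hits.
	mu.Lock()
	start := time.Now()
	for _, t := range txs {
		fmt.Println(t.Sender())
	}
	fmt.Println("lock held for", time.Since(start))
	mu.Unlock()
}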
 func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) []error {
+	// Cache senders in transactions before obtaining lock (pool.signer is immutable)
+	for _, tx := range txs {
+		types.Sender(pool.signer, tx)
+	}
 	pool.mu.Lock()
 	defer pool.mu.Unlock()

From ddbf5d2782948f71c58f2b50ec20ee9c9a477ec5 Mon Sep 17 00:00:00 2001
From: Daniel Liu
Date: Fri, 10 May 2024 12:04:16 +0800
Subject: [PATCH 17/23] core: expose various counter metrics for grafana
 (#19692)

---
 core/lending_pool.go |  22 +++---
 core/order_pool.go   |  22 +++---
 core/tx_list.go      |   4 +-
 core/tx_pool.go      | 156 +++++++++++++++++++++++++++++--------------
 4 files changed, 130 insertions(+), 74 deletions(-)

diff --git a/core/lending_pool.go b/core/lending_pool.go
index 69b89d83a937..fc0deb18215b 100644
--- a/core/lending_pool.go
+++ b/core/lending_pool.go
@@ -669,7 +669,7 @@ func (pool *LendingPool) add(tx *types.LendingTransaction, local bool) (bool, er
 	// If the transaction fails basic validation, discard it
 	if err := pool.validateTx(tx, local); err != nil {
 		log.Debug("Discarding invalid lending transaction", "hash", hash, "userAddress", tx.UserAddress, "status", tx.Status, "err", err)
-		invalidTxCounter.Inc(1)
+		invalidTxMeter.Mark(1)
 		return false, err
 	}
 	from, _ := types.LendingSender(pool.signer, tx) // already validated
@@ -683,12 +683,12 @@ func (pool *LendingPool) add(tx *types.LendingTransaction, local bool) (bool, er
 	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
 		inserted, old := list.Add(tx)
 		if !inserted {
-			pendingDiscardCounter.Inc(1)
+			pendingDiscardMeter.Mark(1)
 			return false, ErrPendingNonceTooLow
 		}
 		if old != nil {
 			delete(pool.all, old.Hash())
-			pendingReplaceCounter.Inc(1)
+			pendingReplaceMeter.Mark(1)
 		}
 		pool.all[tx.Hash()] = tx
 		pool.journalTx(from, tx)
@@ -724,13 +724,13 @@ func (pool *LendingPool) enqueueTx(hash common.Hash, tx *types.LendingTransactio
 	inserted, old := pool.queue[from].Add(tx)
 	if !inserted {
 		// An older transaction was better, discard this
-		queuedDiscardCounter.Inc(1)
+		queuedDiscardMeter.Mark(1)
 		return false, ErrPendingNonceTooLow
 	}
 	// Discard any previous transaction and mark this
 	if old != nil {
 		delete(pool.all, old.Hash())
-		queuedReplaceCounter.Inc(1)
+		queuedReplaceMeter.Mark(1)
 	}
 	pool.all[hash] = tx
 	return old != nil, nil
@@ -762,13 +762,13 @@ func (pool *LendingPool) promoteTx(addr common.Address, hash common.Hash, tx *ty
 	if !inserted {
 		// An older transaction was better, discard this
 		delete(pool.all, hash)
-		pendingDiscardCounter.Inc(1)
+		pendingDiscardMeter.Mark(1)
 		return
 	}
 	// Otherwise discard any previous transaction and mark this
 	if old != nil {
 		delete(pool.all, old.Hash())
-		pendingReplaceCounter.Inc(1)
+		pendingReplaceMeter.Mark(1)
 	}
 	// Failsafe to work around direct pending inserts (tests)
 	if pool.all[hash] == nil {
@@ -979,7 +979,7 @@ func (pool *LendingPool) promoteExecutables(accounts []common.Address) {
 				hash := tx.Hash()
 				delete(pool.all, hash)
-				queuedRateLimitCounter.Inc(1)
+				queuedRateLimitMeter.Mark(1)
 				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
 			}
 		}
@@ -1055,7 +1055,7 @@ func (pool *LendingPool) promoteExecutables(accounts []common.Address) {
 			}
 		}
 	}
-		pendingRateLimitCounter.Inc(int64(pendingBeforeCap - pending))
+		pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
 	}
 	// If we've queued more transactions than the hard limit, drop oldest ones
 	queued := uint64(0)
@@ -1085,7 +1085,7 @@ func (pool *LendingPool) promoteExecutables(accounts []common.Address) {
 				pool.removeTx(tx.Hash())
 			}
 			drop -= size
-
queuedRateLimitCounter.Inc(int64(size)) + queuedRateLimitMeter.Mark(int64(size)) continue } // Otherwise drop only last few transactions @@ -1093,7 +1093,7 @@ func (pool *LendingPool) promoteExecutables(accounts []common.Address) { for i := len(txs) - 1; i >= 0 && drop > 0; i-- { pool.removeTx(txs[i].Hash()) drop-- - queuedRateLimitCounter.Inc(1) + queuedRateLimitMeter.Mark(1) } } } diff --git a/core/order_pool.go b/core/order_pool.go index c72e3aeeb101..c8708149b89c 100644 --- a/core/order_pool.go +++ b/core/order_pool.go @@ -578,7 +578,7 @@ func (pool *OrderPool) add(tx *types.OrderTransaction, local bool) (bool, error) // If the transaction fails basic validation, discard it if err := pool.validateTx(tx, local); err != nil { log.Debug("Discarding invalid order transaction", "hash", hash, "userAddress", tx.UserAddress().Hex(), "status", tx.Status, "err", err) - invalidTxCounter.Inc(1) + invalidTxMeter.Mark(1) return false, err } from, _ := types.OrderSender(pool.signer, tx) // already validated @@ -592,12 +592,12 @@ func (pool *OrderPool) add(tx *types.OrderTransaction, local bool) (bool, error) if list := pool.pending[from]; list != nil && list.Overlaps(tx) { inserted, old := list.Add(tx) if !inserted { - pendingDiscardCounter.Inc(1) + pendingDiscardMeter.Mark(1) return false, ErrPendingNonceTooLow } if old != nil { delete(pool.all, old.Hash()) - pendingReplaceCounter.Inc(1) + pendingReplaceMeter.Mark(1) } pool.all[tx.Hash()] = tx pool.journalTx(from, tx) @@ -635,13 +635,13 @@ func (pool *OrderPool) enqueueTx(hash common.Hash, tx *types.OrderTransaction) ( inserted, old := pool.queue[from].Add(tx) if !inserted { // An older transaction was better, discard this - queuedDiscardCounter.Inc(1) + queuedDiscardMeter.Mark(1) return false, ErrPendingNonceTooLow } // Discard any previous transaction and mark this if old != nil { delete(pool.all, old.Hash()) - queuedReplaceCounter.Inc(1) + queuedReplaceMeter.Mark(1) } pool.all[hash] = tx return old != nil, nil @@ -674,13 +674,13 @@ func (pool *OrderPool) promoteTx(addr common.Address, hash common.Hash, tx *type if !inserted { // An older transaction was better, discard this delete(pool.all, hash) - pendingDiscardCounter.Inc(1) + pendingDiscardMeter.Mark(1) return } // Otherwise discard any previous transaction and mark this if old != nil { delete(pool.all, old.Hash()) - pendingReplaceCounter.Inc(1) + pendingReplaceMeter.Mark(1) } // Failsafe to work around direct pending inserts (tests) if pool.all[hash] == nil { @@ -895,7 +895,7 @@ func (pool *OrderPool) promoteExecutables(accounts []common.Address) { hash := tx.Hash() delete(pool.all, hash) - queuedRateLimitCounter.Inc(1) + queuedRateLimitMeter.Mark(1) log.Debug("Removed cap-exceeding queued transaction", "addr", tx.UserAddress().Hex(), "nonce", tx.Nonce(), "ohash", tx.OrderHash().Hex(), "status", tx.Status(), "orderid", tx.OrderID()) } } @@ -972,7 +972,7 @@ func (pool *OrderPool) promoteExecutables(accounts []common.Address) { } } } - pendingRateLimitCounter.Inc(int64(pendingBeforeCap - pending)) + pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending)) } // If we've queued more transactions than the hard limit, drop oldest ones queued := uint64(0) @@ -1002,7 +1002,7 @@ func (pool *OrderPool) promoteExecutables(accounts []common.Address) { pool.removeTx(tx.Hash()) } drop -= size - queuedRateLimitCounter.Inc(int64(size)) + queuedRateLimitMeter.Mark(int64(size)) continue } // Otherwise drop only last few transactions @@ -1010,7 +1010,7 @@ func (pool *OrderPool) promoteExecutables(accounts 
[]common.Address) { for i := len(txs) - 1; i >= 0 && drop > 0; i-- { pool.removeTx(txs[i].Hash()) drop-- - queuedRateLimitCounter.Inc(1) + queuedRateLimitMeter.Mark(1) } } } diff --git a/core/tx_list.go b/core/tx_list.go index b240fc2b972d..2c3c33eb59c0 100644 --- a/core/tx_list.go +++ b/core/tx_list.go @@ -429,9 +429,9 @@ func (l *txPricedList) Put(tx *types.Transaction) { // Removed notifies the prices transaction list that an old transaction dropped // from the pool. The list will just keep a counter of stale objects and update // the heap if a large enough ratio of transactions go stale. -func (l *txPricedList) Removed() { +func (l *txPricedList) Removed(count int) { // Bump the stale counter, but exit if still too low (< 25%) - l.stales++ + l.stales += count if l.stales <= len(*l.items)/4 { return } diff --git a/core/tx_pool.go b/core/tx_pool.go index 02677fbc2b8e..016b2cda450e 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -94,20 +94,25 @@ var ( var ( // Metrics for the pending pool - pendingDiscardCounter = metrics.NewRegisteredCounter("txpool/pending/discard", nil) - pendingReplaceCounter = metrics.NewRegisteredCounter("txpool/pending/replace", nil) - pendingRateLimitCounter = metrics.NewRegisteredCounter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting - pendingNofundsCounter = metrics.NewRegisteredCounter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds + pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil) + pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil) + pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting + pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds // Metrics for the queued pool - queuedDiscardCounter = metrics.NewRegisteredCounter("txpool/queued/discard", nil) - queuedReplaceCounter = metrics.NewRegisteredCounter("txpool/queued/replace", nil) - queuedRateLimitCounter = metrics.NewRegisteredCounter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting - queuedNofundsCounter = metrics.NewRegisteredCounter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds + queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil) + queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil) + queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting + queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds // General tx metrics - invalidTxCounter = metrics.NewRegisteredCounter("txpool/invalid", nil) - underpricedTxCounter = metrics.NewRegisteredCounter("txpool/underpriced", nil) + validMeter = metrics.NewRegisteredMeter("txpool/valid", nil) + invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil) + underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) + + pendingCounter = metrics.NewRegisteredCounter("txpool/pending", nil) + queuedCounter = metrics.NewRegisteredCounter("txpool/queued", nil) + localCounter = metrics.NewRegisteredCounter("txpool/local", nil) ) // TxStatus is the current status of a transaction as seen by the pool. 
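The rename from *Counter to *Meter in the block above is a semantic change, not just cosmetic: in the go-metrics model this package follows, a meter records event rates (Mark feeds moving averages such as drops per second), while a counter holds a level that can move both ways, which is why the new pendingCounter, queuedCounter and localCounter track pool occupancy and the drop/discard metrics became meters. A small sketch of the distinction, assuming the vendored metrics package keeps the upstream registration API:

package main

import (
	"fmt"

	"github.com/XinFinOrg/XDPoSChain/metrics"
)

func main() {
	metrics.Enabled = true // registrations are no-ops unless enabled

	// Meters measure how often something happens: drop events over time.
	dropped := metrics.NewRegisteredMeter("example/dropped", nil)
	dropped.Mark(3) // three drop events

	// Counters measure a level that rises and falls: pool occupancy.
	pending := metrics.NewRegisteredCounter("example/pending", nil)
	pending.Inc(5) // five transactions promoted
	pending.Dec(2) // two mined away

	fmt.Println(dropped.Count(), pending.Count()) // 3 3
}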
@@ -760,7 +765,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { // If the transaction fails basic validation, discard it if err := pool.validateTx(tx, local); err != nil { log.Trace("Discarding invalid transaction", "hash", hash, "err", err) - invalidTxCounter.Inc(1) + invalidTxMeter.Mark(1) return false, err } from, _ := types.Sender(pool.signer, tx) // already validated @@ -773,14 +778,14 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { // If the new transaction is underpriced, don't accept it if !local && pool.priced.Underpriced(tx, pool.locals) { log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice()) - underpricedTxCounter.Inc(1) + underpricedTxMeter.Mark(1) return false, ErrUnderpriced } // New transaction is better than our worse ones, make room for it drop := pool.priced.Discard(pool.all.Count()-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals) for _, tx := range drop { log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice()) - underpricedTxCounter.Inc(1) + underpricedTxMeter.Mark(1) pool.removeTx(tx.Hash(), false) } } @@ -789,14 +794,14 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { // Nonce already pending, check if required price bump is met inserted, old := list.Add(tx, pool.config.PriceBump) if !inserted { - pendingDiscardCounter.Inc(1) + pendingDiscardMeter.Mark(1) return false, ErrReplaceUnderpriced } // New transaction is better, replace old one if old != nil { pool.all.Remove(old.Hash()) - pool.priced.Removed() - pendingReplaceCounter.Inc(1) + pool.priced.Removed(1) + pendingReplaceMeter.Mark(1) } pool.all.Add(tx) pool.priced.Put(tx) @@ -821,6 +826,9 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { pool.locals.add(from) } } + if local || pool.locals.contains(from) { + localCounter.Inc(1) + } pool.journalTx(from, tx) log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) @@ -839,14 +847,17 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) if !inserted { // An older transaction was better, discard this - queuedDiscardCounter.Inc(1) + queuedDiscardMeter.Mark(1) return false, ErrReplaceUnderpriced } // Discard any previous transaction and mark this if old != nil { pool.all.Remove(old.Hash()) - pool.priced.Removed() - queuedReplaceCounter.Inc(1) + pool.priced.Removed(1) + queuedReplaceMeter.Mark(1) + } else { + // Nothing was replaced, bump the queued counter + queuedCounter.Inc(1) } if pool.all.Get(hash) == nil { pool.all.Add(tx) @@ -882,17 +893,20 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T if !inserted { // An older transaction was better, discard this pool.all.Remove(hash) - pool.priced.Removed() + pool.priced.Removed(1) - pendingDiscardCounter.Inc(1) + pendingDiscardMeter.Mark(1) return false } // Otherwise discard any previous transaction and mark this if old != nil { pool.all.Remove(old.Hash()) - pool.priced.Removed() + pool.priced.Removed(1) - pendingReplaceCounter.Inc(1) + pendingReplaceMeter.Mark(1) + } else { + // Nothing was replaced, bump the pending counter + pendingCounter.Inc(1) } // Failsafe to work around direct pending inserts (tests) if pool.all.Get(hash) == nil { @@ -919,8 +933,11 @@ func (pool *TxPool) promoteSpecialTx(addr common.Address, tx *types.Transaction) // Otherwise discard 
any previous transaction and mark this if old != nil { pool.all.Remove(old.Hash()) - pool.priced.Removed() - pendingReplaceCounter.Inc(1) + pool.priced.Removed(1) + pendingReplaceMeter.Mark(1) + } else { + // Nothing was replaced, bump the pending counter + pendingCounter.Inc(1) } list.txs.Put(tx) if cost := tx.Cost(); list.costcap.Cmp(cost) < 0 { @@ -981,6 +998,8 @@ func (pool *TxPool) addTx(tx *types.Transaction, local bool) error { if err != nil { return err } + validMeter.Mark(1) + // If we added a new transaction, run promotion checks and return if !replace { from, _ := types.Sender(pool.signer, tx) // already validated @@ -1065,7 +1084,10 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { // Remove it from the list of known transactions pool.all.Remove(hash) if outofbound { - pool.priced.Removed() + pool.priced.Removed(1) + } + if pool.locals.contains(addr) { + localCounter.Dec(1) } // Remove the transaction from the pending lists and reset the account nonce if pending := pool.pending[addr]; pending != nil { @@ -1083,12 +1105,17 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { if nonce := tx.Nonce(); pool.pendingState.GetNonce(addr) > nonce { pool.pendingState.SetNonce(addr, nonce) } + // Reduce the pending counter + pendingCounter.Dec(int64(1 + len(invalids))) return } } // Transaction is in the future queue if future := pool.queue[addr]; future != nil { - future.Remove(tx) + if removed, _ := future.Remove(tx); removed { + // Reduce the queued counter + queuedCounter.Dec(1) + } if future.Empty() { delete(pool.queue, addr) } @@ -1121,11 +1148,11 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { continue // Just in case someone calls with a non existing account } // Drop all transactions that are deemed too old (low nonce) - for _, tx := range list.Forward(pool.currentState.GetNonce(addr)) { + forwards := list.Forward(pool.currentState.GetNonce(addr)) + for _, tx := range forwards { hash := tx.Hash() - log.Trace("Removed old queued transaction", "hash", hash) pool.all.Remove(hash) - pool.priced.Removed() + log.Trace("Removed old queued transaction", "hash", hash) } // Drop all transactions that are too costly (low balance or out of gas) var number *big.Int = nil @@ -1135,28 +1162,38 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas, pool.trc21FeeCapacity, number) for _, tx := range drops { hash := tx.Hash() - log.Trace("Removed unpayable queued transaction", "hash", hash) pool.all.Remove(hash) - pool.priced.Removed() - queuedNofundsCounter.Inc(1) + log.Trace("Removed unpayable queued transaction", "hash", hash) } + queuedNofundsMeter.Mark(int64(len(drops))) + // Gather all executable transactions and promote them - for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) { + readies := list.Ready(pool.pendingState.GetNonce(addr)) + for _, tx := range readies { hash := tx.Hash() if pool.promoteTx(addr, hash, tx) { log.Trace("Promoting queued transaction", "hash", hash) promoted = append(promoted, tx) } } + queuedCounter.Dec(int64(len(readies))) + // Drop all transactions over the allowed limit + var caps types.Transactions if !pool.locals.contains(addr) { - for _, tx := range list.Cap(int(pool.config.AccountQueue)) { + caps = list.Cap(int(pool.config.AccountQueue)) + for _, tx := range caps { hash := tx.Hash() pool.all.Remove(hash) - pool.priced.Removed() - queuedRateLimitCounter.Inc(1) log.Trace("Removed cap-exceeding 
queued transaction", "hash", hash)
 			}
+			queuedRateLimitMeter.Mark(int64(len(caps)))
+		}
+		// Mark all the items dropped as removed
+		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
+		queuedCounter.Dec(int64(len(forwards) + len(drops) + len(caps)))
+		if pool.locals.contains(addr) {
+			localCounter.Dec(int64(len(forwards) + len(drops) + len(caps)))
 		}
 		// Delete the entire queue entry if it became empty.
 		if list.Empty() {
@@ -1198,11 +1235,12 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 		for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
 			for i := 0; i < len(offenders)-1; i++ {
 				list := pool.pending[offenders[i]]
-				for _, tx := range list.Cap(list.Len() - 1) {
+
+				caps := list.Cap(list.Len() - 1)
+				for _, tx := range caps {
 					// Drop the transaction from the global pools too
 					hash := tx.Hash()
 					pool.all.Remove(hash)
-					pool.priced.Removed()

 					// Update the account nonce to the dropped transaction
 					if nonce := tx.Nonce(); pool.pendingState.GetNonce(offenders[i]) > nonce {
@@ -1210,6 +1248,11 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 					}
 					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
 				}
+				pool.priced.Removed(len(caps))
+				pendingCounter.Dec(int64(len(caps)))
+				if pool.locals.contains(offenders[i]) {
+					localCounter.Dec(int64(len(caps)))
+				}
 				pending--
 			}
 		}
@@ -1220,11 +1263,12 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
 			for _, addr := range offenders {
 				list := pool.pending[addr]
-				for _, tx := range list.Cap(list.Len() - 1) {
+
+				caps := list.Cap(list.Len() - 1)
+				for _, tx := range caps {
 					// Drop the transaction from the global pools too
 					hash := tx.Hash()
 					pool.all.Remove(hash)
-					pool.priced.Removed()

 					// Update the account nonce to the dropped transaction
 					if nonce := tx.Nonce(); pool.pendingState.GetNonce(addr) > nonce {
@@ -1232,11 +1276,16 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 					}
 					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
 				}
+				pool.priced.Removed(len(caps))
+				pendingCounter.Dec(int64(len(caps)))
+				if pool.locals.contains(addr) {
+					localCounter.Dec(int64(len(caps)))
+				}
 				pending--
 			}
 		}
 	}
-	pendingRateLimitCounter.Inc(int64(pendingBeforeCap - pending))
+	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
 }
 // If we've queued more transactions than the hard limit, drop oldest ones
 queued := uint64(0)
@@ -1266,7 +1315,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 				pool.removeTx(tx.Hash(), true)
 			}
 			drop -= size
-			queuedRateLimitCounter.Inc(int64(size))
+			queuedRateLimitMeter.Mark(int64(size))
 			continue
 		}
 		// Otherwise drop only last few transactions
@@ -1274,7 +1323,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
 			pool.removeTx(txs[i].Hash(), true)
 			drop--
-			queuedRateLimitCounter.Inc(1)
+			queuedRateLimitMeter.Mark(1)
 		}
 	}
 }
@@ -1289,11 +1338,11 @@ func (pool *TxPool) demoteUnexecutables() {
 		nonce := pool.currentState.GetNonce(addr)

 		// Drop all transactions that are deemed too old (low nonce)
-		for _, tx := range list.Forward(nonce) {
+		olds := list.Forward(nonce)
+		for _, tx := range olds {
 			hash := tx.Hash()
-			log.Trace("Removed old pending transaction", "hash", hash)
 			pool.all.Remove(hash)
-			pool.priced.Removed()
+			log.Trace("Removed old pending transaction", "hash", hash)
 		}
 		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
 		var number *big.Int = nil
@@ -1305,21 +1354,28 @@ func (pool *TxPool) demoteUnexecutables() {
 			hash := tx.Hash()
 			log.Trace("Removed unpayable pending transaction", "hash", hash)
 			pool.all.Remove(hash)
-			pool.priced.Removed()
-			pendingNofundsCounter.Inc(1)
 		}
+		pool.priced.Removed(len(olds) + len(drops))
+		pendingNofundsMeter.Mark(int64(len(drops)))
+
 		for _, tx := range invalids {
 			hash := tx.Hash()
 			log.Trace("Demoting pending transaction", "hash", hash)
 			pool.enqueueTx(hash, tx)
 		}
+		pendingCounter.Dec(int64(len(olds) + len(drops) + len(invalids)))
+		if pool.locals.contains(addr) {
+			localCounter.Dec(int64(len(olds) + len(drops) + len(invalids)))
+		}
 		// If there's a gap in front, alert (should never happen) and postpone all transactions
 		if list.Len() > 0 && list.txs.Get(nonce) == nil {
-			for _, tx := range list.Cap(0) {
+			gapped := list.Cap(0)
+			for _, tx := range gapped {
 				hash := tx.Hash()
 				log.Warn("Demoting invalidated transaction", "hash", hash)
 				pool.enqueueTx(hash, tx)
 			}
+			pendingCounter.Dec(int64(len(gapped)))
 		}
 		// Delete the entire queue entry if it became empty.
 		if list.Empty() {

From 74c72363d0cd86f8a1b583670cdaea8d0ba5ea06 Mon Sep 17 00:00:00 2001
From: Daniel Liu
Date: Fri, 10 May 2024 15:48:14 +0800
Subject: [PATCH 18/23] core: move TxPool reorg and events to background
 goroutine (#19705)

---
 core/tx_pool.go      | 819 ++++++++++++++++++++++++++-----------------
 core/tx_pool_test.go | 164 +++------
 2 files changed, 537 insertions(+), 446 deletions(-)

diff --git a/core/tx_pool.go b/core/tx_pool.go
index 016b2cda450e..941c2825b1d0 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -228,16 +228,14 @@
 // current state) and future transactions. Transactions move between those
 // two states over time as they are received and processed.
 type TxPool struct {
-	config       TxPoolConfig
-	chainconfig  *params.ChainConfig
-	chain        blockChain
-	gasPrice     *big.Int
-	txFeed       event.Feed
-	scope        event.SubscriptionScope
-	chainHeadCh  chan ChainHeadEvent
-	chainHeadSub event.Subscription
-	signer       types.Signer
-	mu           sync.RWMutex
+	config      TxPoolConfig
+	chainconfig *params.ChainConfig
+	chain       blockChain
+	gasPrice    *big.Int
+	txFeed      event.Feed
+	scope       event.SubscriptionScope
+	signer      types.Signer
+	mu          sync.RWMutex

 	currentState *state.StateDB      // Current state in the blockchain head
 	pendingState *state.ManagedState // Pending state tracking virtual nonces
@@ -252,13 +250,23 @@ type TxPool struct {
 	all    *txLookup     // All transactions to allow lookups
 	priced *txPricedList // All transactions sorted by price

-	wg sync.WaitGroup // for shutdown sync
+	chainHeadCh     chan ChainHeadEvent
+	chainHeadSub    event.Subscription
+	reqResetCh      chan *txpoolResetRequest
+	reqPromoteCh    chan *accountSet
+	queueTxEventCh  chan *types.Transaction
+	reorgDoneCh     chan chan struct{}
+	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
+	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop

-	homestead bool
 	IsSigner func(address common.Address) bool

 	trc21FeeCapacity map[common.Address]*big.Int
 }

+type txpoolResetRequest struct {
+	oldHead, newHead *types.Header
+}
+
 // NewTxPool creates a new transaction pool to gather, sort and filter inbound
 // transactions from the network.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool { @@ -276,6 +284,11 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block beats: make(map[common.Address]time.Time), all: newTxLookup(), chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize), + reqResetCh: make(chan *txpoolResetRequest), + reqPromoteCh: make(chan *accountSet), + queueTxEventCh: make(chan *types.Transaction), + reorgDoneCh: make(chan chan struct{}), + reorgShutdownCh: make(chan struct{}), gasPrice: new(big.Int).SetUint64(config.PriceLimit), trc21FeeCapacity: map[common.Address]*big.Int{}, } @@ -287,6 +300,10 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block pool.priced = newTxPricedList(pool.all) pool.reset(nil, chain.CurrentBlock().Header()) + // Start the reorg loop early so it can handle requests generated during journal loading. + pool.wg.Add(1) + go pool.scheduleReorgLoop() + // If local transactions and journaling is enabled, load from disk if !config.NoLocals && config.Journal != "" { pool.journal = newTxJournal(config.Journal) @@ -298,10 +315,9 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block log.Warn("Failed to rotate transaction journal", "err", err) } } - // Subscribe events from blockchain - pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh) - // Start the event loop and return + // Subscribe events from blockchain and start the main event loop. + pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh) pool.wg.Add(1) go pool.loop() @@ -314,41 +330,34 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block func (pool *TxPool) loop() { defer pool.wg.Done() - // Start the stats reporting and transaction eviction tickers - var prevPending, prevQueued, prevStales int - - report := time.NewTicker(statsReportInterval) + var ( + prevPending, prevQueued, prevStales int + // Start the stats reporting and transaction eviction tickers + report = time.NewTicker(statsReportInterval) + evict = time.NewTicker(evictionInterval) + journal = time.NewTicker(pool.config.Rejournal) + // Track the previous head headers for transaction reorgs + head = pool.chain.CurrentBlock() + ) defer report.Stop() - - evict := time.NewTicker(evictionInterval) defer evict.Stop() - - journal := time.NewTicker(pool.config.Rejournal) defer journal.Stop() - // Track the previous head headers for transaction reorgs - head := pool.chain.CurrentBlock() - - // Keep waiting for and reacting to the various events for { select { // Handle ChainHeadEvent case ev := <-pool.chainHeadCh: if ev.Block != nil { - pool.mu.Lock() - if pool.chainconfig.IsHomestead(ev.Block.Number()) { - pool.homestead = true - } - pool.reset(head.Header(), ev.Block.Header()) + pool.requestReset(head.Header(), ev.Block.Header()) head = ev.Block - - pool.mu.Unlock() } - // Be unsubscribed due to system stopped + + // System shutdown. case <-pool.chainHeadSub.Err(): + close(pool.reorgShutdownCh) return - // Handle stats reporting ticks + // Handle stats reporting ticks case <-report.C: pool.mu.RLock() pending, queued := pool.stats() @@ -390,116 +399,6 @@ func (pool *TxPool) loop() { } } -// lockedReset is a wrapper around reset to allow calling it in a thread safe -// manner. This method is only ever used in the tester! 
-func (pool *TxPool) lockedReset(oldHead, newHead *types.Header) { - pool.mu.Lock() - defer pool.mu.Unlock() - - pool.reset(oldHead, newHead) -} - -// reset retrieves the current state of the blockchain and ensures the content -// of the transaction pool is valid with regard to the chain state. -func (pool *TxPool) reset(oldHead, newHead *types.Header) { - // If we're reorging an old state, reinject all dropped transactions - var reinject types.Transactions - - if oldHead != nil && oldHead.Hash() != newHead.ParentHash { - // If the reorg is too deep, avoid doing it (will happen during fast sync) - oldNum := oldHead.Number.Uint64() - newNum := newHead.Number.Uint64() - - if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { - log.Debug("Skipping deep transaction reorg", "depth", depth) - } else { - // Reorg seems shallow enough to pull in all transactions into memory - var discarded, included types.Transactions - - var ( - rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) - add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) - ) - if rem == nil { - // This can happen if a setHead is performed, where we simply discard the old - // head from the chain. - // If that is the case, we don't have the lost transactions any more, and - // there's nothing to add - if newNum < oldNum { - // If the reorg ended up on a lower number, it's indicative of setHead being the cause - log.Debug("Skipping transaction reset caused by setHead", - "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) - } else { - // If we reorged to a same or higher number, then it's not a case of setHead - log.Warn("Transaction pool reset with missing oldhead", - "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) - } - return - } - for rem.NumberU64() > add.NumberU64() { - discarded = append(discarded, rem.Transactions()...) - if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { - log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) - return - } - } - for add.NumberU64() > rem.NumberU64() { - included = append(included, add.Transactions()...) - if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { - log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) - return - } - } - for rem.Hash() != add.Hash() { - discarded = append(discarded, rem.Transactions()...) - if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { - log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) - return - } - included = append(included, add.Transactions()...) 
- if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { - log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) - return - } - } - reinject = types.TxDifference(discarded, included) - } - } - // Initialize the internal state to the current head - if newHead == nil { - newHead = pool.chain.CurrentBlock().Header() // Special case during testing - } - statedb, err := pool.chain.StateAt(newHead.Root) - if err != nil { - log.Error("Failed to reset txpool state", "err", err) - return - } - pool.currentState = statedb - pool.trc21FeeCapacity = state.GetTRC21FeeCapacityFromStateWithCache(newHead.Root, statedb) - pool.pendingState = state.ManageState(statedb) - pool.currentMaxGas = newHead.GasLimit - - // Inject any transactions discarded due to reorgs - log.Debug("Reinjecting stale transactions", "count", len(reinject)) - senderCacher.recover(pool.signer, reinject) - pool.addTxsLocked(reinject, false) - - // validate the pool of pending transactions, this will remove - // any transactions that have been included in the block or - // have been invalidated because of another transaction (e.g. - // higher gas price) - pool.demoteUnexecutables() - - // Update all accounts to the latest known pending nonce - for addr, list := range pool.pending { - txs := list.Flatten() // Heavy but will be cached and is needed by the miner anyway - pool.pendingState.SetNonce(addr, txs[len(txs)-1].Nonce()+1) - } - // Check the queue and move transactions over to the pending if possible - // or remove those that have become invalid - pool.promoteExecutables(nil) -} - // Stop terminates the transaction pool. func (pool *TxPool) Stop() { // Unsubscribe all subscriptions registered from txpool @@ -705,7 +604,8 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { } if tx.To() == nil || (tx.To() != nil && !tx.IsSpecialTransaction()) { - intrGas, err := IntrinsicGas(tx.Data(), tx.To() == nil, pool.homestead) + // Ensure the transaction has more gas than the basic tx fee. + intrGas, err := IntrinsicGas(tx.Data(), tx.To() == nil, true) if err != nil { return err } @@ -715,7 +615,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { } // Check zero gas price. - if tx.GasPrice().Cmp(new(big.Int).SetInt64(0)) == 0 { + if tx.GasPrice().Sign() == 0 { return ErrZeroGasPrice } @@ -746,15 +646,14 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { return nil } -// add validates a transaction and inserts it into the non-executable queue for -// later pending promotion and execution. If the transaction is a replacement for -// an already pending or queued one, it overwrites the previous and returns this -// so outer code doesn't uselessly call promote. +// add validates a transaction and inserts it into the non-executable queue for later +// pending promotion and execution. If the transaction is a replacement for an already +// pending or queued one, it overwrites the previous transaction if its price is higher. // // If a newly added transaction is marked as local, its sending account will be -// whitelisted, preventing any associated transaction from being dropped out of -// the pool due to pricing constraints. -func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { +// whitelisted, preventing any associated transaction from being dropped out of the pool +// due to pricing constraints. 
+func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) { // If the transaction is already known, discard it hash := tx.Hash() if pool.all.Get(hash) != nil { @@ -768,10 +667,12 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { invalidTxMeter.Mark(1) return false, err } + from, _ := types.Sender(pool.signer, tx) // already validated if tx.IsSpecialTransaction() && pool.IsSigner != nil && pool.IsSigner(from) && pool.pendingState.GetNonce(from) == tx.Nonce() { return pool.promoteSpecialTx(from, tx) } + // If the transaction pool is full, discard underpriced transactions if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue { log.Debug("Add transaction to pool full", "hash", hash, "nonce", tx.Nonce()) @@ -789,7 +690,8 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { pool.removeTx(tx.Hash(), false) } } - // If the transaction is replacing an already pending one, do directly + + // Try to replace an existing transaction in the pending pool if list := pool.pending[from]; list != nil && list.Overlaps(tx) { // Nonce already pending, check if required price bump is met inserted, old := list.Add(tx, pool.config.PriceBump) @@ -806,19 +708,17 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { pool.all.Add(tx) pool.priced.Put(tx) pool.journalTx(from, tx) - + pool.queueTxEvent(tx) log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) - - // We've directly injected a replacement transaction, notify subsystems - go pool.txFeed.Send(NewTxsEvent{types.Transactions{tx}}) - return old != nil, nil } + // New transaction isn't replacing a pending one, push into queue - replace, err := pool.enqueueTx(hash, tx) + replaced, err = pool.enqueueTx(hash, tx) if err != nil { return false, err } + // Mark local addresses and journal local transactions if local { if !pool.locals.contains(from) { @@ -832,7 +732,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { pool.journalTx(from, tx) log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) - return replace, nil + return replaced, nil } // enqueueTx inserts a new transaction into the non-executable transaction queue. @@ -957,92 +857,83 @@ func (pool *TxPool) promoteSpecialTx(addr common.Address, tx *types.Transaction) return true, nil } -// AddLocal enqueues a single transaction into the pool if it is valid, marking -// the sender as a local one in the mean time, ensuring it goes around the local -// pricing constraints. -func (pool *TxPool) AddLocal(tx *types.Transaction) error { - return pool.addTx(tx, !pool.config.NoLocals) -} - -// AddRemote enqueues a single transaction into the pool if it is valid. If the -// sender is not among the locally tracked ones, full pricing constraints will -// apply. -func (pool *TxPool) AddRemote(tx *types.Transaction) error { - return pool.addTx(tx, false) +// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the +// senders as a local ones, ensuring they go around the local pricing constraints. +// +// This method is used to add transactions from the RPC API and performs synchronous pool +// reorganization and event propagation. 
+func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
+	return pool.addTxs(txs, !pool.config.NoLocals, true)
 }

-// AddLocals enqueues a batch of transactions into the pool if they are valid,
-// marking the senders as a local ones in the mean time, ensuring they go around
-// the local pricing constraints.
-func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
-	return pool.addTxs(txs, !pool.config.NoLocals)
+// AddLocal enqueues a single local transaction into the pool if it is valid. This is
+// a convenience wrapper around AddLocals.
+func (pool *TxPool) AddLocal(tx *types.Transaction) error {
+	errs := pool.AddLocals([]*types.Transaction{tx})
+	return errs[0]
 }

-// AddRemotes enqueues a batch of transactions into the pool if they are valid.
-// If the senders are not among the locally tracked ones, full pricing constraints
-// will apply.
+// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
+// senders are not among the locally tracked ones, full pricing constraints will apply.
+//
+// This method is used to add transactions from the p2p network and does not wait for pool
+// reorganization and internal event propagation.
 func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
-	return pool.addTxs(txs, false)
+	return pool.addTxs(txs, false, false)
 }

-// addTx enqueues a single transaction into the pool if it is valid.
-func (pool *TxPool) addTx(tx *types.Transaction, local bool) error {
-	// Cache sender in transaction before obtaining lock (pool.signer is immutable)
-	types.Sender(pool.signer, tx)
-
-	pool.mu.Lock()
-	defer pool.mu.Unlock()
+// This is like AddRemotes, but waits for pool reorganization. Tests use this method.
+func (pool *TxPool) addRemotesSync(txs []*types.Transaction) []error {
+	return pool.addTxs(txs, false, true)
+}

-	// Try to inject the transaction and update any state
-	replace, err := pool.add(tx, local)
-	if err != nil {
-		return err
-	}
-	validMeter.Mark(1)
+// This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
+func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
+	errs := pool.addRemotesSync([]*types.Transaction{tx})
+	return errs[0]
+}

-	// If we added a new transaction, run promotion checks and return
-	if !replace {
-		from, _ := types.Sender(pool.signer, tx) // already validated
-		pool.promoteExecutables([]common.Address{from})
-	}
-	return nil
+// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
+// wrapper around AddRemotes.
+//
+// Deprecated: use AddRemotes
+func (pool *TxPool) AddRemote(tx *types.Transaction) error {
+	errs := pool.AddRemotes([]*types.Transaction{tx})
+	return errs[0]
 }

 // addTxs attempts to queue a batch of transactions if they are valid.
-func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) []error {
+func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
 	// Cache senders in transactions before obtaining lock (pool.signer is immutable)
 	for _, tx := range txs {
 		types.Sender(pool.signer, tx)
 	}
+
 	pool.mu.Lock()
-	defer pool.mu.Unlock()
+	errs, dirtyAddrs := pool.addTxsLocked(txs, local)
+	pool.mu.Unlock()

-	return pool.addTxsLocked(txs, local)
+	done := pool.requestPromoteExecutables(dirtyAddrs)
+	if sync {
+		<-done
+	}
+	return errs
 }

-// addTxsLocked attempts to queue a batch of transactions if they are valid,
-// whilst assuming the transaction pool lock is already held.
-func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error {
-	// Add the batch of transactions, tracking the accepted ones
-	dirty := make(map[common.Address]struct{})
+// addTxsLocked attempts to queue a batch of transactions if they are valid.
+// The transaction pool lock must be held.
+func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
+	dirty := newAccountSet(pool.signer)
 	errs := make([]error, len(txs))
-	for i, tx := range txs {
-		var replace bool
-		if replace, errs[i] = pool.add(tx, local); errs[i] == nil && !replace {
-			from, _ := types.Sender(pool.signer, tx) // already validated
-			dirty[from] = struct{}{}
-		}
-	}
-	// Only reprocess the internal state if something was actually added
-	if len(dirty) > 0 {
-		addrs := make([]common.Address, 0, len(dirty))
-		for addr := range dirty {
-			addrs = append(addrs, addr)
+	for i, tx := range txs {
+		replaced, err := pool.add(tx, local)
+		errs[i] = err
+		if err == nil && !replaced {
+			dirty.addTx(tx)
 		}
-		pool.promoteExecutables(addrs)
 	}
-	return errs
+	validMeter.Mark(int64(len(dirty.accounts)))
+	return errs, dirty
 }

 // Status returns the status (unknown/pending/queued) of a batch of transactions
@@ -1065,8 +956,7 @@ func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
 	return status
 }

-// Get returns a transaction if it is contained in the pool
-// and nil otherwise.
+// Get returns a transaction if it is contained in the pool and nil otherwise.
 func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
 	return pool.all.Get(hash)
 }
@@ -1122,10 +1012,259 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
 	}
 }

+// requestReset requests a pool reset to the new head block.
+// The returned channel is closed when the reset has occurred.
+func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
+	select {
+	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
+		return <-pool.reorgDoneCh
+	case <-pool.reorgShutdownCh:
+		return pool.reorgShutdownCh
+	}
+}
+
+// requestPromoteExecutables requests transaction promotion checks for the given addresses.
+// The returned channel is closed when the promotion checks have occurred.
+func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
+	select {
+	case pool.reqPromoteCh <- set:
+		return <-pool.reorgDoneCh
+	case <-pool.reorgShutdownCh:
+		return pool.reorgShutdownCh
+	}
+}
+
+// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
+func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
+	select {
+	case pool.queueTxEventCh <- tx:
+	case <-pool.reorgShutdownCh:
+	}
+}
+
+// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
+// call those methods directly, but request them being run using requestReset and
+// requestPromoteExecutables instead.
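The request helpers above share one handshake, and scheduleReorgLoop itself follows: a caller hands its request to the loop and receives back the loop's next completion channel, which is closed once the batch containing that request has been processed; the reorgShutdownCh case guarantees that a caller racing with Stop gets an already-closed channel instead of deadlocking, and addTxs builds its optional synchronous wait out of exactly this. A runnable sketch of the pattern under those assumptions (the single-shot loop below stands in for scheduleReorgLoop):

package main

import "fmt"

func main() {
	var (
		reqCh    = make(chan string)
		doneCh   = make(chan chan struct{})
		shutdown = make(chan struct{})
	)
	// request mirrors requestReset/requestPromoteExecutables: deliver the
	// request and collect a completion channel, or fall back to the closed
	// shutdown channel so <-request(...) can never block forever.
	request := func(msg string) chan struct{} {
		select {
		case reqCh <- msg:
			return <-doneCh
		case <-shutdown:
			return shutdown
		}
	}

	go func() { // serves a single request, then shuts down
		msg := <-reqCh
		done := make(chan struct{})
		doneCh <- done
		fmt.Println("handled", msg)
		close(done)     // release synchronous waiters
		close(shutdown) // simulate pool.Stop tearing the loop down
	}()

	<-request("reset")      // completes normally
	<-request("after stop") // loop is gone: the closed channel unblocks us
	fmt.Println("no deadlock after shutdown")
}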
+func (pool *TxPool) scheduleReorgLoop() { + defer pool.wg.Done() + + var ( + curDone chan struct{} // non-nil while runReorg is active + nextDone = make(chan struct{}) + launchNextRun bool + reset *txpoolResetRequest + dirtyAccounts *accountSet + queuedEvents = make(map[common.Address]*txSortedMap) + ) + for { + // Launch next background reorg if needed + if curDone == nil && launchNextRun { + // Run the background reorg and announcements + go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents) + + // Prepare everything for the next round of reorg + curDone, nextDone = nextDone, make(chan struct{}) + launchNextRun = false + + reset, dirtyAccounts = nil, nil + queuedEvents = make(map[common.Address]*txSortedMap) + } + + select { + case req := <-pool.reqResetCh: + // Reset request: update head if request is already pending. + if reset == nil { + reset = req + } else { + reset.newHead = req.newHead + } + launchNextRun = true + pool.reorgDoneCh <- nextDone + + case req := <-pool.reqPromoteCh: + // Promote request: update address set if request is already pending. + if dirtyAccounts == nil { + dirtyAccounts = req + } else { + dirtyAccounts.merge(req) + } + launchNextRun = true + pool.reorgDoneCh <- nextDone + + case tx := <-pool.queueTxEventCh: + // Queue up the event, but don't schedule a reorg. It's up to the caller to + // request one later if they want the events sent. + addr, _ := types.Sender(pool.signer, tx) + if _, ok := queuedEvents[addr]; !ok { + queuedEvents[addr] = newTxSortedMap() + } + queuedEvents[addr].Put(tx) + + case <-curDone: + curDone = nil + + case <-pool.reorgShutdownCh: + // Wait for current run to finish. + if curDone != nil { + <-curDone + } + close(nextDone) + return + } + } +} + +// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. +func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) { + defer close(done) + + var promoteAddrs []common.Address + if dirtyAccounts != nil { + promoteAddrs = dirtyAccounts.flatten() + } + pool.mu.Lock() + if reset != nil { + // Reset from the old head to the new, rescheduling any reorged transactions + pool.reset(reset.oldHead, reset.newHead) + + // Nonces were reset, discard any events that became stale + for addr := range events { + events[addr].Forward(pool.pendingState.GetNonce(addr)) + if events[addr].Len() == 0 { + delete(events, addr) + } + } + // Reset needs promote for all addresses + promoteAddrs = promoteAddrs[:0] + for addr := range pool.queue { + promoteAddrs = append(promoteAddrs, addr) + } + } + // Check for pending transactions for every account that sent new ones + promoted := pool.promoteExecutables(promoteAddrs) + for _, tx := range promoted { + addr, _ := types.Sender(pool.signer, tx) + if _, ok := events[addr]; !ok { + events[addr] = newTxSortedMap() + } + events[addr].Put(tx) + } + // If a new block appeared, validate the pool of pending transactions. This will + // remove any transaction that has been included in the block or was invalidated + // because of another transaction (e.g. higher gas price). + if reset != nil { + pool.demoteUnexecutables() + } + // Ensure pool.queue and pool.pending sizes stay within the configured limits. 
+ pool.truncatePending() + pool.truncateQueue() + + // Update all accounts to the latest known pending nonce + for addr, list := range pool.pending { + txs := list.Flatten() // Heavy but will be cached and is needed by the miner anyway + pool.pendingState.SetNonce(addr, txs[len(txs)-1].Nonce()+1) + } + pool.mu.Unlock() + + // Notify subsystems for newly added transactions + if len(events) > 0 { + var txs []*types.Transaction + for _, set := range events { + txs = append(txs, set.Flatten()...) + } + pool.txFeed.Send(NewTxsEvent{txs}) + } +} + +// reset retrieves the current state of the blockchain and ensures the content +// of the transaction pool is valid with regard to the chain state. +func (pool *TxPool) reset(oldHead, newHead *types.Header) { + // If we're reorging an old state, reinject all dropped transactions + var reinject types.Transactions + + if oldHead != nil && oldHead.Hash() != newHead.ParentHash { + // If the reorg is too deep, avoid doing it (will happen during fast sync) + oldNum := oldHead.Number.Uint64() + newNum := newHead.Number.Uint64() + + if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { + log.Debug("Skipping deep transaction reorg", "depth", depth) + } else { + // Reorg seems shallow enough to pull in all transactions into memory + var discarded, included types.Transactions + var ( + rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) + add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) + ) + if rem == nil { + // This can happen if a setHead is performed, where we simply discard the old + // head from the chain. + // If that is the case, we don't have the lost transactions any more, and + // there's nothing to add + if newNum < oldNum { + // If the reorg ended up on a lower number, it's indicative of setHead being the cause + log.Debug("Skipping transaction reset caused by setHead", + "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) + } else { + // If we reorged to a same or higher number, then it's not a case of setHead + log.Warn("Transaction pool reset with missing oldhead", + "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) + } + return + } + for rem.NumberU64() > add.NumberU64() { + discarded = append(discarded, rem.Transactions()...) + if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { + log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) + return + } + } + for add.NumberU64() > rem.NumberU64() { + included = append(included, add.Transactions()...) + if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { + log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) + return + } + } + for rem.Hash() != add.Hash() { + discarded = append(discarded, rem.Transactions()...) + if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { + log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) + return + } + included = append(included, add.Transactions()...) 
+				if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
+					log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
+					return
+				}
+			}
+			reinject = types.TxDifference(discarded, included)
+		}
+	}
+	// Initialize the internal state to the current head
+	if newHead == nil {
+		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
+	}
+	statedb, err := pool.chain.StateAt(newHead.Root)
+	if err != nil {
+		log.Error("Failed to reset txpool state", "err", err)
+		return
+	}
+	pool.currentState = statedb
+	pool.trc21FeeCapacity = state.GetTRC21FeeCapacityFromStateWithCache(newHead.Root, statedb)
+	pool.pendingState = state.ManageState(statedb)
+	pool.currentMaxGas = newHead.GasLimit
+
+	// Inject any transactions discarded due to reorgs
+	log.Debug("Reinjecting stale transactions", "count", len(reinject))
+	senderCacher.recover(pool.signer, reinject)
+	pool.addTxsLocked(reinject, false)
+}
+
 // promoteExecutables moves transactions that have become processable from the
 // future queue to the set of pending transactions. During this process, all
 // invalidated transactions (low nonce, low balance) are deleted.
-func (pool *TxPool) promoteExecutables(accounts []common.Address) {
+func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
 	log.Debug("start promoteExecutables")
 	defer func(start time.Time) {
 		log.Debug("end promoteExecutables", "time", common.PrettyDuration(time.Since(start)))
@@ -1134,13 +1273,6 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 	// Track the promoted transactions to broadcast them at once
 	var promoted []*types.Transaction
 
-	// Gather all the accounts potentially needing updates
-	if accounts == nil {
-		accounts = make([]common.Address, 0, len(pool.queue))
-		for addr := range pool.queue {
-			accounts = append(accounts, addr)
-		}
-	}
 	// Iterate over all accounts and promote any executable transactions
 	for _, addr := range accounts {
 		list := pool.queue[addr]
@@ -1200,69 +1332,46 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 			delete(pool.queue, addr)
 		}
 	}
-	// Notify subsystem for new promoted transactions.
-	if len(promoted) > 0 {
-		go pool.txFeed.Send(NewTxsEvent{promoted})
-	}
-	// If the pending limit is overflown, start equalizing allowances
+	return promoted
+}
+
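The equalization logic that moves into truncatePending is easier to see on bare integers: while the pool is over its global pending limit, it keeps taking transactions away from whichever non-local senders currently hold the most. A runnable toy of that greedy trimming (simplified bookkeeping, not the offender logic itself):

    package main

    import "fmt"

    // truncate models the fairness rule: shave whichever sender currently
    // has the most pending until the total fits the limit.
    func truncate(counts []int, limit int) []int {
        total := 0
        for _, c := range counts {
            total += c
        }
        for total > limit {
            biggest := 0
            for i := range counts {
                if counts[i] > counts[biggest] {
                    biggest = i
                }
            }
            if counts[biggest] == 0 {
                break // nothing left to drop
            }
            counts[biggest]--
            total--
        }
        return counts
    }

    func main() {
        fmt.Println(truncate([]int{7, 5, 2}, 10)) // [4 4 2]
    }

+// truncatePending removes transactions from the pending queue if the pool is above the
+// pending limit. The algorithm tries to reduce transaction counts by an approximately
+// equal number for all accounts with many pending transactions.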
+func (pool *TxPool) truncatePending() { pending := uint64(0) for _, list := range pool.pending { pending += uint64(list.Len()) } - if pending > pool.config.GlobalSlots { - pendingBeforeCap := pending - // Assemble a spam order to penalize large transactors first - spammers := prque.New(nil) - for addr, list := range pool.pending { - // Only evict transactions from high rollers - if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { - spammers.Push(addr, int64(list.Len())) - } - } - // Gradually drop transactions from offenders - offenders := []common.Address{} - for pending > pool.config.GlobalSlots && !spammers.Empty() { - // Retrieve the next offender if not local address - offender, _ := spammers.Pop() - offenders = append(offenders, offender.(common.Address)) - - // Equalize balances until all the same or below threshold - if len(offenders) > 1 { - // Calculate the equalization threshold for all current offenders - threshold := pool.pending[offender.(common.Address)].Len() - - // Iteratively reduce all offenders until below limit or threshold reached - for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold { - for i := 0; i < len(offenders)-1; i++ { - list := pool.pending[offenders[i]] - - caps := list.Cap(list.Len() - 1) - for _, tx := range caps { - // Drop the transaction from the global pools too - hash := tx.Hash() - pool.all.Remove(hash) - - // Update the account nonce to the dropped transaction - if nonce := tx.Nonce(); pool.pendingState.GetNonce(offenders[i]) > nonce { - pool.pendingState.SetNonce(offenders[i], nonce) - } - log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) - } - pool.priced.Removed(len(caps)) - pendingCounter.Dec(int64(len(caps))) - if pool.locals.contains(offenders[i]) { - localCounter.Dec(int64(len(caps))) - } - pending-- - } - } - } + if pending <= pool.config.GlobalSlots { + return + } + + pendingBeforeCap := pending + // Assemble a spam order to penalize large transactors first + spammers := prque.New(nil) + for addr, list := range pool.pending { + // Only evict transactions from high rollers + if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { + spammers.Push(addr, int64(list.Len())) } - // If still above threshold, reduce to limit or min allowance - if pending > pool.config.GlobalSlots && len(offenders) > 0 { - for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots { - for _, addr := range offenders { - list := pool.pending[addr] + } + // Gradually drop transactions from offenders + offenders := []common.Address{} + for pending > pool.config.GlobalSlots && !spammers.Empty() { + // Retrieve the next offender if not local address + offender, _ := spammers.Pop() + offenders = append(offenders, offender.(common.Address)) + + // Equalize balances until all the same or below threshold + if len(offenders) > 1 { + // Calculate the equalization threshold for all current offenders + threshold := pool.pending[offender.(common.Address)].Len() + + // Iteratively reduce all offenders until below limit or threshold reached + for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold { + for i := 0; i < len(offenders)-1; i++ { + list := pool.pending[offenders[i]] caps := list.Cap(list.Len() - 1) for _, tx := range caps { @@ -1271,60 +1380,93 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { pool.all.Remove(hash) 
 					// Update the account nonce to the dropped transaction
-					if nonce := tx.Nonce(); pool.pendingState.GetNonce(addr) > nonce {
-						pool.pendingState.SetNonce(addr, nonce)
+					if nonce := tx.Nonce(); pool.pendingState.GetNonce(offenders[i]) > nonce {
+						pool.pendingState.SetNonce(offenders[i], nonce)
 					}
 					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
 				}
 				pool.priced.Removed(len(caps))
 				pendingCounter.Dec(int64(len(caps)))
-				if pool.locals.contains(addr) {
+				if pool.locals.contains(offenders[i]) {
 					localCounter.Dec(int64(len(caps)))
 				}
 				pending--
 			}
 		}
 	}
-		pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
 	}
-	// If we've queued more transactions than the hard limit, drop oldest ones
+
+	// If still above threshold, reduce to limit or min allowance
+	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
+		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
+			for _, addr := range offenders {
+				list := pool.pending[addr]
+
+				caps := list.Cap(list.Len() - 1)
+				for _, tx := range caps {
+					// Drop the transaction from the global pools too
+					hash := tx.Hash()
+					pool.all.Remove(hash)
+
+					// Update the account nonce to the dropped transaction
+					if nonce := tx.Nonce(); pool.pendingState.GetNonce(addr) > nonce {
+						pool.pendingState.SetNonce(addr, nonce)
+					}
+					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
+				}
+				pool.priced.Removed(len(caps))
+				pendingCounter.Dec(int64(len(caps)))
+				if pool.locals.contains(addr) {
+					localCounter.Dec(int64(len(caps)))
+				}
+				pending--
+			}
+		}
+	}
+	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
+}
+
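truncateQueue, split out next, evicts per account in one of two modes: if an account's whole queue fits inside the remaining overflow it is dropped wholesale, otherwise only the last few transactions are trimmed. The same two modes on plain counters (hypothetical helper, not pool code):

    package main

    import "fmt"

    // drainQueue wipes whole queues that fit in the remaining overflow and
    // trims just the tail of the queue that does not.
    func drainQueue(queueLens []int, drop int) []int {
        for i := 0; i < len(queueLens) && drop > 0; i++ {
            if queueLens[i] <= drop {
                drop -= queueLens[i]
                queueLens[i] = 0 // drop the entire list
                continue
            }
            queueLens[i] -= drop // drop only the last few transactions
            drop = 0
        }
        return queueLens
    }

    func main() {
        fmt.Println(drainQueue([]int{3, 5, 4}, 6)) // [0 2 4]
    }

+// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.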
+func (pool *TxPool) truncateQueue() { queued := uint64(0) for _, list := range pool.queue { queued += uint64(list.Len()) } - if queued > pool.config.GlobalQueue { - // Sort all accounts with queued transactions by heartbeat - addresses := make(addressesByHeartbeat, 0, len(pool.queue)) - for addr := range pool.queue { - if !pool.locals.contains(addr) { // don't drop locals - addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) - } + if queued <= pool.config.GlobalQueue { + return + } + + // Sort all accounts with queued transactions by heartbeat + addresses := make(addressesByHeartbeat, 0, len(pool.queue)) + for addr := range pool.queue { + if !pool.locals.contains(addr) { // don't drop locals + addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) } - sort.Sort(addresses) + } + sort.Sort(addresses) - // Drop transactions until the total is below the limit or only locals remain - for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { - addr := addresses[len(addresses)-1] - list := pool.queue[addr.address] + // Drop transactions until the total is below the limit or only locals remain + for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { + addr := addresses[len(addresses)-1] + list := pool.queue[addr.address] - addresses = addresses[:len(addresses)-1] + addresses = addresses[:len(addresses)-1] - // Drop all transactions if they are less than the overflow - if size := uint64(list.Len()); size <= drop { - for _, tx := range list.Flatten() { - pool.removeTx(tx.Hash(), true) - } - drop -= size - queuedRateLimitMeter.Mark(int64(size)) - continue - } - // Otherwise drop only last few transactions - txs := list.Flatten() - for i := len(txs) - 1; i >= 0 && drop > 0; i-- { - pool.removeTx(txs[i].Hash(), true) - drop-- - queuedRateLimitMeter.Mark(1) + // Drop all transactions if they are less than the overflow + if size := uint64(list.Len()); size <= drop { + for _, tx := range list.Flatten() { + pool.removeTx(tx.Hash(), true) } + drop -= size + queuedRateLimitMeter.Mark(int64(size)) + continue + } + // Otherwise drop only last few transactions + txs := list.Flatten() + for i := len(txs) - 1; i >= 0 && drop > 0; i-- { + pool.removeTx(txs[i].Hash(), true) + drop-- + queuedRateLimitMeter.Mark(1) } } } @@ -1375,7 +1517,7 @@ func (pool *TxPool) demoteUnexecutables() { log.Warn("Demoting invalidated transaction", "hash", hash) pool.enqueueTx(hash, tx) } - pendingCounter.Inc(int64(len(gapped))) + pendingCounter.Dec(int64(len(gapped))) } // Delete the entire queue entry if it became empty. if list.Empty() { @@ -1407,11 +1549,15 @@ type accountSet struct { // newAccountSet creates a new address set with an associated signer for sender // derivations. -func newAccountSet(signer types.Signer) *accountSet { - return &accountSet{ +func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet { + as := &accountSet{ accounts: make(map[common.Address]struct{}), signer: signer, } + for _, addr := range addrs { + as.add(addr) + } + return as } // contains checks if a given address is contained within the set. @@ -1435,6 +1581,13 @@ func (as *accountSet) add(addr common.Address) { as.cache = nil } +// addTx adds the sender of tx into the set. +func (as *accountSet) addTx(tx *types.Transaction) { + if addr, err := types.Sender(as.signer, tx); err == nil { + as.add(addr) + } +} + // flatten returns the list of addresses within this set, also caching it for later // reuse. The returned slice should not be changed! 
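flatten, just below, memoizes the flattened address slice, and every mutator (add, the new addTx, and merge further down) invalidates the memo by nilling as.cache. The same cache-invalidation pattern reduced to a string set (toy type, not the pool's):

    package main

    import "fmt"

    type set struct {
        items map[string]struct{}
        cache *[]string // memoized flatten result
    }

    func (s *set) add(v string) {
        s.items[v] = struct{}{}
        s.cache = nil // any mutation invalidates the memo
    }

    func (s *set) flatten() []string {
        if s.cache == nil {
            flat := make([]string, 0, len(s.items))
            for v := range s.items {
                flat = append(flat, v)
            }
            s.cache = &flat
        }
        return *s.cache
    }

    func main() {
        s := &set{items: make(map[string]struct{})}
        s.add("a")
        s.add("b")
        fmt.Println(len(s.flatten())) // 2, recomputed only after the next add
    }
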
func (as *accountSet) flatten() []common.Address { @@ -1448,6 +1601,14 @@ func (as *accountSet) flatten() []common.Address { return *as.cache } +// merge adds all addresses from the 'other' set into 'as'. +func (as *accountSet) merge(other *accountSet) { + for addr := range other.accounts { + as.accounts[addr] = struct{}{} + } + as.cache = nil +} + // txLookup is used internally by TxPool to track transactions while allowing lookup without // mutex contention. // diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index f4f7758a6ec2..21bf244519e9 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -220,7 +220,7 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) { t.Fatalf("Invalid nonce, want 0, got %d", nonce) } - pool.AddRemotes(types.Transactions{tx0, tx1}) + pool.addRemotesSync([]*types.Transaction{tx0, tx1}) nonce = pool.State().GetNonce(address) if nonce != 2 { @@ -229,8 +229,7 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) { // trigger state change in the background trigger = true - - pool.lockedReset(nil, nil) + <-pool.requestReset(nil, nil) _, err := pool.Pending() if err != nil { @@ -288,10 +287,10 @@ func TestTransactionQueue(t *testing.T) { tx := transaction(0, 100, key) from, _ := deriveSender(tx) pool.currentState.AddBalance(from, big.NewInt(1000)) - pool.lockedReset(nil, nil) - pool.enqueueTx(tx.Hash(), tx) + <-pool.requestReset(nil, nil) - pool.promoteExecutables([]common.Address{from}) + pool.enqueueTx(tx.Hash(), tx) + <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) if len(pool.pending) != 1 { t.Error("expected valid txs to be 1 is", len(pool.pending)) } @@ -300,7 +299,8 @@ func TestTransactionQueue(t *testing.T) { from, _ = deriveSender(tx) pool.currentState.SetNonce(from, 2) pool.enqueueTx(tx.Hash(), tx) - pool.promoteExecutables([]common.Address{from}) + + <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok { t.Error("expected transaction to be in tx pool") } @@ -308,25 +308,28 @@ func TestTransactionQueue(t *testing.T) { if len(pool.queue) > 0 { t.Error("expected transaction queue to be empty. 
is", len(pool.queue)) } +} + +func TestTransactionQueue2(t *testing.T) { + t.Parallel() - pool, key = setupTxPool() + pool, key := setupTxPool() defer pool.Stop() tx1 := transaction(0, 100, key) tx2 := transaction(10, 100, key) tx3 := transaction(11, 100, key) - from, _ = deriveSender(tx1) + from, _ := deriveSender(tx1) pool.currentState.AddBalance(from, big.NewInt(1000)) - pool.lockedReset(nil, nil) + pool.reset(nil, nil) pool.enqueueTx(tx1.Hash(), tx1) pool.enqueueTx(tx2.Hash(), tx2) pool.enqueueTx(tx3.Hash(), tx3) pool.promoteExecutables([]common.Address{from}) - if len(pool.pending) != 1 { - t.Error("expected tx pool to be 1, got", len(pool.pending)) + t.Error("expected pending length to be 1, got", len(pool.pending)) } if pool.queue[from].Len() != 2 { t.Error("expected len(queue) == 2, got", pool.queue[from].Len()) @@ -360,7 +363,7 @@ func TestTransactionChainFork(t *testing.T) { statedb.AddBalance(addr, big.NewInt(100000000000000)) pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)} - pool.lockedReset(nil, nil) + <-pool.requestReset(nil, nil) } resetState() @@ -390,7 +393,7 @@ func TestTransactionDoubleNonce(t *testing.T) { statedb.AddBalance(addr, big.NewInt(100000000000000)) pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)} - pool.lockedReset(nil, nil) + <-pool.requestReset(nil, nil) } resetState() @@ -406,16 +409,17 @@ func TestTransactionDoubleNonce(t *testing.T) { if replace, err := pool.add(tx2, false); err != nil || !replace { t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace) } - pool.promoteExecutables([]common.Address{addr}) + <-pool.requestPromoteExecutables(newAccountSet(signer, addr)) if pool.pending[addr].Len() != 1 { t.Error("expected 1 pending transactions, got", pool.pending[addr].Len()) } if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() { t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash()) } + // Add the third transaction and ensure it's not saved (smaller price) pool.add(tx3, false) - pool.promoteExecutables([]common.Address{addr}) + <-pool.requestPromoteExecutables(newAccountSet(signer, addr)) if pool.pending[addr].Len() != 1 { t.Error("expected 1 pending transactions, got", pool.pending[addr].Len()) } @@ -461,7 +465,7 @@ func TestTransactionNonceRecovery(t *testing.T) { addr := crypto.PubkeyToAddress(key.PublicKey) pool.currentState.SetNonce(addr, n) pool.currentState.AddBalance(addr, big.NewInt(100000000000000)) - pool.lockedReset(nil, nil) + <-pool.requestReset(nil, nil) tx := transaction(n, 100000, key) if err := pool.AddRemote(tx); err != nil { @@ -469,7 +473,7 @@ func TestTransactionNonceRecovery(t *testing.T) { } // simulate some weird re-order of transactions and missing nonce(s) pool.currentState.SetNonce(addr, n-1) - pool.lockedReset(nil, nil) + <-pool.requestReset(nil, nil) if fn := pool.pendingState.GetNonce(addr); fn != n-1 { t.Errorf("expected nonce to be %d, got %d", n-1, fn) } @@ -513,7 +517,7 @@ func TestTransactionDropping(t *testing.T) { if pool.all.Count() != 6 { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) } - pool.lockedReset(nil, nil) + <-pool.requestReset(nil, nil) if pool.pending[account].Len() != 3 { t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) } @@ -525,7 +529,7 @@ func TestTransactionDropping(t *testing.T) { } // Reduce the balance of the account, and check that invalidated transactions are dropped pool.currentState.AddBalance(account, 
big.NewInt(-650)) - pool.lockedReset(nil, nil) + <-pool.requestReset(nil, nil) if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { t.Errorf("funded pending transaction missing: %v", tx0) @@ -550,7 +554,7 @@ func TestTransactionDropping(t *testing.T) { } // Reduce the block gas limit, check that invalidated transactions are dropped pool.chain.(*testBlockChain).gasLimit = 100 - pool.lockedReset(nil, nil) + <-pool.requestReset(nil, nil) if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { t.Errorf("funded pending transaction missing: %v", tx0) @@ -607,7 +611,7 @@ func TestTransactionPostponing(t *testing.T) { txs = append(txs, tx) } } - for i, err := range pool.AddRemotes(txs) { + for i, err := range pool.addRemotesSync(txs) { if err != nil { t.Fatalf("tx %d: failed to add transactions: %v", i, err) } @@ -622,7 +626,7 @@ func TestTransactionPostponing(t *testing.T) { if pool.all.Count() != len(txs) { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) } - pool.lockedReset(nil, nil) + <-pool.requestReset(nil, nil) if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) { t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs)) } @@ -636,7 +640,7 @@ func TestTransactionPostponing(t *testing.T) { for _, addr := range accs { pool.currentState.AddBalance(addr, big.NewInt(-1)) } - pool.lockedReset(nil, nil) + <-pool.requestReset(nil, nil) // The first account's first transaction remains valid, check that subsequent // ones are either filtered out, or queued up for later. @@ -703,12 +707,10 @@ func TestTransactionGapFilling(t *testing.T) { defer sub.Unsubscribe() // Create a pending and a queued transaction with a nonce-gap in between - if err := pool.AddRemote(transaction(0, 100000, key)); err != nil { - t.Fatalf("failed to add pending transaction: %v", err) - } - if err := pool.AddRemote(transaction(2, 100000, key)); err != nil { - t.Fatalf("failed to add queued transaction: %v", err) - } + pool.addRemotesSync([]*types.Transaction{ + transaction(0, 100000, key), + transaction(2, 100000, key), + }) pending, queued := pool.Stats() if pending != 1 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1) @@ -723,7 +725,7 @@ func TestTransactionGapFilling(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // Fill the nonce gap and ensure all transactions become pending - if err := pool.AddRemote(transaction(1, 100000, key)); err != nil { + if err := pool.addRemoteSync(transaction(1, 100000, key)); err != nil { t.Fatalf("failed to add gapped transaction: %v", err) } pending, queued = pool.Stats() @@ -755,7 +757,7 @@ func TestTransactionQueueAccountLimiting(t *testing.T) { testTxPoolConfig.AccountQueue = 10 // Keep queuing up transactions and make sure all above a limit are dropped for i := uint64(1); i <= testTxPoolConfig.AccountQueue; i++ { - if err := pool.AddRemote(transaction(i, 100000, key)); err != nil { + if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil { t.Fatalf("tx %d: failed to add transaction: %v", i, err) } if len(pool.pending) != 0 { @@ -824,7 +826,7 @@ func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) { nonces[addr]++ } // Import the batch and verify that limits have been enforced - pool.AddRemotes(txs) + pool.addRemotesSync(txs) queued := 0 for addr, list := range pool.queue { @@ -961,7 +963,7 @@ func TestTransactionPendingLimiting(t *testing.T) { // Keep queuing up transactions and make sure all 
above a limit are dropped for i := uint64(0); i < testTxPoolConfig.AccountQueue; i++ { - if err := pool.AddRemote(transaction(i, 100000, key)); err != nil { + if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil { t.Fatalf("tx %d: failed to add transaction: %v", i, err) } if pool.pending[account].Len() != int(i)+1 { @@ -982,59 +984,6 @@ func TestTransactionPendingLimiting(t *testing.T) { } } -// Tests that the transaction limits are enforced the same way irrelevant whether -// the transactions are added one by one or in batches. -func TestTransactionQueueLimitingEquivalency(t *testing.T) { testTransactionLimitingEquivalency(t, 1) } -func TestTransactionPendingLimitingEquivalency(t *testing.T) { - testTransactionLimitingEquivalency(t, 0) -} - -func testTransactionLimitingEquivalency(t *testing.T, origin uint64) { - t.Parallel() - - // Add a batch of transactions to a pool one by one - pool1, key1 := setupTxPool() - defer pool1.Stop() - - account1, _ := deriveSender(transaction(0, 0, key1)) - pool1.currentState.AddBalance(account1, big.NewInt(1000000)) - testTxPoolConfig.AccountQueue = 10 - for i := uint64(0); i < testTxPoolConfig.AccountQueue; i++ { - if err := pool1.AddRemote(transaction(origin+i, 100000, key1)); err != nil { - t.Fatalf("tx %d: failed to add transaction: %v", i, err) - } - } - // Add a batch of transactions to a pool in one big batch - pool2, key2 := setupTxPool() - defer pool2.Stop() - - account2, _ := deriveSender(transaction(0, 0, key2)) - pool2.currentState.AddBalance(account2, big.NewInt(1000000)) - - txs := []*types.Transaction{} - for i := uint64(0); i < testTxPoolConfig.AccountQueue; i++ { - txs = append(txs, transaction(origin+i, 100000, key2)) - } - pool2.AddRemotes(txs) - - // Ensure the batch optimization honors the same pool mechanics - if len(pool1.pending) != len(pool2.pending) { - t.Errorf("pending transaction count mismatch: one-by-one algo: %d, batch algo: %d", len(pool1.pending), len(pool2.pending)) - } - if len(pool1.queue) != len(pool2.queue) { - t.Errorf("queued transaction count mismatch: one-by-one algo: %d, batch algo: %d", len(pool1.queue), len(pool2.queue)) - } - if pool1.all.Count() != pool2.all.Count() { - t.Errorf("total transaction count mismatch: one-by-one algo %d, batch algo %d", pool1.all.Count(), pool2.all.Count()) - } - if err := validateTxPoolInternals(pool1); err != nil { - t.Errorf("pool 1 internal state corrupted: %v", err) - } - if err := validateTxPoolInternals(pool2); err != nil { - t.Errorf("pool 2 internal state corrupted: %v", err) - } -} - // Tests that if the transaction count belonging to multiple accounts go above // some hard threshold, the higher transactions are dropped to prevent DOS // attacks. 
@@ -1070,7 +1019,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) { } } // Import the batch and verify that limits have been enforced - pool.AddRemotes(txs) + pool.addRemotesSync(txs) pending := 0 for _, list := range pool.pending { @@ -1152,7 +1101,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) { } } // Import the batch and verify that limits have been enforced - pool.AddRemotes(txs) + pool.addRemotesSync(txs) for addr, list := range pool.pending { if list.Len() != int(config.AccountSlots) { @@ -1209,7 +1158,7 @@ func TestTransactionPoolRepricing(t *testing.T) { ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3]) // Import the batch and that both pending and queued transactions match up - pool.AddRemotes(txs) + pool.addRemotesSync(txs) pool.AddLocal(ltx) pending, queued := pool.Stats() @@ -1493,7 +1442,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) { for i := uint64(0); i < config.GlobalSlots; i++ { txs = append(txs, pricedTransaction(i, 100000, big.NewInt(1), keys[0])) } - pool.AddRemotes(txs) + pool.addRemotesSync(txs) pending, queued := pool.Stats() if pending != int(config.GlobalSlots) { @@ -1509,7 +1458,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding high priced transactions drops a cheap, but doesn't produce a gap - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { t.Fatalf("failed to add well priced transaction: %v", err) } pending, queued = pool.Stats() @@ -1553,7 +1502,7 @@ func TestTransactionReplacement(t *testing.T) { price := int64(100) threshold := (price * (100 + int64(testTxPoolConfig.PriceBump))) / 100 - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil { + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil { t.Fatalf("failed to add original cheap pending transaction: %v", err) } if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced { @@ -1566,7 +1515,7 @@ func TestTransactionReplacement(t *testing.T) { t.Fatalf("cheap replacement event firing failed: %v", err) } - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil { + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil { t.Fatalf("failed to add original proper pending transaction: %v", err) } if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { @@ -1578,6 +1527,7 @@ func TestTransactionReplacement(t *testing.T) { if err := validateEvents(events, 2); err != nil { t.Fatalf("proper replacement event firing failed: %v", err) } + // Add queued transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil { t.Fatalf("failed to add original cheap queued transaction: %v", err) @@ -1656,7 +1606,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil { + if err := pool.addRemoteSync(pricedTransaction(0, 100000, 
big.NewInt(1), remote)); err != nil { t.Fatalf("failed to add remote transaction: %v", err) } pending, queued := pool.Stats() @@ -1694,7 +1644,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { } // Bump the nonce temporarily and ensure the newly invalidated transaction is removed statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) - pool.lockedReset(nil, nil) + <-pool.requestReset(nil, nil) time.Sleep(2 * config.Rejournal) pool.Stop() @@ -1749,7 +1699,7 @@ func TestTransactionStatusCheck(t *testing.T) { txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) // Queued only // Import the transaction and ensure they are correctly added - pool.AddRemotes(txs) + pool.addRemotesSync(txs) pending, queued := pool.Stats() if pending != 2 { @@ -1828,26 +1778,6 @@ func benchmarkFuturePromotion(b *testing.B, size int) { } } -// Benchmarks the speed of iterative transaction insertion. -func BenchmarkPoolInsert(b *testing.B) { - // Generate a batch of transactions to enqueue into the pool - pool, key := setupTxPool() - defer pool.Stop() - - account, _ := deriveSender(transaction(0, 0, key)) - pool.currentState.AddBalance(account, big.NewInt(1000000)) - - txs := make(types.Transactions, b.N) - for i := 0; i < b.N; i++ { - txs[i] = transaction(uint64(i), 100000, key) - } - // Benchmark importing the transactions into the queue - b.ResetTimer() - for _, tx := range txs { - pool.AddRemote(tx) - } -} - // Benchmarks the speed of batched transaction insertion. func BenchmarkPoolBatchInsert100(b *testing.B) { benchmarkPoolBatchInsert(b, 100) } func BenchmarkPoolBatchInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000) } From 6338a4195bda364352df3f54b355e5b76f1b72de Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Fri, 10 May 2024 16:54:40 +0800 Subject: [PATCH 19/23] core: kill off managed state, use own tiny noncer for txpool (#19810) --- contracts/utils.go | 2 +- core/state/managed_state.go | 143 ------------------------------- core/state/managed_state_test.go | 124 --------------------------- core/tx_noncer.go | 53 ++++++++++++ core/tx_pool.go | 43 +++++----- core/tx_pool_test.go | 10 +-- eth/api_backend.go | 2 +- 7 files changed, 82 insertions(+), 295 deletions(-) delete mode 100644 core/state/managed_state.go delete mode 100644 core/state/managed_state_test.go create mode 100644 core/tx_noncer.go diff --git a/contracts/utils.go b/contracts/utils.go index 5d6963e08ffe..8f605a5e862e 100644 --- a/contracts/utils.go +++ b/contracts/utils.go @@ -86,7 +86,7 @@ func CreateTransactionSign(chainConfig *params.ChainConfig, pool *core.TxPool, m } // Create and send tx to smart contract for sign validate block. - nonce := pool.State().GetNonce(account.Address) + nonce := pool.Nonce(account.Address) tx := CreateTxSign(block.Number(), block.Hash(), nonce, common.HexToAddress(common.BlockSigners)) txSigned, err := wallet.SignTx(account, tx, chainConfig.ChainId) if err != nil { diff --git a/core/state/managed_state.go b/core/state/managed_state.go deleted file mode 100644 index fbd5d2959376..000000000000 --- a/core/state/managed_state.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package state - -import ( - "sync" - - "github.com/XinFinOrg/XDPoSChain/common" -) - -type account struct { - stateObject *stateObject - nstart uint64 - nonces []bool -} - -type ManagedState struct { - *StateDB - - mu sync.RWMutex - - accounts map[common.Address]*account -} - -// ManagedState returns a new managed state with the statedb as it's backing layer -func ManageState(statedb *StateDB) *ManagedState { - return &ManagedState{ - StateDB: statedb.Copy(), - accounts: make(map[common.Address]*account), - } -} - -// SetState sets the backing layer of the managed state -func (ms *ManagedState) SetState(statedb *StateDB) { - ms.mu.Lock() - defer ms.mu.Unlock() - ms.StateDB = statedb -} - -// RemoveNonce removed the nonce from the managed state and all future pending nonces -func (ms *ManagedState) RemoveNonce(addr common.Address, n uint64) { - if ms.hasAccount(addr) { - ms.mu.Lock() - defer ms.mu.Unlock() - - account := ms.getAccount(addr) - if n-account.nstart <= uint64(len(account.nonces)) { - reslice := make([]bool, n-account.nstart) - copy(reslice, account.nonces[:n-account.nstart]) - account.nonces = reslice - } - } -} - -// NewNonce returns the new canonical nonce for the managed account -func (ms *ManagedState) NewNonce(addr common.Address) uint64 { - ms.mu.Lock() - defer ms.mu.Unlock() - - account := ms.getAccount(addr) - for i, nonce := range account.nonces { - if !nonce { - return account.nstart + uint64(i) - } - } - account.nonces = append(account.nonces, true) - - return uint64(len(account.nonces)-1) + account.nstart -} - -// GetNonce returns the canonical nonce for the managed or unmanaged account. -// -// Because GetNonce mutates the DB, we must take a write lock. -func (ms *ManagedState) GetNonce(addr common.Address) uint64 { - ms.mu.Lock() - defer ms.mu.Unlock() - - if ms.hasAccount(addr) { - account := ms.getAccount(addr) - return uint64(len(account.nonces)) + account.nstart - } else { - return ms.StateDB.GetNonce(addr) - } -} - -// SetNonce sets the new canonical nonce for the managed state -func (ms *ManagedState) SetNonce(addr common.Address, nonce uint64) { - ms.mu.Lock() - defer ms.mu.Unlock() - - so := ms.GetOrNewStateObject(addr) - so.SetNonce(nonce) - - ms.accounts[addr] = newAccount(so) -} - -// HasAccount returns whether the given address is managed or not -func (ms *ManagedState) HasAccount(addr common.Address) bool { - ms.mu.RLock() - defer ms.mu.RUnlock() - return ms.hasAccount(addr) -} - -func (ms *ManagedState) hasAccount(addr common.Address) bool { - _, ok := ms.accounts[addr] - return ok -} - -// populate the managed state -func (ms *ManagedState) getAccount(addr common.Address) *account { - if account, ok := ms.accounts[addr]; !ok { - so := ms.GetOrNewStateObject(addr) - ms.accounts[addr] = newAccount(so) - } else { - // Always make sure the state account nonce isn't actually higher - // than the tracked one. 
- so := ms.StateDB.getStateObject(addr) - if so != nil && uint64(len(account.nonces))+account.nstart < so.Nonce() { - ms.accounts[addr] = newAccount(so) - } - - } - - return ms.accounts[addr] -} - -func newAccount(so *stateObject) *account { - return &account{so, so.Nonce(), nil} -} diff --git a/core/state/managed_state_test.go b/core/state/managed_state_test.go deleted file mode 100644 index 13f35a8a51fc..000000000000 --- a/core/state/managed_state_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package state - -import ( - "github.com/XinFinOrg/XDPoSChain/core/rawdb" - "testing" - - "github.com/XinFinOrg/XDPoSChain/common" -) - -var addr = common.BytesToAddress([]byte("test")) - -func create() (*ManagedState, *account) { - db := rawdb.NewMemoryDatabase() - statedb, _ := New(common.Hash{}, NewDatabase(db)) - ms := ManageState(statedb) - ms.StateDB.SetNonce(addr, 100) - ms.accounts[addr] = newAccount(ms.StateDB.getStateObject(addr)) - return ms, ms.accounts[addr] -} - -func TestNewNonce(t *testing.T) { - ms, _ := create() - - nonce := ms.NewNonce(addr) - if nonce != 100 { - t.Error("expected nonce 100. got", nonce) - } - - nonce = ms.NewNonce(addr) - if nonce != 101 { - t.Error("expected nonce 101. got", nonce) - } -} - -func TestRemove(t *testing.T) { - ms, account := create() - - nn := make([]bool, 10) - for i := range nn { - nn[i] = true - } - account.nonces = append(account.nonces, nn...) - - i := uint64(5) - ms.RemoveNonce(addr, account.nstart+i) - if len(account.nonces) != 5 { - t.Error("expected", i, "'th index to be false") - } -} - -func TestReuse(t *testing.T) { - ms, account := create() - - nn := make([]bool, 10) - for i := range nn { - nn[i] = true - } - account.nonces = append(account.nonces, nn...) - - i := uint64(5) - ms.RemoveNonce(addr, account.nstart+i) - nonce := ms.NewNonce(addr) - if nonce != 105 { - t.Error("expected nonce to be 105. got", nonce) - } -} - -func TestRemoteNonceChange(t *testing.T) { - ms, account := create() - nn := make([]bool, 10) - for i := range nn { - nn[i] = true - } - account.nonces = append(account.nonces, nn...) 
- ms.NewNonce(addr) - - ms.StateDB.stateObjects[addr].data.Nonce = 200 - nonce := ms.NewNonce(addr) - if nonce != 200 { - t.Error("expected nonce after remote update to be", 200, "got", nonce) - } - ms.NewNonce(addr) - ms.NewNonce(addr) - ms.NewNonce(addr) - ms.StateDB.stateObjects[addr].data.Nonce = 200 - nonce = ms.NewNonce(addr) - if nonce != 204 { - t.Error("expected nonce after remote update to be", 204, "got", nonce) - } -} - -func TestSetNonce(t *testing.T) { - ms, _ := create() - - var addr common.Address - ms.SetNonce(addr, 10) - - if ms.GetNonce(addr) != 10 { - t.Error("Expected nonce of 10, got", ms.GetNonce(addr)) - } - - addr[0] = 1 - ms.StateDB.SetNonce(addr, 1) - - if ms.GetNonce(addr) != 1 { - t.Error("Expected nonce of 1, got", ms.GetNonce(addr)) - } -} diff --git a/core/tx_noncer.go b/core/tx_noncer.go new file mode 100644 index 000000000000..bf5f43f142f2 --- /dev/null +++ b/core/tx_noncer.go @@ -0,0 +1,53 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package core + +import ( + "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/core/state" +) + +// txNoncer is a tiny virtual state database to manage the executable nonces of +// accounts in the pool, falling back to reading from a real state database if +// an account is unknown. +type txNoncer struct { + fallback *state.StateDB + nonces map[common.Address]uint64 +} + +// newTxNoncer creates a new virtual state database to track the pool nonces. +func newTxNoncer(statedb *state.StateDB) *txNoncer { + return &txNoncer{ + fallback: statedb.Copy(), + nonces: make(map[common.Address]uint64), + } +} + +// get returns the current nonce of an account, falling back to a real state +// database if the account is unknown. +func (txn *txNoncer) get(addr common.Address) uint64 { + if _, ok := txn.nonces[addr]; !ok { + txn.nonces[addr] = txn.fallback.GetNonce(addr) + } + return txn.nonces[addr] +} + +// set inserts a new virtual nonce into the virtual state database to be returned +// whenever the pool requests it instead of reaching into the real state database. 
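Taken together, get above and set just below make txNoncer a lazy write-through view of account nonces: the first get for an account copies the value out of the backing StateDB, and set overrides it with the pool's virtual nonce from then on. The same shape with the StateDB swapped for a plain function (toy keys, no locking yet — the mutex arrives in patch 20 below):

    package main

    import "fmt"

    func main() {
        fallback := func(addr string) uint64 { return 7 } // stands in for StateDB.GetNonce
        nonces := make(map[string]uint64)

        get := func(addr string) uint64 {
            if _, ok := nonces[addr]; !ok {
                nonces[addr] = fallback(addr) // first access pulls from real state
            }
            return nonces[addr]
        }

        fmt.Println(get("acct")) // 7, copied from the fallback
        nonces["acct"] = 9       // set: the virtual nonce wins from now on
        fmt.Println(get("acct")) // 9
    }
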
+func (txn *txNoncer) set(addr common.Address, nonce uint64) { + txn.nonces[addr] = nonce +} diff --git a/core/tx_pool.go b/core/tx_pool.go index 941c2825b1d0..57fb5d06aabb 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -237,9 +237,9 @@ type TxPool struct { signer types.Signer mu sync.RWMutex - currentState *state.StateDB // Current state in the blockchain head - pendingState *state.ManagedState // Pending state tracking virtual nonces - currentMaxGas uint64 // Current gas limit for transaction caps + currentState *state.StateDB // Current state in the blockchain head + pendingNonces *txNoncer // Pending state tracking virtual nonces + currentMaxGas uint64 // Current gas limit for transaction caps locals *accountSet // Set of local transaction to exempt from eviction rules journal *txJournal // Journal of local transaction to back up to disk @@ -441,12 +441,13 @@ func (pool *TxPool) SetGasPrice(price *big.Int) { log.Info("Transaction pool price threshold updated", "price", price) } -// State returns the virtual managed state of the transaction pool. -func (pool *TxPool) State() *state.ManagedState { +// Nonce returns the next nonce of an account, with all transactions executable +// by the pool already applied on top. +func (pool *TxPool) Nonce(addr common.Address) uint64 { pool.mu.RLock() defer pool.mu.RUnlock() - return pool.pendingState + return pool.pendingNonces.get(addr) } // Stats retrieves the current pool stats, namely the number of pending and the @@ -576,7 +577,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { if pool.currentState.GetNonce(from) > tx.Nonce() { return ErrNonceTooLow } - if pool.pendingState.GetNonce(from)+common.LimitThresholdNonceInQueue < tx.Nonce() { + if pool.pendingNonces.get(from)+common.LimitThresholdNonceInQueue < tx.Nonce() { return ErrNonceTooHigh } // Transactor should have enough funds to cover the costs @@ -593,7 +594,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { if tx.To() != nil { if value, ok := pool.trc21FeeCapacity[*tx.To()]; ok { feeCapacity = value - if !state.ValidateTRC21Tx(pool.pendingState.StateDB, from, *tx.To(), tx.Data()) { + if !state.ValidateTRC21Tx(pool.currentState, from, *tx.To(), tx.Data()) { return ErrInsufficientFunds } cost = tx.TxCost(number) @@ -669,7 +670,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e } from, _ := types.Sender(pool.signer, tx) // already validated - if tx.IsSpecialTransaction() && pool.IsSigner != nil && pool.IsSigner(from) && pool.pendingState.GetNonce(from) == tx.Nonce() { + if tx.IsSpecialTransaction() && pool.IsSigner != nil && pool.IsSigner(from) && pool.pendingNonces.get(from) == tx.Nonce() { return pool.promoteSpecialTx(from, tx) } @@ -815,7 +816,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T } // Set the potentially new pending nonce and notify any subsystems of the new tx pool.beats[addr] = time.Now() - pool.pendingState.SetNonce(addr, tx.Nonce()+1) + pool.pendingNonces.set(addr, tx.Nonce()+1) return true } @@ -852,7 +853,7 @@ func (pool *TxPool) promoteSpecialTx(addr common.Address, tx *types.Transaction) } // Set the potentially new pending nonce and notify any subsystems of the new tx pool.beats[addr] = time.Now() - pool.pendingState.SetNonce(addr, tx.Nonce()+1) + pool.pendingNonces.set(addr, tx.Nonce()+1) go pool.txFeed.Send(NewTxsEvent{types.Transactions{tx}}) return true, nil } @@ -992,8 +993,8 @@ func (pool *TxPool) removeTx(hash common.Hash, 
outofbound bool) { pool.enqueueTx(tx.Hash(), tx) } // Update the account nonce if needed - if nonce := tx.Nonce(); pool.pendingState.GetNonce(addr) > nonce { - pool.pendingState.SetNonce(addr, nonce) + if nonce := tx.Nonce(); pool.pendingNonces.get(addr) > nonce { + pool.pendingNonces.set(addr, nonce) } // Reduce the pending counter pendingCounter.Dec(int64(1 + len(invalids))) @@ -1129,7 +1130,7 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt // Nonces were reset, discard any events that became stale for addr := range events { - events[addr].Forward(pool.pendingState.GetNonce(addr)) + events[addr].Forward(pool.pendingNonces.get(addr)) if events[addr].Len() == 0 { delete(events, addr) } @@ -1162,7 +1163,7 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt // Update all accounts to the latest known pending nonce for addr, list := range pool.pending { txs := list.Flatten() // Heavy but will be cached and is needed by the miner anyway - pool.pendingState.SetNonce(addr, txs[len(txs)-1].Nonce()+1) + pool.pendingNonces.set(addr, txs[len(txs)-1].Nonce()+1) } pool.mu.Unlock() @@ -1252,7 +1253,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { } pool.currentState = statedb pool.trc21FeeCapacity = state.GetTRC21FeeCapacityFromStateWithCache(newHead.Root, statedb) - pool.pendingState = state.ManageState(statedb) + pool.pendingNonces = newTxNoncer(statedb) pool.currentMaxGas = newHead.GasLimit // Inject any transactions discarded due to reorgs @@ -1300,7 +1301,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans queuedNofundsMeter.Mark(int64(len(drops))) // Gather all executable transactions and promote them - readies := list.Ready(pool.pendingState.GetNonce(addr)) + readies := list.Ready(pool.pendingNonces.get(addr)) for _, tx := range readies { hash := tx.Hash() if pool.promoteTx(addr, hash, tx) { @@ -1380,8 +1381,8 @@ func (pool *TxPool) truncatePending() { pool.all.Remove(hash) // Update the account nonce to the dropped transaction - if nonce := tx.Nonce(); pool.pendingState.GetNonce(offenders[i]) > nonce { - pool.pendingState.SetNonce(offenders[i], nonce) + if nonce := tx.Nonce(); pool.pendingNonces.get(offenders[i]) > nonce { + pool.pendingNonces.set(offenders[i], nonce) } log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) } @@ -1409,8 +1410,8 @@ func (pool *TxPool) truncatePending() { pool.all.Remove(hash) // Update the account nonce to the dropped transaction - if nonce := tx.Nonce(); pool.pendingState.GetNonce(addr) > nonce { - pool.pendingState.SetNonce(addr, nonce) + if nonce := tx.Nonce(); pool.pendingNonces.get(addr) > nonce { + pool.pendingNonces.set(addr, nonce) } log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) } diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index 21bf244519e9..3801d5896e9a 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -127,7 +127,7 @@ func validateTxPoolInternals(pool *TxPool) error { last = nonce } } - if nonce := pool.pendingState.GetNonce(addr); nonce != last+1 { + if nonce := pool.Nonce(addr); nonce != last+1 { return fmt.Errorf("pending nonce mismatch: have %v, want %v", nonce, last+1) } } @@ -215,14 +215,14 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) { pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) defer pool.Stop() - nonce := pool.State().GetNonce(address) + nonce := pool.Nonce(address) if nonce != 0 { t.Fatalf("Invalid nonce, 
want 0, got %d", nonce) } pool.addRemotesSync([]*types.Transaction{tx0, tx1}) - nonce = pool.State().GetNonce(address) + nonce = pool.Nonce(address) if nonce != 2 { t.Fatalf("Invalid nonce, want 2, got %d", nonce) } @@ -235,7 +235,7 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) { if err != nil { t.Fatalf("Could not fetch pending transactions: %v", err) } - nonce = pool.State().GetNonce(address) + nonce = pool.Nonce(address) if nonce != 2 { t.Fatalf("Invalid nonce, want 2, got %d", nonce) } @@ -474,7 +474,7 @@ func TestTransactionNonceRecovery(t *testing.T) { // simulate some weird re-order of transactions and missing nonce(s) pool.currentState.SetNonce(addr, n-1) <-pool.requestReset(nil, nil) - if fn := pool.pendingState.GetNonce(addr); fn != n-1 { + if fn := pool.Nonce(addr); fn != n-1 { t.Errorf("expected nonce to be %d, got %d", n-1, fn) } } diff --git a/eth/api_backend.go b/eth/api_backend.go index 8e7b2d545232..041e088bb271 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -304,7 +304,7 @@ func (b *EthApiBackend) GetPoolTransaction(hash common.Hash) *types.Transaction } func (b *EthApiBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) { - return b.eth.txPool.State().GetNonce(addr), nil + return b.eth.txPool.Nonce(addr), nil } func (b *EthApiBackend) Stats() (pending int, queued int) { From edaed4fd928348beecfa3ccd00cc779c9c9e26a4 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Fri, 10 May 2024 17:13:53 +0800 Subject: [PATCH 20/23] core: fix write concurrency in txpool (#19835) --- core/tx_noncer.go | 26 ++++++++++++++++++++++++++ core/tx_pool.go | 12 +++--------- 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/core/tx_noncer.go b/core/tx_noncer.go index bf5f43f142f2..cbadc39354a3 100644 --- a/core/tx_noncer.go +++ b/core/tx_noncer.go @@ -17,6 +17,8 @@ package core import ( + "sync" + "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core/state" ) @@ -27,6 +29,7 @@ import ( type txNoncer struct { fallback *state.StateDB nonces map[common.Address]uint64 + lock sync.Mutex } // newTxNoncer creates a new virtual state database to track the pool nonces. @@ -40,6 +43,11 @@ func newTxNoncer(statedb *state.StateDB) *txNoncer { // get returns the current nonce of an account, falling back to a real state // database if the account is unknown. func (txn *txNoncer) get(addr common.Address) uint64 { + // We use mutex for get operation is the underlying + // state will mutate db even for read access. + txn.lock.Lock() + defer txn.lock.Unlock() + if _, ok := txn.nonces[addr]; !ok { txn.nonces[addr] = txn.fallback.GetNonce(addr) } @@ -49,5 +57,23 @@ func (txn *txNoncer) get(addr common.Address) uint64 { // set inserts a new virtual nonce into the virtual state database to be returned // whenever the pool requests it instead of reaching into the real state database. func (txn *txNoncer) set(addr common.Address, nonce uint64) { + txn.lock.Lock() + defer txn.lock.Unlock() + + txn.nonces[addr] = nonce +} + +// setIfLower updates a new virtual nonce into the virtual state database if the +// the new one is lower. 
+func (txn *txNoncer) setIfLower(addr common.Address, nonce uint64) { + txn.lock.Lock() + defer txn.lock.Unlock() + + if _, ok := txn.nonces[addr]; !ok { + txn.nonces[addr] = txn.fallback.GetNonce(addr) + } + if txn.nonces[addr] <= nonce { + return + } txn.nonces[addr] = nonce } diff --git a/core/tx_pool.go b/core/tx_pool.go index 57fb5d06aabb..30b0977b9865 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -993,9 +993,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { pool.enqueueTx(tx.Hash(), tx) } // Update the account nonce if needed - if nonce := tx.Nonce(); pool.pendingNonces.get(addr) > nonce { - pool.pendingNonces.set(addr, nonce) - } + pool.pendingNonces.setIfLower(addr, tx.Nonce()) // Reduce the pending counter pendingCounter.Dec(int64(1 + len(invalids))) return @@ -1381,9 +1379,7 @@ func (pool *TxPool) truncatePending() { pool.all.Remove(hash) // Update the account nonce to the dropped transaction - if nonce := tx.Nonce(); pool.pendingNonces.get(offenders[i]) > nonce { - pool.pendingNonces.set(offenders[i], nonce) - } + pool.pendingNonces.setIfLower(offenders[i], tx.Nonce()) log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) } pool.priced.Removed(len(caps)) @@ -1410,9 +1406,7 @@ func (pool *TxPool) truncatePending() { pool.all.Remove(hash) // Update the account nonce to the dropped transaction - if nonce := tx.Nonce(); pool.pendingNonces.get(addr) > nonce { - pool.pendingNonces.set(addr, nonce) - } + pool.pendingNonces.setIfLower(addr, tx.Nonce()) log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) } pool.priced.Removed(len(caps)) From b70861447074f73f2a2e1afabe2969ac71d5a277 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Fri, 10 May 2024 18:04:13 +0800 Subject: [PATCH 21/23] core, les: fix les unit tests (#19823) --- core/tx_pool.go | 6 +++--- core/tx_pool_test.go | 32 ++++++++++++++++---------------- les/handler.go | 1 + 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index 30b0977b9865..275dbbfa9bdf 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -884,13 +884,13 @@ func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error { } // This is like AddRemotes, but waits for pool reorganization. Tests use this method. -func (pool *TxPool) addRemotesSync(txs []*types.Transaction) []error { +func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error { return pool.addTxs(txs, false, true) } // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method. 
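Exporting the sync variants here is what lets the les tests drive the pool deterministically (the txPool interface at the end of this patch gains AddRemotesSync): the sync path is simply the async path plus a wait, so assertions made after it cannot race the background reorg. In miniature (toy worker, not pool code):

    package main

    import (
        "fmt"
        "sync"
    )

    type worker struct{ wg sync.WaitGroup }

    // addAsync runs f in the background, like AddRemotes kicking off a reorg.
    func (w *worker) addAsync(f func()) *sync.WaitGroup {
        w.wg.Add(1)
        go func() { defer w.wg.Done(); f() }()
        return &w.wg
    }

    // addSync is the same call plus a wait, like AddRemoteSync/AddRemotesSync.
    func (w *worker) addSync(f func()) { w.addAsync(f).Wait() }

    func main() {
        w := &worker{}
        w.addSync(func() { fmt.Println("pool reorganized") })
        fmt.Println("state assertions are now race-free")
    }
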
-func (pool *TxPool) addRemoteSync(tx *types.Transaction) error { - errs := pool.addRemotesSync([]*types.Transaction{tx}) +func (pool *TxPool) AddRemoteSync(tx *types.Transaction) error { + errs := pool.AddRemotesSync([]*types.Transaction{tx}) return errs[0] } diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index 3801d5896e9a..b494d93ce531 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -220,7 +220,7 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) { t.Fatalf("Invalid nonce, want 0, got %d", nonce) } - pool.addRemotesSync([]*types.Transaction{tx0, tx1}) + pool.AddRemotesSync([]*types.Transaction{tx0, tx1}) nonce = pool.Nonce(address) if nonce != 2 { @@ -611,7 +611,7 @@ func TestTransactionPostponing(t *testing.T) { txs = append(txs, tx) } } - for i, err := range pool.addRemotesSync(txs) { + for i, err := range pool.AddRemotesSync(txs) { if err != nil { t.Fatalf("tx %d: failed to add transactions: %v", i, err) } @@ -707,7 +707,7 @@ func TestTransactionGapFilling(t *testing.T) { defer sub.Unsubscribe() // Create a pending and a queued transaction with a nonce-gap in between - pool.addRemotesSync([]*types.Transaction{ + pool.AddRemotesSync([]*types.Transaction{ transaction(0, 100000, key), transaction(2, 100000, key), }) @@ -725,7 +725,7 @@ func TestTransactionGapFilling(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // Fill the nonce gap and ensure all transactions become pending - if err := pool.addRemoteSync(transaction(1, 100000, key)); err != nil { + if err := pool.AddRemoteSync(transaction(1, 100000, key)); err != nil { t.Fatalf("failed to add gapped transaction: %v", err) } pending, queued = pool.Stats() @@ -757,7 +757,7 @@ func TestTransactionQueueAccountLimiting(t *testing.T) { testTxPoolConfig.AccountQueue = 10 // Keep queuing up transactions and make sure all above a limit are dropped for i := uint64(1); i <= testTxPoolConfig.AccountQueue; i++ { - if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil { + if err := pool.AddRemoteSync(transaction(i, 100000, key)); err != nil { t.Fatalf("tx %d: failed to add transaction: %v", i, err) } if len(pool.pending) != 0 { @@ -826,7 +826,7 @@ func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) { nonces[addr]++ } // Import the batch and verify that limits have been enforced - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) queued := 0 for addr, list := range pool.queue { @@ -963,7 +963,7 @@ func TestTransactionPendingLimiting(t *testing.T) { // Keep queuing up transactions and make sure all above a limit are dropped for i := uint64(0); i < testTxPoolConfig.AccountQueue; i++ { - if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil { + if err := pool.AddRemoteSync(transaction(i, 100000, key)); err != nil { t.Fatalf("tx %d: failed to add transaction: %v", i, err) } if pool.pending[account].Len() != int(i)+1 { @@ -1019,7 +1019,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) { } } // Import the batch and verify that limits have been enforced - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) pending := 0 for _, list := range pool.pending { @@ -1101,7 +1101,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) { } } // Import the batch and verify that limits have been enforced - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) for addr, list := range pool.pending { if list.Len() != int(config.AccountSlots) { @@ -1158,7 +1158,7 @@ func TestTransactionPoolRepricing(t *testing.T) { ltx := pricedTransaction(0, 
100000, big.NewInt(1), keys[3]) // Import the batch and that both pending and queued transactions match up - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) pool.AddLocal(ltx) pending, queued := pool.Stats() @@ -1442,7 +1442,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) { for i := uint64(0); i < config.GlobalSlots; i++ { txs = append(txs, pricedTransaction(i, 100000, big.NewInt(1), keys[0])) } - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) pending, queued := pool.Stats() if pending != int(config.GlobalSlots) { @@ -1458,7 +1458,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding high priced transactions drops a cheap, but doesn't produce a gap - if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { + if err := pool.AddRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { t.Fatalf("failed to add well priced transaction: %v", err) } pending, queued = pool.Stats() @@ -1502,7 +1502,7 @@ func TestTransactionReplacement(t *testing.T) { price := int64(100) threshold := (price * (100 + int64(testTxPoolConfig.PriceBump))) / 100 - if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil { + if err := pool.AddRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil { t.Fatalf("failed to add original cheap pending transaction: %v", err) } if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced { @@ -1515,7 +1515,7 @@ func TestTransactionReplacement(t *testing.T) { t.Fatalf("cheap replacement event firing failed: %v", err) } - if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil { + if err := pool.AddRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil { t.Fatalf("failed to add original proper pending transaction: %v", err) } if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { @@ -1606,7 +1606,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } - if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil { + if err := pool.AddRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil { t.Fatalf("failed to add remote transaction: %v", err) } pending, queued := pool.Stats() @@ -1699,7 +1699,7 @@ func TestTransactionStatusCheck(t *testing.T) { txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) // Queued only // Import the transaction and ensure they are correctly added - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) pending, queued := pool.Stats() if pending != 2 { diff --git a/les/handler.go b/les/handler.go index 812c769de54c..6a4ba688ea3b 100644 --- a/les/handler.go +++ b/les/handler.go @@ -91,6 +91,7 @@ type BlockChain interface { type txPool interface { AddRemotes(txs []*types.Transaction) []error + AddRemotesSync(txs []*types.Transaction) []error Status(hashes []common.Hash) []core.TxStatus } From 67825d860be03c4915bde35f82094f080b4c06c8 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Fri, 10 May 2024 19:04:10 +0800 Subject: [PATCH 22/23] core, light, params: implement eip2028 (#19931) --- core/bench_test.go | 2 +- core/state_transition.go | 21 
++++++++++++++------- core/tx_pool.go | 8 +++++++- light/txpool.go | 10 +++++++--- params/protocol_params.go | 37 +++++++++++++++++++------------------ 5 files changed, 48 insertions(+), 30 deletions(-) diff --git a/core/bench_test.go b/core/bench_test.go index 588429240282..25b7faf4aed7 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -85,7 +85,7 @@ func genValueTx(nbytes int) func(int, *BlockGen) { return func(i int, gen *BlockGen) { toaddr := common.Address{} data := make([]byte, nbytes) - gas, _ := IntrinsicGas(data, false, false) + gas, _ := IntrinsicGas(data, false, false, false) tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, nil, data), types.HomesteadSigner{}, benchRootKey) gen.AddTx(tx) } diff --git a/core/state_transition.go b/core/state_transition.go index c9c4fdfefd40..8e9c225c5275 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -42,8 +42,10 @@ The state transitioning model does all all the necessary work to work out a vali 3) Create a new state object if the recipient is \0*32 4) Value transfer == If contract creation == - 4a) Attempt to run transaction data - 4b) If valid, use result as code for the new state object + + 4a) Attempt to run transaction data + 4b) If valid, use result as code for the new state object + == end == 5) Run Script section 6) Derive new state root @@ -77,10 +79,10 @@ type Message interface { } // IntrinsicGas computes the 'intrinsic gas' for a message with the given data. -func IntrinsicGas(data []byte, contractCreation, homestead bool) (uint64, error) { +func IntrinsicGas(data []byte, contractCreation, isEIP155 bool, isEIP2028 bool) (uint64, error) { // Set the starting gas for the raw transaction var gas uint64 - if contractCreation && homestead { + if contractCreation && isEIP155 { gas = params.TxGasContractCreation } else { gas = params.TxGas @@ -95,10 +97,14 @@ func IntrinsicGas(data []byte, contractCreation, homestead bool) (uint64, error) } } // Make sure we don't exceed uint64 for all data combinations - if (math.MaxUint64-gas)/params.TxDataNonZeroGas < nz { + nonZeroGas := params.TxDataNonZeroGasFrontier + if isEIP2028 { + nonZeroGas = params.TxDataNonZeroGasEIP2028 + } + if (math.MaxUint64-gas)/nonZeroGas < nz { return 0, vm.ErrOutOfGas } - gas += nz * params.TxDataNonZeroGas + gas += nz * nonZeroGas z := uint64(len(data)) - nz if (math.MaxUint64-gas)/params.TxDataZeroGas < z { @@ -223,10 +229,11 @@ func (st *StateTransition) TransitionDb(owner common.Address) (ret []byte, usedG sender := st.from() // err checked in preCheck homestead := st.evm.ChainConfig().IsHomestead(st.evm.BlockNumber) + istanbul := st.evm.ChainConfig().IsIstanbul(st.evm.BlockNumber) contractCreation := msg.To() == nil // Pay intrinsic gas - gas, err := IntrinsicGas(st.data, contractCreation, homestead) + gas, err := IntrinsicGas(st.data, contractCreation, homestead, istanbul) if err != nil { return nil, 0, false, err, nil } diff --git a/core/tx_pool.go b/core/tx_pool.go index 275dbbfa9bdf..f097b25501c5 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -237,6 +237,8 @@ type TxPool struct { signer types.Signer mu sync.RWMutex + istanbul bool // Fork indicator whether we are in the istanbul stage. 
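+	// (refreshed on every reset against the rules of the next pending block)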
+
 	currentState  *state.StateDB // Current state in the blockchain head
 	pendingNonces *txNoncer      // Pending state tracking virtual nonces
 	currentMaxGas uint64         // Current gas limit for transaction caps
@@ -606,7 +608,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
 
 	if tx.To() == nil || (tx.To() != nil && !tx.IsSpecialTransaction()) {
 		// Ensure the transaction has more gas than the basic tx fee.
-		intrGas, err := IntrinsicGas(tx.Data(), tx.To() == nil, true)
+		intrGas, err := IntrinsicGas(tx.Data(), tx.To() == nil, true, pool.istanbul)
 		if err != nil {
 			return err
 		}
@@ -1258,6 +1260,10 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
 	log.Debug("Reinjecting stale transactions", "count", len(reinject))
 	senderCacher.recover(pool.signer, reinject)
 	pool.addTxsLocked(reinject, false)
+
+	// Update all fork indicators by the next pending block number.
+	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
+	pool.istanbul = pool.chainconfig.IsIstanbul(next)
 }
 
 // promoteExecutables moves transactions that have become processable from the
diff --git a/light/txpool.go b/light/txpool.go
index 281af18b2a12..f1bcf3c9f491 100644
--- a/light/txpool.go
+++ b/light/txpool.go
@@ -19,6 +19,7 @@ package light
 import (
 	"context"
 	"fmt"
+	"math/big"
 	"sync"
 	"time"
 
@@ -66,7 +67,7 @@ type TxPool struct {
 	mined    map[common.Hash][]*types.Transaction // mined transactions by block hash
 	clearIdx uint64                               // earliest block nr that can contain mined tx info
 
-	homestead bool
+	istanbul bool // Fork indicator whether we are in the istanbul stage.
 }
 
 // TxRelayBackend provides an interface to the mechanism that forwards transacions
@@ -310,7 +311,7 @@ func (pool *TxPool) setNewHead(head *types.Header) {
 	txc, _ := pool.reorgOnNewHead(ctx, head)
 	m, r := txc.getLists()
 	pool.relay.NewHead(pool.head, m, r)
-	pool.homestead = pool.config.IsHomestead(head.Number)
+
+	// Update the fork indicator by the next pending block number.
+	next := new(big.Int).Add(head.Number, big.NewInt(1))
+	pool.istanbul = pool.config.IsIstanbul(next)
 
 	pool.signer = types.MakeSigner(pool.config, head.Number)
 }
@@ -403,7 +407,7 @@ func (pool *TxPool) validateTx(ctx context.Context, tx *types.Transaction) error {
 	}
 
 	// Should supply enough intrinsic gas
-	gas, err := core.IntrinsicGas(tx.Data(), tx.To() == nil, pool.homestead)
+	gas, err := core.IntrinsicGas(tx.Data(), tx.To() == nil, true, pool.istanbul)
 	if err != nil {
 		return err
 	}
diff --git a/params/protocol_params.go b/params/protocol_params.go
index 93419fc367a5..87491b3ee23d 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -23,10 +23,10 @@ var (
 )
 
 const (
-	GasLimitBoundDivisor uint64 = 1024    // The bound divisor of the gas limit, used in update calculations.
-	MinGasLimit          uint64 = 5000    // Minimum the gas limit may ever be.
+	GasLimitBoundDivisor uint64 = 1024                // The bound divisor of the gas limit, used in update calculations.
+	MinGasLimit          uint64 = 5000                // Minimum the gas limit may ever be.
 	MaxGasLimit          uint64 = 0x7fffffffffffffff // Maximum the gas limit (2^63-1).
-	GenesisGasLimit      uint64 = 4712388 // Gas limit of the Genesis block.
+	GenesisGasLimit      uint64 = 4712388             // Gas limit of the Genesis block.
 	XDCGenesisGasLimit   uint64 = 84000000
 
 	MaximumExtraDataSize uint64 = 32 // Maximum size extra data may be after Genesis.
@@ -50,17 +50,23 @@ const (
 	JumpdestGas   uint64 = 1     // Refunded gas, once per SSTORE operation if the zeroness changes to zero.
 	EpochDuration uint64 = 30000 // Duration between proof-of-work epochs.
CallGas uint64 = 40 // Once per CALL operation & message call transaction. - CreateDataGas uint64 = 200 // - CallCreateDepth uint64 = 1024 // Maximum depth of call/create stack. - ExpGas uint64 = 10 // Once per EXP instruction - LogGas uint64 = 375 // Per LOG* operation. - CopyGas uint64 = 3 // - StackLimit uint64 = 1024 // Maximum size of VM stack allowed. - TierStepGas uint64 = 0 // Once per operation, for a selection of them. - LogTopicGas uint64 = 375 // Multiplied by the * of the LOG*, per LOG transaction. e.g. LOG0 incurs 0 * c_txLogTopicGas, LOG4 incurs 4 * c_txLogTopicGas. - CreateGas uint64 = 32000 // Once per CREATE operation & contract-creation transaction. + + CreateDataGas uint64 = 200 // + CallCreateDepth uint64 = 1024 // Maximum depth of call/create stack. + ExpGas uint64 = 10 // Once per EXP instruction + LogGas uint64 = 375 // Per LOG* operation. + CopyGas uint64 = 3 // + StackLimit uint64 = 1024 // Maximum size of VM stack allowed. + TierStepGas uint64 = 0 // Once per operation, for a selection of them. + LogTopicGas uint64 = 375 // Multiplied by the * of the LOG*, per LOG transaction. e.g. LOG0 incurs 0 * c_txLogTopicGas, LOG4 incurs 4 * c_txLogTopicGas. + CreateGas uint64 = 32000 // Once per CREATE operation & contract-creation transaction. + Create2Gas uint64 = 32000 // Once per CREATE2 operation + SelfdestructRefundGas uint64 = 24000 // Refunded following a selfdestruct operation. + MemoryGas uint64 = 3 // Times the address of the (highest referenced byte in memory + 1). NOTE: referencing happens on read, write and in instructions such as RETURN and CALL. + TxDataNonZeroGasFrontier uint64 = 68 // Per byte of data attached to a transaction that is not equal to zero. NOTE: Not payable on data of calls between transactions. + TxDataNonZeroGasEIP2028 uint64 = 16 // Per byte of non zero data attached to a transaction after EIP 2028 (part in Istanbul) + SuicideRefundGas uint64 = 24000 // Refunded following a suicide operation. - MemoryGas uint64 = 3 // Times the address of the (highest referenced byte in memory + 1). NOTE: referencing happens on read, write and in instructions such as RETURN and CALL. TxDataNonZeroGas uint64 = 68 // Per byte of data attached to a transaction that is not equal to zero. NOTE: Not payable on data of calls between transactions. MaxCodeSize = 24576 // Maximum bytecode to permit for a contract @@ -104,11 +110,6 @@ const ( SstoreResetGasEIP2200 uint64 = 5000 // Once per SSTORE operation from clean non-zero to something else SstoreClearsScheduleRefundEIP2200 uint64 = 15000 // Once per SSTORE operation for clearing an originally existing storage slot - Create2Gas uint64 = 32000 // Once per CREATE2 operation - SelfdestructRefundGas uint64 = 24000 // Refunded following a selfdestruct operation. - TxDataNonZeroGasFrontier uint64 = 68 // Per byte of data attached to a transaction that is not equal to zero. NOTE: Not payable on data of calls between transactions. - TxDataNonZeroGasEIP2028 uint64 = 16 // Per byte of non zero data attached to a transaction after EIP 2028 (part in Istanbul) - // These have been changed during the course of the chain CallGasFrontier uint64 = 40 // Once per CALL operation & message call transaction. 
CallGasEIP150 uint64 = 700 // Static portion of gas for CALL-derivates after EIP 150 (Tangerine) From 742a7f9348731ea45c54f3e7d84999dcc7f50053 Mon Sep 17 00:00:00 2001 From: Daniel Liu Date: Fri, 10 May 2024 19:25:45 +0800 Subject: [PATCH 23/23] core, metrics: switch some invalid counters to gauges (#20047) --- core/tx_pool.go | 40 ++++++++++++++++++++-------------------- metrics/gauge.go | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 20 deletions(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index f097b25501c5..fab4995c36dc 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -110,9 +110,9 @@ var ( invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil) underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) - pendingCounter = metrics.NewRegisteredCounter("txpool/pending", nil) - queuedCounter = metrics.NewRegisteredCounter("txpool/queued", nil) - localCounter = metrics.NewRegisteredCounter("txpool/local", nil) + pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) + queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) + localGauge = metrics.NewRegisteredGauge("txpool/local", nil) ) // TxStatus is the current status of a transaction as seen by the pool. @@ -730,7 +730,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e } } if local || pool.locals.contains(from) { - localCounter.Inc(1) + localGauge.Inc(1) } pool.journalTx(from, tx) @@ -760,7 +760,7 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er queuedReplaceMeter.Mark(1) } else { // Nothing was replaced, bump the queued counter - queuedCounter.Inc(1) + queuedGauge.Inc(1) } if pool.all.Get(hash) == nil { pool.all.Add(tx) @@ -809,7 +809,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T pendingReplaceMeter.Mark(1) } else { // Nothing was replaced, bump the pending counter - pendingCounter.Inc(1) + pendingGauge.Inc(1) } // Failsafe to work around direct pending inserts (tests) if pool.all.Get(hash) == nil { @@ -840,7 +840,7 @@ func (pool *TxPool) promoteSpecialTx(addr common.Address, tx *types.Transaction) pendingReplaceMeter.Mark(1) } else { // Nothing was replaced, bump the pending counter - pendingCounter.Inc(1) + pendingGauge.Inc(1) } list.txs.Put(tx) if cost := tx.Cost(); list.costcap.Cmp(cost) < 0 { @@ -980,7 +980,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { pool.priced.Removed(1) } if pool.locals.contains(addr) { - localCounter.Dec(1) + localGauge.Dec(1) } // Remove the transaction from the pending lists and reset the account nonce if pending := pool.pending[addr]; pending != nil { @@ -997,7 +997,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { // Update the account nonce if needed pool.pendingNonces.setIfLower(addr, tx.Nonce()) // Reduce the pending counter - pendingCounter.Dec(int64(1 + len(invalids))) + pendingGauge.Dec(int64(1 + len(invalids))) return } } @@ -1005,7 +1005,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { if future := pool.queue[addr]; future != nil { if removed, _ := future.Remove(tx); removed { // Reduce the queued counter - queuedCounter.Dec(1) + queuedGauge.Dec(1) } if future.Empty() { delete(pool.queue, addr) @@ -1313,7 +1313,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans promoted = append(promoted, tx) } } - queuedCounter.Dec(int64(len(readies))) + queuedGauge.Dec(int64(len(readies))) // Drop all transactions 
over the allowed limit var caps types.Transactions @@ -1328,9 +1328,9 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans } // Mark all the items dropped as removed pool.priced.Removed(len(forwards) + len(drops) + len(caps)) - queuedCounter.Dec(int64(len(forwards) + len(drops) + len(caps))) + queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) if pool.locals.contains(addr) { - localCounter.Dec(int64(len(forwards) + len(drops) + len(caps))) + localGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) } // Delete the entire queue entry if it became empty. if list.Empty() { @@ -1389,9 +1389,9 @@ func (pool *TxPool) truncatePending() { log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) } pool.priced.Removed(len(caps)) - pendingCounter.Dec(int64(len(caps))) + pendingGauge.Dec(int64(len(caps))) if pool.locals.contains(offenders[i]) { - localCounter.Dec(int64(len(caps))) + localGauge.Dec(int64(len(caps))) } pending-- } @@ -1416,9 +1416,9 @@ func (pool *TxPool) truncatePending() { log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) } pool.priced.Removed(len(caps)) - pendingCounter.Dec(int64(len(caps))) + pendingGauge.Dec(int64(len(caps))) if pool.locals.contains(addr) { - localCounter.Dec(int64(len(caps))) + localGauge.Dec(int64(len(caps))) } pending-- } @@ -1506,9 +1506,9 @@ func (pool *TxPool) demoteUnexecutables() { log.Trace("Demoting pending transaction", "hash", hash) pool.enqueueTx(hash, tx) } - pendingCounter.Dec(int64(len(olds) + len(drops) + len(invalids))) + pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) if pool.locals.contains(addr) { - localCounter.Dec(int64(len(olds) + len(drops) + len(invalids))) + localGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) } // If there's a gap in front, alert (should never happen) and postpone all transactions if list.Len() > 0 && list.txs.Get(nonce) == nil { @@ -1518,7 +1518,7 @@ func (pool *TxPool) demoteUnexecutables() { log.Warn("Demoting invalidated transaction", "hash", hash) pool.enqueueTx(hash, tx) } - pendingCounter.Dec(int64(len(gapped))) + pendingGauge.Dec(int64(len(gapped))) } // Delete the entire queue entry if it became empty. if list.Empty() { diff --git a/metrics/gauge.go b/metrics/gauge.go index 0fbfdb86033b..b6b2758b0d13 100644 --- a/metrics/gauge.go +++ b/metrics/gauge.go @@ -6,6 +6,8 @@ import "sync/atomic" type Gauge interface { Snapshot() Gauge Update(int64) + Dec(int64) + Inc(int64) Value() int64 } @@ -65,6 +67,16 @@ func (GaugeSnapshot) Update(int64) { panic("Update called on a GaugeSnapshot") } +// Dec panics. +func (GaugeSnapshot) Dec(int64) { + panic("Dec called on a GaugeSnapshot") +} + +// Inc panics. +func (GaugeSnapshot) Inc(int64) { + panic("Inc called on a GaugeSnapshot") +} + // Value returns the value at the time the snapshot was taken. func (g GaugeSnapshot) Value() int64 { return int64(g) } @@ -77,6 +89,12 @@ func (NilGauge) Snapshot() Gauge { return NilGauge{} } // Update is a no-op. func (NilGauge) Update(v int64) {} +// Dec is a no-op. +func (NilGauge) Dec(i int64) {} + +// Inc is a no-op. +func (NilGauge) Inc(i int64) {} + // Value is a no-op. func (NilGauge) Value() int64 { return 0 } @@ -101,6 +119,16 @@ func (g *StandardGauge) Value() int64 { return atomic.LoadInt64(&g.value) } +// Dec decrements the gauge's current value by the given amount. +func (g *StandardGauge) Dec(i int64) { + atomic.AddInt64(&g.value, -i) +} + +// Inc increments the gauge's current value by the given amount. 
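+// Like Dec, it is safe for concurrent use since the update is an atomic add.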
+func (g *StandardGauge) Inc(i int64) { + atomic.AddInt64(&g.value, i) +} + // FunctionalGauge returns value from given function type FunctionalGauge struct { value func() int64 @@ -118,3 +146,13 @@ func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) } func (FunctionalGauge) Update(int64) { panic("Update called on a FunctionalGauge") } + +// Dec panics. +func (FunctionalGauge) Dec(int64) { + panic("Dec called on a FunctionalGauge") +} + +// Inc panics. +func (FunctionalGauge) Inc(int64) { + panic("Inc called on a FunctionalGauge") +}
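
To make the EIP-2028 part of patch 22 concrete, the following is a minimal, self-contained sketch of the data-cost arithmetic in IntrinsicGas above. It is illustrative only: the constants mirror the standard gas schedule (21000 base, 4 gas per zero byte), the contract-creation surcharge is omitted so this covers plain calls, and intrinsicGas/main are hypothetical names, not pool API. The only thing the istanbul flag changes is the price of a non-zero calldata byte, from 68 gas down to 16:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

// Gas schedule constants, mirroring params/protocol_params.go.
const (
	txGas                    uint64 = 21000 // Base cost of a non-creation transaction.
	txDataZeroGas            uint64 = 4     // Per zero byte of calldata.
	txDataNonZeroGasFrontier uint64 = 68    // Per non-zero byte before EIP-2028.
	txDataNonZeroGasEIP2028  uint64 = 16    // Per non-zero byte after EIP-2028 (Istanbul).
)

// intrinsicGas reproduces the data-cost portion of core.IntrinsicGas: count
// the non-zero bytes, price them by fork, then add the cheaper zero bytes,
// with the same uint64 overflow guards as the pool version.
func intrinsicGas(data []byte, isEIP2028 bool) (uint64, error) {
	gas := txGas

	var nz uint64
	for _, b := range data {
		if b != 0 {
			nz++
		}
	}
	nonZeroGas := txDataNonZeroGasFrontier
	if isEIP2028 {
		nonZeroGas = txDataNonZeroGasEIP2028
	}
	if (math.MaxUint64-gas)/nonZeroGas < nz {
		return 0, errors.New("gas uint64 overflow")
	}
	gas += nz * nonZeroGas

	z := uint64(len(data)) - nz
	if (math.MaxUint64-gas)/txDataZeroGas < z {
		return 0, errors.New("gas uint64 overflow")
	}
	gas += z * txDataZeroGas
	return gas, nil
}

func main() {
	data := make([]byte, 1024) // 1 KiB of all non-zero calldata
	for i := range data {
		data[i] = 0xff
	}
	pre, _ := intrinsicGas(data, false)
	post, _ := intrinsicGas(data, true)
	fmt.Println("pre-Istanbul: ", pre)  // 21000 + 1024*68 = 90632
	fmt.Println("post-Istanbul:", post) // 21000 + 1024*16 = 37384
}
```

This pricing is also why both pools recompute their istanbul flag from head.Number+1 on every reset: validateTx must charge a queued transaction by the rules of the block it could be included in, not those of the current head.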