99 changes: 0 additions & 99 deletions .github/workflows/go.yml

This file was deleted.

1 change: 1 addition & 0 deletions cmd/geth/main.go
@@ -72,6 +72,7 @@ var (
utils.TxPoolNoLocalsFlag,
utils.TxPoolJournalFlag,
utils.TxPoolRejournalFlag,
utils.TxPoolBroadcastPendingLocalTxFlag,
utils.TxPoolPriceLimitFlag,
utils.TxPoolPriceBumpFlag,
utils.TxPoolAccountSlotsFlag,
15 changes: 11 additions & 4 deletions cmd/utils/flags.go
@@ -404,11 +404,15 @@ var (
Value: ethconfig.Defaults.TxPool.Rejournal,
Category: flags.TxPoolCategory,
}
TxPoolBroadcastPendingLocalTxFlag = &cli.DurationFlag{
Name: "txpool.broadcastpendinglocaltx",
Usage: "Time interval to broadcast the pending local transaction",
Value: legacypool.DefaultConfig.BroadcastPendingLocalTx,
}
TxPoolPriceLimitFlag = &cli.Uint64Flag{
Name: "txpool.pricelimit",
Usage: "Minimum gas price tip to enforce for acceptance into the pool",
Value: ethconfig.Defaults.TxPool.PriceLimit,
Category: flags.TxPoolCategory,
Name: "txpool.pricelimit",
Usage: "Minimum gas price tip to enforce for acceptance into the pool",
Value: ethconfig.Defaults.TxPool.PriceLimit,
}
TxPoolPriceBumpFlag = &cli.Uint64Flag{
Name: "txpool.pricebump",
@@ -1516,6 +1520,9 @@ func setTxPool(ctx *cli.Context, cfg *legacypool.Config) {
if ctx.IsSet(TxPoolRejournalFlag.Name) {
cfg.Rejournal = ctx.Duration(TxPoolRejournalFlag.Name)
}
if ctx.IsSet(TxPoolBroadcastPendingLocalTxFlag.Name) {
cfg.BroadcastPendingLocalTx = ctx.Duration(TxPoolBroadcastPendingLocalTxFlag.Name)
}
if ctx.IsSet(TxPoolPriceLimitFlag.Name) {
cfg.PriceLimit = ctx.Uint64(TxPoolPriceLimitFlag.Name)
}
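For context on how a duration option like `--txpool.broadcastpendinglocaltx` is typically consumed once it reaches the pool configuration, here is a minimal, self-contained sketch of a periodic re-broadcast loop driven by such an interval. The `rebroadcastConfig`, `collect` and `broadcast` names are placeholders for illustration, not code from this PR.

```go
package main

import (
	"fmt"
	"time"
)

// rebroadcastConfig mirrors the shape of the new pool option: a zero value
// disables the loop, any positive duration enables periodic re-broadcasting.
type rebroadcastConfig struct {
	BroadcastPendingLocalTx time.Duration
}

// rebroadcastLoop periodically invokes broadcast with whatever collect
// returns, until stop is closed. collect and broadcast stand in for
// "gather pending local txs" and "announce them to peers".
func rebroadcastLoop(cfg rebroadcastConfig, collect func() []string, broadcast func([]string), stop <-chan struct{}) {
	if cfg.BroadcastPendingLocalTx <= 0 {
		return // feature disabled
	}
	ticker := time.NewTicker(cfg.BroadcastPendingLocalTx)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if txs := collect(); len(txs) > 0 {
				broadcast(txs)
			}
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	go rebroadcastLoop(
		rebroadcastConfig{BroadcastPendingLocalTx: 200 * time.Millisecond},
		func() []string { return []string{"0xabc"} },         // pretend pending local tx hashes
		func(txs []string) { fmt.Println("re-broadcasting", txs) },
		stop,
	)
	time.Sleep(time.Second)
	close(stop)
}
```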
61 changes: 37 additions & 24 deletions core/blockchain.go
@@ -118,10 +118,11 @@ var (
)

const (
bodyCacheLimit = 256
blockCacheLimit = 256
receiptsCacheLimit = 32
txLookupCacheLimit = 1024
bodyCacheLimit = 256
blockCacheLimit = 256
receiptsCacheLimit = 32
transferLogsCacheLimit = 32
txLookupCacheLimit = 1024

// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
//
@@ -319,10 +320,11 @@ type BlockChain struct {
currentSafeBlock atomic.Pointer[types.Header] // Latest (consensus) safe block
historyPrunePoint atomic.Pointer[history.PrunePoint]

bodyCache *lru.Cache[common.Hash, *types.Body]
bodyRLPCache *lru.Cache[common.Hash, rlp.RawValue]
receiptsCache *lru.Cache[common.Hash, []*types.Receipt] // Receipts cache with all fields derived
blockCache *lru.Cache[common.Hash, *types.Block]
bodyCache *lru.Cache[common.Hash, *types.Body]
bodyRLPCache *lru.Cache[common.Hash, rlp.RawValue]
receiptsCache *lru.Cache[common.Hash, []*types.Receipt] // Receipts cache with all fields derived
transferLogsCache *lru.Cache[common.Hash, []*types.TransferLog] // Cache for the most recent transfer logs per block
blockCache *lru.Cache[common.Hash, *types.Block]

txLookupLock sync.RWMutex
txLookupCache *lru.Cache[common.Hash, txLookup]
@@ -372,19 +374,20 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
log.Info("")

bc := &BlockChain{
chainConfig: chainConfig,
cfg: cfg,
db: db,
triedb: triedb,
triegc: prque.New[int64, common.Hash](nil),
chainmu: syncx.NewClosableMutex(),
bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit),
bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit),
blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit),
engine: engine,
logger: cfg.VmConfig.Tracer,
chainConfig: chainConfig,
cfg: cfg,
db: db,
triedb: triedb,
triegc: prque.New[int64, common.Hash](nil),
chainmu: syncx.NewClosableMutex(),
bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit),
bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit),
blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit),
transferLogsCache: lru.NewCache[common.Hash, []*types.TransferLog](transferLogsCacheLimit),
engine: engine,
logger: cfg.VmConfig.Tracer,
}
bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
if err != nil {
@@ -1068,6 +1071,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
// removed by the hc.SetHead function.
rawdb.DeleteBody(db, hash, num)
rawdb.DeleteReceipts(db, hash, num)
rawdb.DeleteTransferLogs(db, hash, num)
}
// Todo(rjl493456442) txlookup, log index, etc
}
@@ -1092,6 +1096,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
bc.bodyCache.Purge()
bc.bodyRLPCache.Purge()
bc.receiptsCache.Purge()
bc.transferLogsCache.Purge()
bc.blockCache.Purge()
bc.txLookupCache.Purge()

@@ -1425,7 +1430,11 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// Ensure genesis is in the ancient store
if blockChain[0].NumberU64() == 1 {
if frozen, _ := bc.db.Ancients(); frozen == 0 {
writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []rlp.RawValue{rlp.EmptyList})
tfLogs, err := rawdb.ReadTransferLogs(bc.db, bc.genesisBlock.Hash(), frozen)
if err != nil {
return 0, err
}
writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []rlp.RawValue{rlp.EmptyList}, tfLogs)
if err != nil {
log.Error("Error writing genesis to ancients", "err", err)
return 0, err
@@ -1435,7 +1444,7 @@ }
}
}
// Write all chain data to ancients.
writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain)
writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, nil)
if err != nil {
log.Error("Error importing chain data to ancients", "err", err)
return 0, err
@@ -1495,6 +1504,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
rawdb.WriteBlock(batch, block)
rawdb.WriteRawReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])

// Transfer logs are not available for fast sync blocks, so mark them as missing
rawdb.WriteMissingTransferLogs(batch, block.Hash(), block.NumberU64())

// Write everything that belongs to the block into the database, so that
// all components of the body (body, receipts) are complete, except the
// transaction indexes (those will be created once sync is finished).
@@ -1599,6 +1611,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
blockBatch := bc.db.NewBatch()
rawdb.WriteBlock(blockBatch, block)
rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
rawdb.WriteTransferLogs(blockBatch, block.Hash(), block.NumberU64(), statedb.TransferLogs())
rawdb.WritePreimages(blockBatch, statedb.Preimages())
if err := blockBatch.Write(); err != nil {
log.Crit("Failed to write block into disk", "err", err)
@@ -2771,7 +2784,7 @@ func (bc *BlockChain) InsertHeadersBeforeCutoff(headers []*types.Header) (int, e
first = headers[0].Number.Uint64()
)
if first == 1 && frozen == 0 {
_, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []rlp.RawValue{rlp.EmptyList})
_, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []rlp.RawValue{rlp.EmptyList}, nil)
if err != nil {
log.Error("Error writing genesis to ancients", "err", err)
return 0, err
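The transferLogsCache added above is an LRU keyed by block hash; the read path it is presumably meant to back is the standard check-cache-then-database pattern. Below is a self-contained sketch of that pattern using only plain standard-library types; logStore, readDB and transferLog are stand-ins for illustration, not the fork's actual types.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// transferLog is a stand-in for types.TransferLog; only the read-through
// caching pattern matters here, not the payload.
type transferLog struct {
	From, To string
	Value    uint64
}

var errNotFound = errors.New("not found")

// logStore pairs a slow backing lookup (the rawdb read in the real code)
// with a small in-memory cache keyed by block hash.
type logStore struct {
	mu     sync.Mutex
	cache  map[string][]*transferLog
	readDB func(hash string) ([]*transferLog, error) // stands in for the database read
}

func (s *logStore) getTransferLogs(hash string) ([]*transferLog, error) {
	s.mu.Lock()
	if logs, ok := s.cache[hash]; ok { // cache hit: skip the database entirely
		s.mu.Unlock()
		return logs, nil
	}
	s.mu.Unlock()

	logs, err := s.readDB(hash) // cache miss: fall back to the database
	if err != nil {
		return nil, err
	}
	s.mu.Lock()
	s.cache[hash] = logs // populate the cache for subsequent readers
	s.mu.Unlock()
	return logs, nil
}

func main() {
	store := &logStore{
		cache: make(map[string][]*transferLog),
		readDB: func(hash string) ([]*transferLog, error) {
			if hash != "0xblock" {
				return nil, errNotFound
			}
			return []*transferLog{{From: "0xa", To: "0xb", Value: 1}}, nil
		},
	}
	logs, err := store.getTransferLogs("0xblock")
	fmt.Println(len(logs), err)
	logs, err = store.getTransferLogs("0xblock") // served from the cache this time
	fmt.Println(len(logs), err)
}
```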
13 changes: 10 additions & 3 deletions core/blockchain_reader.go
@@ -21,6 +21,8 @@ import (
"fmt"
"math/big"

"github.com/ethereum/go-ethereum"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
@@ -524,10 +526,15 @@ func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscr
}

// GetTransferLogs retrieves the transfer logs for all transactions in a given block.
func (bc *BlockChain) GetTransferLogs(hash common.Hash) []*types.TransferLog {
func (bc *BlockChain) GetTransferLogs(hash common.Hash) ([]*types.TransferLog, error) {
number, ok := rawdb.ReadHeaderNumber(bc.db, hash)
if !ok {
return nil
return nil, ethereum.NotFound
}
transferLogs, err := rawdb.ReadTransferLogs(bc.db, hash, number)
if err != nil {
return nil, err
}
return rawdb.ReadTransferLogs(bc.db, hash, number)
bc.transferLogsCache.Add(hash, transferLogs)
return transferLogs, nil
}
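With this signature change, callers of GetTransferLogs must now distinguish an unknown block (ethereum.NotFound) from a genuine read error. A sketch of the calling pattern, assuming this fork's types.TransferLog; the transferLogReader interface and dumpTransferLogs helper are illustrative only, not part of the PR.

```go
package example

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// transferLogReader captures just the method this PR changes; *core.BlockChain
// would satisfy it after the signature change.
type transferLogReader interface {
	GetTransferLogs(hash common.Hash) ([]*types.TransferLog, error)
}

// dumpTransferLogs shows the error handling the new two-value return calls for:
// ethereum.NotFound means the block hash is simply unknown, anything else is a
// real storage or decoding failure.
func dumpTransferLogs(chain transferLogReader, hash common.Hash) {
	logs, err := chain.GetTransferLogs(hash)
	switch {
	case errors.Is(err, ethereum.NotFound):
		fmt.Println("block not found:", hash)
	case err != nil:
		fmt.Println("failed to read transfer logs:", err)
	default:
		fmt.Printf("block %s has %d transfer logs\n", hash, len(logs))
	}
}
```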
6 changes: 6 additions & 0 deletions core/events.go
@@ -23,6 +23,12 @@ import (
// NewTxsEvent is posted when a batch of transactions enter the transaction pool.
type NewTxsEvent struct{ Txs []*types.Transaction }

// PendingLocalTxsEvent is posted when there are pending local transactions in the transaction pool.
type PendingLocalTxsEvent struct{ Txs []*types.Transaction }

// NewQueuedTxsEvent is posted when a batch of transactions is added to the queued (non-executable) set of the transaction pool.
type NewQueuedTxsEvent struct{ Txs []*types.Transaction }

// RemovedLogsEvent is posted when a reorg happens
type RemovedLogsEvent struct{ Logs []*types.Log }

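These new event types would normally be delivered through go-ethereum's existing event.Feed plumbing. A minimal sketch of a subscriber for the new PendingLocalTxsEvent follows; the feed wiring inside the tx pool is assumed here and is not shown in this diff.

```go
package example

import (
	"log"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/event"
)

// watchPendingLocals drains PendingLocalTxsEvent notifications from the given
// feed until the subscription fails. In the real node the feed would live in
// the tx pool and be sent to from the re-broadcast path.
func watchPendingLocals(feed *event.Feed) {
	ch := make(chan core.PendingLocalTxsEvent, 16)
	sub := feed.Subscribe(ch)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-ch:
			log.Printf("re-announcing %d pending local transactions", len(ev.Txs))
		case err := <-sub.Err():
			log.Printf("pending-local subscription ended: %v", err)
			return
		}
	}
}
```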
1 change: 1 addition & 0 deletions core/genesis.go
@@ -567,6 +567,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Blo
rawdb.WriteGenesisStateSpec(batch, block.Hash(), blob)
rawdb.WriteBlock(batch, block)
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), nil)
rawdb.WriteTransferLogs(batch, block.Hash(), block.NumberU64(), nil)
rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
rawdb.WriteHeadBlockHash(batch, block.Hash())
rawdb.WriteHeadFastBlockHash(batch, block.Hash())