Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions consensus/bor/bor.go
Original file line number Diff line number Diff line change
Expand Up @@ -1333,6 +1333,7 @@ func (c *Bor) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *typ
)

if IsSprintStart(headerNumber, c.config.CalculateSprint(headerNumber)) {
borStart := time.Now()
cx := statefull.ChainContext{Chain: chain, Bor: c}

// check and commit span
Expand All @@ -1351,6 +1352,7 @@ func (c *Bor) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *typ
return nil, nil, 0, err
}
}
state.BorConsensusTime = time.Since(borStart)
}

if err = c.changeContractCodeIfNeeded(headerNumber, state); err != nil {
Expand Down
8 changes: 8 additions & 0 deletions core/state/statedb.go
Original file line number Diff line number Diff line change
Expand Up @@ -1139,6 +1139,14 @@ func (s *StateDB) Copy() *StateDB {
logSize: s.logSize,
preimages: maps.Clone(s.preimages),

// Timing fields — must be carried over so metrics in resultLoop see
// the values accumulated during fillTransactions, not zero.
AccountReads: s.AccountReads,
StorageReads: s.StorageReads,
SnapshotAccountReads: s.SnapshotAccountReads,
SnapshotStorageReads: s.SnapshotStorageReads,
BorConsensusTime: s.BorConsensusTime,

// Do we need to copy the access list and transient storage?
// In practice: No. At the start of a transaction, these two lists are empty.
// In practice, we only ever copy state _between_ transactions/blocks, never
Expand Down
106 changes: 100 additions & 6 deletions miner/worker.go
Original file line number Diff line number Diff line change
Expand Up @@ -141,8 +141,48 @@ var (
nil,
metrics.NewExpDecaySample(1028, 0.015),
)

// Trie read/hash/execution metrics for block production (mirroring blockchain.go import path).
// Namespaced under worker/chain/ to distinguish from import-path chain/ metrics.
workerAccountReadTimer = metrics.NewRegisteredResettingTimer("worker/chain/account/reads", nil)
workerStorageReadTimer = metrics.NewRegisteredResettingTimer("worker/chain/storage/reads", nil)
workerSnapshotAccountReadTimer = metrics.NewRegisteredResettingTimer("worker/chain/snapshot/account/reads", nil)
workerSnapshotStorageReadTimer = metrics.NewRegisteredResettingTimer("worker/chain/snapshot/storage/reads", nil)
workerAccountUpdateTimer = metrics.NewRegisteredResettingTimer("worker/chain/account/updates", nil)
workerStorageUpdateTimer = metrics.NewRegisteredResettingTimer("worker/chain/storage/updates", nil)
workerAccountHashTimer = metrics.NewRegisteredResettingTimer("worker/chain/account/hashes", nil)
workerStorageHashTimer = metrics.NewRegisteredTimer("worker/chain/storage/hashes", nil)
workerBorConsensusTimer = metrics.NewRegisteredTimer("worker/chain/bor/consensus", nil)
workerBlockExecutionTimer = metrics.NewRegisteredTimer("worker/chain/execution", nil)
workerMgaspsTimer = metrics.NewRegisteredResettingTimer("worker/chain/mgasps", nil)

// Trie commit metrics for block production (populated after WriteBlockAndSetHead → CommitWithUpdate).
workerAccountCommitTimer = metrics.NewRegisteredResettingTimer("worker/chain/account/commits", nil)
workerStorageCommitTimer = metrics.NewRegisteredResettingTimer("worker/chain/storage/commits", nil)
workerSnapshotCommitTimer = metrics.NewRegisteredResettingTimer("worker/chain/snapshot/commits", nil)
workerTriedbCommitTimer = metrics.NewRegisteredResettingTimer("worker/chain/triedb/commits", nil)
workerWitnessCollectionTimer = metrics.NewRegisteredTimer("worker/chain/witness/collection", nil)
)

// firstNonZeroTime returns a if non-zero, otherwise b.
func firstNonZeroTime(a, b time.Time) time.Time {
if !a.IsZero() {
return a
}
return b
}

// productionStartFrom reads the productionStart timestamp out of genParams,
// guarding against a nil pointer: a nil genParams yields the zero Time. This
// mirrors the nil-guarded access to genParams already performed elsewhere in
// commit() (e.g. the genParams != nil check at the prefetch coverage block).
func productionStartFrom(p *generateParams) time.Time {
	if p != nil {
		return p.productionStart
	}
	return time.Time{}
}

func newRegisteredCustomTimer(name string, reservoirSize int) *metrics.Timer {
return metrics.GetOrRegister(name, func() interface{} {
return metrics.NewCustomTimer(
Expand Down Expand Up @@ -217,10 +257,12 @@ func (env *environment) discard() {

// task contains all information for consensus engine sealing and result submitting.
type task struct {
	receipts  []*types.Receipt // receipts for the block's transactions; passed to WriteBlockAndSetHead in resultLoop
	state     *state.StateDB   // state after block execution; source of the read/update/commit timing metrics and written via WriteBlockAndSetHead
	block     *types.Block     // the assembled block handed to the sealer and later written to the chain
	createdAt time.Time        // task creation time; used to log total elapsed time on a successful seal

	productionElapsed    time.Duration // elapsed from after prepareWork to task submission (excludes sealing wait); used for workerMgaspsTimer and workerBlockExecutionTimer
	intermediateRootTime time.Duration // time spent in IntermediateRoot inside FinalizeAndAssemble; subtracted when computing workerBlockExecutionTimer
}

// txFits reports whether the transaction fits into the block size limit.
Expand Down Expand Up @@ -1030,10 +1072,39 @@ func (w *worker) resultLoop() {
witness.SetHeader(block.Header())
}

// Execution metrics: emitted before write because these values are final after
// FinalizeAndAssemble and do not depend on write success — matching the import path
// which emits read/update/hash/execution/bor metrics before writeBlockAndSetHead.
// Emitting here avoids losing these observations on a rare write failure.
if metrics.Enabled() {
workerAccountReadTimer.Update(task.state.AccountReads)
workerStorageReadTimer.Update(task.state.StorageReads)
workerSnapshotAccountReadTimer.Update(task.state.SnapshotAccountReads)
workerSnapshotStorageReadTimer.Update(task.state.SnapshotStorageReads)
workerAccountUpdateTimer.Update(task.state.AccountUpdates)
workerStorageUpdateTimer.Update(task.state.StorageUpdates)
workerAccountHashTimer.Update(task.state.AccountHashes)
workerStorageHashTimer.Update(task.state.StorageHashes)
workerBorConsensusTimer.Update(task.state.BorConsensusTime)
trieRead := task.state.SnapshotAccountReads + task.state.AccountReads +
task.state.SnapshotStorageReads + task.state.StorageReads
// productionElapsed covers fillTx + FinalizeAndAssemble; subtract trie reads,
// Bor consensus time, and IntermediateRoot time to isolate pure EVM execution time.
// Mirrors the import path formula in blockchain.go (writeBlockAndSetHead),
// where ptime already excludes vtime (IntermediateRoot) via explicit subtraction.
// Clamped to zero to avoid negative histogram samples from measurement jitter.
execTime := task.productionElapsed - trieRead - task.state.BorConsensusTime - task.intermediateRootTime
if execTime < 0 {
execTime = 0
}
workerBlockExecutionTimer.Update(execTime)
}

// Commit block and state to database.
writeStart := time.Now()
_, err = w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true)
writeBlockAndSetHeadTimer.Update(time.Since(writeStart))
writeElapsed := time.Since(writeStart)
writeBlockAndSetHeadTimer.Update(writeElapsed)

if err != nil {
log.Error("Failed writing block to chain", "err", err)
Expand All @@ -1044,6 +1115,25 @@ func (w *worker) resultLoop() {
continue
}

// Commit metrics: emitted only after a successful write because these values are
// populated by WriteBlockAndSetHead → CommitWithUpdate. Emitting on failure would
// record zeroes or stale data — matching the import path which also gates commit
// metrics after a successful writeBlockAndSetHead.
if metrics.Enabled() {
workerAccountCommitTimer.Update(task.state.AccountCommits)
workerStorageCommitTimer.Update(task.state.StorageCommits)
workerSnapshotCommitTimer.Update(task.state.SnapshotCommits)
workerTriedbCommitTimer.Update(task.state.TrieDBCommits)
workerWitnessCollectionTimer.Update(task.state.WitnessCollection)

// MGas/s: denominator includes both production and write time, matching blockchain.go
// which measures elapsed after writeBlockAndSetHead returns
// (gas * 1000 / elapsed_nanoseconds stores milli-gas/ns = MGas/s as a Duration value).
if total := task.productionElapsed + writeElapsed; total > 0 {
workerMgaspsTimer.Update(time.Duration(float64(block.GasUsed()) * 1000 / float64(total)))
}
}

log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
"elapsed", common.PrettyDuration(time.Since(task.createdAt)))

Expand Down Expand Up @@ -1555,6 +1645,7 @@ type generateParams struct {
prefetchReader state.ReaderWithStats // The prefetch reader to use for statistics
processReader state.ReaderWithStats // The process reader to use for statistics
prefetchedTxHashes *sync.Map // Map of successfully prefetched transaction hashes
productionStart time.Time // Start of full-block building (after optional empty pre-seal); used for productionElapsed
}

// makeHeader creates a new block header for sealing.
Expand Down Expand Up @@ -1927,6 +2018,9 @@ func (w *worker) buildAndCommitBlock(interrupt *atomic.Int32, noempty bool, genP
emptyWork.state.ResetPrefetcher()
_ = w.commit(emptyWork, nil, false, start, genParams)
}
// Mark the start of full-block building. Set after the optional empty pre-seal commit so that
// productionElapsed for the full block does not include empty-block overhead.
genParams.productionStart = time.Now()
// Fill pending transactions from the txpool into the block.
err = w.fillTransactions(interrupt, work)

Expand Down Expand Up @@ -2214,7 +2308,7 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti
}

select {
case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now()}:
case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now(), productionElapsed: time.Since(firstNonZeroTime(productionStartFrom(genParams), start)), intermediateRootTime: commitTime}:
fees := totalFees(block, env.receipts)
feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(params.Ether))
log.Info("Commit new sealing work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
Expand Down
Loading