-
Notifications
You must be signed in to change notification settings - Fork 23
Feature/validator.ValidateMulti() #405
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
01cfe87
8cf46d1
83e1826
8c35a03
19692bc
53bffdc
d7fcb92
b49be80
4285936
582a5f1
bb82c32
f118cfd
2ae9b2c
cfb0da5
c79f76f
0c909ac
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -681,21 +681,33 @@ func (b *Block) validOrderAndBlessed(ctx context.Context, logger ulogger.Logger, | |
| parentSpendsMap: NewSplitSyncedParentMap(4096), | ||
| } | ||
|
|
||
| concurrency := b.getValidationConcurrency(validOrderAndBlessedConcurrency) | ||
| g, gCtx := errgroup.WithContext(ctx) | ||
| util.SafeSetLimit(g, concurrency) | ||
| // Calculate optimal worker count for I/O-bound subtree validation | ||
| numWorkers := getOptimalSubtreeWorkerCount(len(b.SubtreeSlices), validOrderAndBlessedConcurrency) | ||
|
|
||
| for sIdx := 0; sIdx < len(b.SubtreeSlices); sIdx++ { | ||
| subtree := b.SubtreeSlices[sIdx] | ||
| sIdx := sIdx | ||
| // Create worker pool with parent context for proper cancellation/tracing | ||
| pool := newSubtreeWorkerPool(ctx, b, numWorkers, len(b.SubtreeSlices), logger, deps, validationCtx) | ||
| pool.Start() | ||
|
|
||
| g.Go(func() error { | ||
| return b.validateSubtree(gCtx, logger, deps, validationCtx, subtree, sIdx) | ||
| // Submit all subtrees as jobs to the worker pool | ||
| for sIdx := 0; sIdx < len(b.SubtreeSlices); sIdx++ { | ||
| pool.Submit(subtreeValidationJob{ | ||
| subtreeIndex: sIdx, | ||
| subtree: b.SubtreeSlices[sIdx], | ||
| }) | ||
| } | ||
|
|
||
| // do not wrap the error again, the error is already wrapped | ||
| return g.Wait() | ||
| // Wait for all validations to complete | ||
| pool.Close() | ||
|
|
||
| // Check for validation errors | ||
| for _, result := range pool.results { | ||
| if result.err != nil { | ||
| // Do not wrap the error again, the error is already wrapped | ||
| return result.err | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Critical: Early return on first error loses other validation errors. The current implementation returns immediately on the first subtree validation error found. This means that if multiple subtrees have validation errors, only the first one discovered (which could be any of them, due to concurrent execution) will be reported. This makes debugging harder because developers won't see all the validation failures at once. Consider either:
The previous errgroup implementation had the same behavior, but it's worth considering whether this is the desired outcome for block validation. |
||
| } | ||
| } | ||
|
|
||
| return nil | ||
| } | ||
|
|
||
| func (b *Block) validateSubtree(ctx context.Context, logger ulogger.Logger, deps *validationDependencies, | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Significant Aerospike tuning changes without justification
This PR changes several critical Aerospike performance parameters:
These changes significantly affect UTXO store write performance and memory usage. While the PR title mentions ValidateMulti(), the Aerospike tuning seems unrelated to that feature.
Questions: