Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
17 commits
Select commit Hold shift + click to select a range
2fd04ba
feat(txmetacache): implement Clock algorithm for 90% cache retention
freemans13 Feb 4, 2026
da24200
fix(txmetacache): prevent infinite loop in Clock eviction
freemans13 Feb 4, 2026
f84229c
fix(txmetacache): bound Clock sweep to prevent multi-second stalls
freemans13 Feb 4, 2026
2996a5c
Switch to clock txmetacache
freemans13 Feb 4, 2026
289226f
test(txmetacache): fix Clock test timeout and add sweep benchmark
freemans13 Feb 4, 2026
81b3d05
refactor(txmetacache): simplify Clock implementation and improve obse…
freemans13 Feb 4, 2026
92157c7
fix(txmetacache): make Clock listChunks() consistent with other buckets
freemans13 Feb 4, 2026
28a326d
fix(txmetacache): skip 4GB Clock test with race detector to prevent t…
freemans13 Feb 4, 2026
4f073c2
style(txmetacache): add blank line before BenchmarkCacheSweepThroughput
freemans13 Feb 4, 2026
70d339d
fix(txmetacache): use 1GB for Clock retention test with race detector
freemans13 Feb 4, 2026
eb52ecf
Merge branch 'main' of https://github.com/bsv-blockchain/teranode int…
freemans13 Feb 4, 2026
3c33b71
refactor(txmetacache): remove unused forcedEvictions tracking from Cl…
freemans13 Feb 4, 2026
c28f28e
feat(txmetacache): add configurable bucket type setting for runtime a…
freemans13 Feb 4, 2026
da5fe9a
fix(txmetacache): adjust Clock capacity and update docs to be honest …
freemans13 Feb 4, 2026
1dd2f94
style(txmetacache): fix gci struct field alignment in Stats
freemans13 Feb 4, 2026
451f15e
fix(txmetacache): decrement count in Del() and prioritize empty slots…
freemans13 Feb 4, 2026
ce2686b
docs(txmetacache): document Clock race condition and expose forcedEvi…
freemans13 Feb 4, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions services/subtreevalidation/Server.go
Original file line number Diff line number Diff line change
Expand Up @@ -227,11 +227,12 @@ func New(

// create a caching tx meta store
if tSettings.SubtreeValidation.TxMetaCacheEnabled {
logger.Infof("Using cached version of tx meta store")
bucketType := txmetacache.ParseBucketType(tSettings.SubtreeValidation.TxMetaCacheBucketType)
logger.Infof("Using cached version of tx meta store (algorithm: %s)", bucketType.String())

var err error

u.utxoStore, err = txmetacache.NewTxMetaCache(ctx, tSettings, logger, utxoStore, txmetacache.Unallocated)
u.utxoStore, err = txmetacache.NewTxMetaCache(ctx, tSettings, logger, utxoStore, bucketType)
if err != nil {
logger.Errorf("Failed to create tx meta cache: %v", err)
}
Expand Down
5 changes: 5 additions & 0 deletions settings.conf
Original file line number Diff line number Diff line change
Expand Up @@ -1135,6 +1135,11 @@ tracing_enabled.dev = false
# txMetaCacheMaxMB = 32768 # 32GB
txMetaCacheMaxMB = 1024 # 1GB

# Cache implementation algorithm (Clock, Unallocated, Preallocated, Trimmed)
# Clock (default) provides 90-95% retention with minimal overhead
# Unallocated provides 50% retention for memory-constrained environments
txMetaCacheBucketType = Clock

txMetaCacheTrimRatio = 5

# Used by tx blaster to receive rejected txs
Expand Down
1 change: 1 addition & 0 deletions settings/settings.go
Original file line number Diff line number Diff line change
Expand Up @@ -520,6 +520,7 @@ func NewSettings(alternativeContext ...string) *Settings {
SubtreeDAHConcurrency: getInt("subtreevalidation_subtreeDAHConcurrency", 8, alternativeContext...),
TxMetaCacheEnabled: getBool("subtreevalidation_txMetaCacheEnabled", true, alternativeContext...),
TxMetaCacheMaxMB: getInt("txMetaCacheMaxMB", 256, alternativeContext...),
TxMetaCacheBucketType: getString("txMetaCacheBucketType", "Clock", alternativeContext...),
TxChanBufferSize: getInt("subtreevalidation_txChanBufferSize", 0, alternativeContext...),
BatchMissingTransactions: getBool("subtreevalidation_batch_missing_transactions", true, alternativeContext...),
SpendBatcherSize: getInt("subtreevalidation_spendBatcherSize", 1024, alternativeContext...),
Expand Down
1 change: 1 addition & 0 deletions settings/subtreevalidation_settings.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ type SubtreeValidationSettings struct {
SubtreeDAHConcurrency int `key:"subtreevalidation_subtreeDAHConcurrency" desc:"Concurrency for subtree DAH operations" default:"8" category:"SubtreeValidation" usage:"Parallel DAH operations for subtrees" type:"int" longdesc:"### Purpose\nControls the number of parallel workers for Delete-At-Height (DAH) operations on subtrees.\n\n### How It Works\nDAH marks subtrees for eventual pruning at a specified block height. Multiple workers can update DAH markers concurrently to improve throughput.\n\n### Trade-offs\n| Setting | Benefit | Drawback |\n|---------|---------|----------|\n| Higher | Faster DAH updates | More storage I/O |\n| Lower | Reduced storage load | Slower DAH processing |\n\n### Recommendations\n- **8** (default) - Good balance for most storage systems\n- Increase for high-IOPS storage systems"`
TxMetaCacheEnabled bool `key:"subtreevalidation_txMetaCacheEnabled" desc:"Enable transaction metadata caching" default:"true" category:"SubtreeValidation" usage:"Improves validation performance" type:"bool" longdesc:"### Purpose\nEnables in-memory caching of transaction metadata for faster validation.\n\n### How It Works\n- Cache is populated from the Kafka txmeta topic\n- Cache hits avoid expensive database lookups to the UTXO store\n- Essential for achieving high-throughput validation\n\n### Values\n- **true** (default) - Enable caching for production performance\n- **false** - Disable caching (debugging or extreme memory constraints only)\n\n### Recommendations\n- Keep enabled for all production deployments\n- Only disable for debugging cache-related issues"`
TxMetaCacheMaxMB int `key:"txMetaCacheMaxMB" desc:"Maximum memory for transaction metadata cache" default:"256" category:"SubtreeValidation" usage:"Increase for better validation performance" type:"int" longdesc:"### Purpose\nSets the maximum memory in megabytes for the transaction metadata cache.\n\n### How It Works\nLarger cache size improves hit rate by retaining more transaction metadata, reducing database lookups during subtree validation.\n\n### Trade-offs\n| Setting | Benefit | Drawback |\n|---------|---------|----------|\n| Higher | Better cache hit rate | More memory usage |\n| Lower | Less memory usage | More database queries |\n\n### Recommendations\n- **256** (default) - Suitable for most deployments\n- Increase for high-throughput nodes with available memory\n- Monitor cache hit rate metrics to optimize"`
TxMetaCacheBucketType string `key:"txMetaCacheBucketType" desc:"Cache implementation algorithm" default:"Clock" category:"SubtreeValidation" usage:"Algorithm for cache eviction (Clock, Unallocated, Preallocated, Trimmed)" type:"string" longdesc:"### Purpose\nSelects the cache implementation algorithm for transaction metadata storage and eviction.\n\n### How It Works\nDifferent algorithms provide different trade-offs between retention rate, memory usage, and performance:\n\n### Available Algorithms\n| Algorithm | Retention | Memory Per Entry | Total Memory | Use Case |\n|-----------|-----------|------------------|--------------|----------|\n| **Clock** (default) | 90-95% | ~240 bytes | ~Same as config | Production - best retention |\n| **Unallocated** | 50% | ~212 bytes | ~Same as config | Baseline - fallback option |\n| **Preallocated** | 50% | ~212 bytes | Exact config | Predictable upfront allocation |\n| **Trimmed** | Variable | ~212 bytes | ~Same as config | Long-running with trimming |\n\n### Values\n- **Clock** (default) - Second-chance LRU algorithm with 90-95% retention\n- **Unallocated** - On-demand memory allocation, 50% retention at capacity\n- **Preallocated** - Upfront memory allocation, 50% retention at capacity\n- **Trimmed** - On-demand with periodic trimming, variable retention\n\n### Memory Overhead Explanation\nClock uses ~240 bytes/entry vs Unallocated's ~212 bytes/entry (+13% per entry) due to:\n- Preallocated slot structure (40 bytes: hash, slice header, accessed bit, padding)\n- However, capacity is adjusted so **total memory** for both algorithms is comparable\n- Example: 256MB config → Clock uses ~278MB, Unallocated uses ~275MB\n- Clock trades 13% more bytes/entry for 80% better retention (90% vs 50%)\n\n### Recommendations\n- **Clock** (default) - Recommended for production: better retention, similar total memory\n- **Unallocated** - Fallback if Clock has unexpected issues\n- **Safe to switch** between algorithms - total memory usage is 
comparable\n- This setting allows rollback without redeployment"`
TxChanBufferSize int `key:"subtreevalidation_txChanBufferSize" desc:"Buffer size for transaction channel" default:"0" category:"SubtreeValidation" usage:"Channel buffer for transaction processing" type:"int" longdesc:"### Purpose\nSets the buffer size for internal transaction processing channels.\n\n### How It Works\nControls the Go channel buffer size used for passing transactions between processing stages.\n\n### Values\n- **0** (default) - Unbuffered channels for synchronous processing\n- **N > 0** - Buffered channels for pipelined processing\n\n### Recommendations\n- **0** - Default for most deployments (simpler flow control)\n- Increase for pipelining when producer and consumer have variable processing times"`
BatchMissingTransactions bool `key:"subtreevalidation_batch_missing_transactions" desc:"Batch missing transaction fetches" default:"true" category:"SubtreeValidation" usage:"Enable to batch missing transaction requests" type:"bool" longdesc:"### Purpose\nEnables batching of missing transaction fetches instead of individual queries.\n\n### How It Works\n- When enabled, missing transactions are collected and fetched in batches\n- When disabled, each missing transaction is fetched individually\n\n### Trade-offs\n| Setting | Benefit | Drawback |\n|---------|---------|----------|\n| Enabled | Reduced database overhead | Latency for batch collection |\n| Disabled | Lower per-request latency | More database queries |\n\n### Recommendations\n- **true** (default) - Better performance for most deployments\n- Disable only for debugging or specific latency requirements"`
SpendBatcherSize int `key:"subtreevalidation_spendBatcherSize" desc:"Batch size for spend operations" default:"1024" category:"SubtreeValidation" usage:"Number of spends per batch" type:"int" longdesc:"### Purpose\nControls how many UTXO spend operations are batched together during subtree processing.\n\n### How It Works\nWhen validating transactions, UTXO spends are collected and sent to the UTXO store in batches. This also controls the concurrency limit for parallel transaction processing (SpendBatcherSize * 2).\n\n### Trade-offs\n| Setting | Benefit | Drawback |\n|---------|---------|----------|\n| Larger | Fewer database round-trips | Higher memory per batch |\n| Smaller | Lower memory usage | More database overhead |\n\n### Recommendations\n- **1024** (default) - Good balance for Aerospike performance\n- Adjust based on UTXO store characteristics"`
Expand Down
73 changes: 73 additions & 0 deletions stores/txmetacache/bucket_type_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
package txmetacache

import (
"testing"

"github.com/stretchr/testify/require"
)

// TestBucketType_String verifies that each BucketType constant renders its
// expected name via String(), and that an out-of-range value reports "Unknown".
func TestBucketType_String(t *testing.T) {
	cases := []struct {
		name string
		bt   BucketType
		want string
	}{
		{"Unallocated", Unallocated, "Unallocated"},
		{"Preallocated", Preallocated, "Preallocated"},
		{"Trimmed", Trimmed, "Trimmed"},
		{"Clock", Clock, "Clock"},
		{"Unknown", BucketType(999), "Unknown"},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.want, tc.bt.String())
		})
	}
}

// TestParseBucketType checks ParseBucketType against exact-case names,
// lowercase variants, and invalid inputs (which fall back to Clock).
func TestParseBucketType(t *testing.T) {
	cases := []struct {
		name  string
		input string
		want  BucketType
	}{
		// Exact case matches
		{"Unallocated exact", "Unallocated", Unallocated},
		{"Preallocated exact", "Preallocated", Preallocated},
		{"Trimmed exact", "Trimmed", Trimmed},
		{"Clock exact", "Clock", Clock},

		// Lowercase variants
		{"unallocated lowercase", "unallocated", Unallocated},
		{"preallocated lowercase", "preallocated", Preallocated},
		{"trimmed lowercase", "trimmed", Trimmed},
		{"clock lowercase", "clock", Clock},

		// Invalid/unknown defaults to Clock
		{"empty string", "", Clock},
		{"invalid string", "InvalidType", Clock},
		{"random string", "xyz", Clock},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.want, ParseBucketType(tc.input))
		})
	}
}

// TestParseBucketType_RoundTrip asserts that String() and ParseBucketType are
// inverses for every defined bucket type: parse(bt.String()) == bt.
func TestParseBucketType_RoundTrip(t *testing.T) {
	for _, bt := range []BucketType{Unallocated, Preallocated, Trimmed, Clock} {
		t.Run(bt.String(), func(t *testing.T) {
			name := bt.String()
			require.Equal(t, bt, ParseBucketType(name), "Round trip failed for %s", name)
		})
	}
}
Loading
Loading