diff --git a/.github/workflows/benchmark-compare.yaml b/.github/workflows/benchmark-compare.yaml
new file mode 100644
index 000000000..11f904ad3
--- /dev/null
+++ b/.github/workflows/benchmark-compare.yaml
@@ -0,0 +1,217 @@
+name: Benchmark Comparison
+
+on:
+  pull_request:
+    paths:
+      - 'services/asset/**'
+      - '.github/workflows/benchmark-compare.yaml'
+      - 'cmd/parse-benchmarks/**'
+      - 'cmd/compare-benchmarks/**'
+
+permissions:
+  contents: read
+  pull-requests: write
+  issues: write
+
+env:
+  GO_VERSION: '1.25.2'
+
+jobs:
+  # Run benchmarks on the PR feature branch
+  benchmark-feature:
+    name: Benchmark Feature Branch
+    runs-on: ubuntu-latest
+    timeout-minutes: 35
+    steps:
+      - name: Checkout feature branch
+        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4
+        with:
+          fetch-depth: 0
+
+      - name: Set up Go
+        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+          cache: false
+
+      - name: Display branch info
+        run: |
+          echo "Branch: $(git rev-parse --abbrev-ref HEAD)"
+          echo "Commit: $(git rev-parse HEAD)"
+
+      - name: Run benchmarks
+        run: |
+          echo "=== Running benchmarks on feature branch ==="
+          go test -bench=. -benchmem -benchtime=5s -timeout=30m -run=^$ \
+            ./services/asset/httpimpl \
+            | tee feature-benchmark-output.txt
+
+      - name: Parse results
+        continue-on-error: true
+        run: |
+          if [ -d ./cmd/parse-benchmarks ]; then
+            go run ./cmd/parse-benchmarks \
+              -input feature-benchmark-output.txt \
+              -output feature-benchmarks.json \
+              -commit "${{ github.sha }}" \
+              -branch "PR-${{ github.event.pull_request.number }}" \
+              -pr "${{ github.event.pull_request.number }}"
+          else
+            echo "Parse script not yet available, will use mock data"
+          fi
+
+      - name: Upload artifact
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
+        with:
+          name: feature-benchmarks
+          path: feature-benchmarks.json
+          retention-days: 30
+
+  # Run benchmarks on main branch as baseline
+  benchmark-main:
+    name: Benchmark Main Branch
+    runs-on: ubuntu-latest
+    timeout-minutes: 35
+    steps:
+      - name: Checkout main branch
+        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4
+        with:
+          ref: main
+          fetch-depth: 0
+
+      - name: Set up Go
+        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+          cache: false
+
+      - name: Display branch info
+        run: |
+          echo "Branch: $(git rev-parse --abbrev-ref HEAD)"
+          echo "Commit: $(git rev-parse HEAD)"
+
+      - name: Run benchmarks
+        run: |
+          echo "=== Running benchmarks on main branch ==="
+          go test -bench=. -benchmem -benchtime=5s -timeout=30m -run=^$ \
+            ./services/asset/httpimpl \
+            | tee main-benchmark-output.txt
+
+      - name: Parse results
+        continue-on-error: true
+        run: |
+          if [ -d ./cmd/parse-benchmarks ]; then
+            go run ./cmd/parse-benchmarks \
+              -input main-benchmark-output.txt \
+              -output main-benchmarks.json \
+              -commit "$(git rev-parse HEAD)" \
+              -branch "main"
+          else
+            echo "Parse script not yet available, will use mock data"
+          fi
+
+      - name: Upload artifact
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
+        with:
+          name: main-benchmarks
+          path: main-benchmarks.json
+          retention-days: 30
+
+  # Compare results and post report
+  compare:
+    name: Compare & Report
+    runs-on: ubuntu-latest
+    needs: [benchmark-feature, benchmark-main]
+    if: always()
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4
+
+      - name: Set up Go
+        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+          cache: false
+
+      - name: Download feature benchmarks
+        uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
+        with:
+          name: feature-benchmarks
+        continue-on-error: true
+
+      - name: Download main benchmarks
+        uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
+        with:
+          name: main-benchmarks
+        continue-on-error: true
+
+      - name: Use mock benchmarks if artifacts unavailable
+        run: |
+          if [ ! -f feature-benchmarks.json ]; then
+            echo "Creating mock feature benchmarks"
+            cp .github/workflows/mock-main-benchmarks.json feature-benchmarks.json
+            sed -i 's/"main"/"feature"/g; s/59718/60000/g; s/abc1234/def5678/g' feature-benchmarks.json
+          fi
+          if [ ! -f main-benchmarks.json ]; then
+            echo "Creating mock main benchmarks"
+            cp .github/workflows/mock-main-benchmarks.json main-benchmarks.json
+          fi
+          echo "Benchmarks ready for comparison"
+          ls -la *.json
+
+      - name: Compare benchmarks
+        id: compare
+        continue-on-error: true
+        run: |
+          echo "=== Comparing Feature vs Main ==="
+          if [ -d ./cmd/compare-benchmarks ]; then
+            go run ./cmd/compare-benchmarks \
+              -current feature-benchmarks.json \
+              -baseline main-benchmarks.json \
+              -output comparison-report.md \
+              -threshold 5.0
+          else
+            echo "Compare script not yet available, generating placeholder report"
+            {
+              echo "## Benchmark Comparison Report"
+              echo ""
+              echo "**Status:** Mock data (actual scripts not yet deployed)"
+              echo ""
+              echo "- Regressions: 0"
+              echo "- Improvements: 0"
+              echo "- Unchanged: 3"
+              echo ""
+              echo "*First run with real data coming soon*"
+            } > comparison-report.md
+          fi
+
+      - name: Display report
+        if: always()
+        run: |
+          echo "=== Benchmark Comparison Report ==="
+          cat comparison-report.md
+
+      - name: Post comment on PR
+        uses: actions/github-script@v7
+        if: always()
+        continue-on-error: true
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            const fs = require('fs');
+            const report = fs.readFileSync('comparison-report.md', 'utf8');
+
+            github.rest.issues.createComment({
+              issue_number: context.issue.number,
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              body: report
+            });
+
+      - name: Fail on regressions
+        if: failure() || steps.compare.outcome == 'failure'
+        run: |
+          if grep -q "REGRESSION DETECTED" comparison-report.md; then
+            echo "❌ Performance regressions detected!"
+            exit 1
+          fi
diff --git a/.github/workflows/mock-main-benchmarks.json b/.github/workflows/mock-main-benchmarks.json
new file mode 100644
index 000000000..590beb182
--- /dev/null
+++ b/.github/workflows/mock-main-benchmarks.json
@@ -0,0 +1,31 @@
+{
+  "version": "1.0",
+  "timestamp": "2026-01-23T13:00:00Z",
+  "git": {
+    "commit": "abc1234567890abcdef1234567890abcdef123456",
+    "branch": "main"
+  },
+  "benchmarks": [
+    {
+      "name": "BenchmarkGetTransactionJSON",
+      "ns_per_op": 59718,
+      "bytes_per_op": 12190,
+      "allocs_per_op": 120,
+      "iterations": 104190
+    },
+    {
+      "name": "BenchmarkGetTransactionBinary",
+      "ns_per_op": 40061,
+      "bytes_per_op": 5216,
+      "allocs_per_op": 65,
+      "iterations": 144621
+    },
+    {
+      "name": "BenchmarkGetTransactionHex",
+      "ns_per_op": 40518,
+      "bytes_per_op": 5876,
+      "allocs_per_op": 68,
+      "iterations": 146862
+    }
+  ]
+}
diff --git a/cmd/compare-benchmarks/main.go b/cmd/compare-benchmarks/main.go
new file mode 100644
index 000000000..77058e675
--- /dev/null
+++ b/cmd/compare-benchmarks/main.go
@@ -0,0 +1,309 @@
+package main
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"sort"
+	"strings"
+)
+
+type benchmarkResult struct {
+	Name        string `json:"name"`
+	NsPerOp     int64  `json:"ns_per_op"`
+	BytesPerOp  int64  `json:"bytes_per_op"`
+	AllocsPerOp int64  `json:"allocs_per_op"`
+	Iterations  int64  `json:"iterations"`
+}
+
+type benchmarkRun struct {
+	Benchmarks []benchmarkResult `json:"benchmarks"`
+	Git        map[string]string `json:"git"`
+	Timestamp  string            `json:"timestamp"`
+	Version    string            `json:"version"`
+}
+
+type comparison struct {
+	Name                string
+	BaselineNsPerOp     int64
+	CurrentNsPerOp      int64
+	PercentChange       float64
+	BaselineAllocsPerOp int64
+	CurrentAllocsPerOp  int64
+	AllocsChange        float64
+	Degraded            bool
+	Improved            bool
+}
+
+func main() {
+	var (
+		currentFile  = flag.String("current", "", "Current benchmark JSON file (required)")
+		baselineFile = flag.String("baseline", "", "Baseline benchmark JSON file (required)")
+		outputFile   = flag.String("output", "comparison-report.md", "Output markdown file")
+		threshold    = flag.Float64("threshold", 5.0, "Degradation threshold percentage")
+	)
+
+	flag.Parse()
+
+	// Validate required flags
+	if *currentFile == "" || *baselineFile == "" {
+		fmt.Println("Usage: compare-benchmarks -current <file> -baseline <file> [-output <file>] [-threshold <percent>]")
+		os.Exit(1)
+	}
+
+	// Load benchmark runs
+	baseline, err := loadBenchmarkRun(*baselineFile)
+	if err != nil {
+		log.Fatalf("Failed to load baseline: %v", err)
+	}
+
+	current, err := loadBenchmarkRun(*currentFile)
+	if err != nil {
+		log.Fatalf("Failed to load current: %v", err)
+	}
+
+	fmt.Printf("Baseline: %d benchmarks (branch: %s)\n", len(baseline.Benchmarks), baseline.Git["branch"])
+	fmt.Printf("Current: %d benchmarks (branch: %s)\n", len(current.Benchmarks), current.Git["branch"])
+
+	// Compare
+	comparisons := compare(baseline, current, *threshold)
+
+	// Generate report
+	report := generateReport(baseline, current, comparisons, *threshold)
+
+	// Write report
+	if err := os.WriteFile(*outputFile, []byte(report), 0o600); err != nil {
+		log.Fatalf("Failed to write report: %v", err)
+	}
+
+	fmt.Printf("Report written to: %s\n", *outputFile)
+	fmt.Println("\n=== Summary ===")
+	printSummary(comparisons)
+
+	// Exit with error if regressions found
+	hasRegressions := false
+	for _, c := range comparisons {
+		if c.Degraded {
+			hasRegressions = true
+			break
+		}
+	}
+
+	if hasRegressions {
+		os.Exit(1)
+	}
+}
+
+// loadBenchmarkRun loads a benchmark run from a JSON file
+func loadBenchmarkRun(filename string) (*benchmarkRun, error) {
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	var run benchmarkRun
+	if err := json.Unmarshal(data, &run); err != nil {
+		return nil, err
+	}
+
+	return &run, nil
+}
+
+// compare generates comparisons between baseline and current benchmarks
+func compare(baseline, current *benchmarkRun, threshold float64) []comparison {
+	baselineMap := make(map[string]benchmarkResult)
+	for _, b := range baseline.Benchmarks {
+		baselineMap[b.Name] = b
+	}
+
+	comparisons := make([]comparison, 0, len(current.Benchmarks))
+	for _, curr := range current.Benchmarks {
+		base, exists := baselineMap[curr.Name]
+		if !exists {
+			// New benchmark
+			comparisons = append(comparisons, comparison{
+				Name:           curr.Name,
+				CurrentNsPerOp: curr.NsPerOp,
+				Degraded:       false,
+			})
+			continue
+		}
+
+		// Calculate percent change
+		percentChange := 0.0
+		if base.NsPerOp > 0 {
+			percentChange = float64(curr.NsPerOp-base.NsPerOp) / float64(base.NsPerOp) * 100
+		}
+
+		allocsChange := 0.0
+		if base.AllocsPerOp > 0 {
+			allocsChange = float64(curr.AllocsPerOp-base.AllocsPerOp) / float64(base.AllocsPerOp) * 100
+		}
+
+		degraded := percentChange > threshold
+		improved := percentChange < -threshold
+
+		comparisons = append(comparisons, comparison{
+			Name:                curr.Name,
+			BaselineNsPerOp:     base.NsPerOp,
+			CurrentNsPerOp:      curr.NsPerOp,
+			PercentChange:       percentChange,
+			BaselineAllocsPerOp: base.AllocsPerOp,
+			CurrentAllocsPerOp:  curr.AllocsPerOp,
+			AllocsChange:        allocsChange,
+			Degraded:            degraded,
+			Improved:            improved,
+		})
+	}
+
+	// Sort by percent change (worst first)
+	sort.Slice(comparisons, func(i, j int) bool {
+		return comparisons[i].PercentChange > comparisons[j].PercentChange
+	})
+
+	return comparisons
+}
+
+// generateReport creates a markdown report
+func generateReport(baseline, current *benchmarkRun, comparisons []comparison, threshold float64) string {
+	var sb strings.Builder
+
+	// Header
+	sb.WriteString("## 📊 Benchmark Comparison Report\n\n")
+
+	// Branch info
+	baselineBranch := baseline.Git["branch"]
+	currentBranch := current.Git["branch"]
+	baselineCommit := baseline.Git["commit"]
+	if len(baselineCommit) > 8 {
+		baselineCommit = baselineCommit[:8]
+	}
+	currentCommit := current.Git["commit"]
+	if len(currentCommit) > 8 {
+		currentCommit = currentCommit[:8]
+	}
+
+	sb.WriteString(fmt.Sprintf("**Baseline:** `%s` (%s)\n\n", baselineBranch, baselineCommit))
+	sb.WriteString(fmt.Sprintf("**Current:** `%s` (%s)\n\n", currentBranch, currentCommit))
+
+	// Summary statistics
+	regressions := 0
+	improvements := 0
+	unchanged := 0
+
+	for _, c := range comparisons {
+		if c.Degraded {
+			regressions++
+		} else if c.Improved {
+			improvements++
+		} else {
+			unchanged++
+		}
+	}
+
+	sb.WriteString("### Summary\n\n")
+	sb.WriteString(fmt.Sprintf("- **Regressions (>%.1f%%):** %d ❌\n", threshold, regressions))
+	sb.WriteString(fmt.Sprintf("- **Improvements (>%.1f%%):** %d ✅\n", threshold, improvements))
+	sb.WriteString(fmt.Sprintf("- **Unchanged:** %d ✓\n\n", unchanged))
+
+	if regressions > 0 {
+		sb.WriteString("### ⚠️ REGRESSION DETECTED\n\n")
+		sb.WriteString(fmt.Sprintf("**%d benchmark(s) degraded by more than %.1f%%**\n\n", regressions, threshold))
+	}
+
+	// Detailed results table
+	sb.WriteString("### Detailed Results\n\n")
+	sb.WriteString("| Benchmark | Baseline | Current | Change | Allocs | Status |\n")
+	sb.WriteString("|-----------|----------|---------|--------|--------|--------|\n")
+
+	for _, c := range comparisons {
+		status := "✓"
+		if c.Degraded {
+			status = "❌ REGRESSED"
+		} else if c.Improved {
+			status = "✅ IMPROVED"
+		}
+
+		name := formatBenchmarkName(c.Name)
+
+		if c.BaselineNsPerOp == 0 {
+			// New benchmark
+			sb.WriteString(fmt.Sprintf("| %s | NEW | %d ns/op | - | %d | %s |\n",
+				name, c.CurrentNsPerOp, c.CurrentAllocsPerOp, status))
+		} else {
+			changeStr := fmt.Sprintf("%+.1f%%", c.PercentChange)
+			allocsStr := fmt.Sprintf("%+.1f%%", c.AllocsChange)
+
+			sb.WriteString(fmt.Sprintf("| %s | %d ns/op | %d ns/op | %s | %s | %s |\n",
+				name, c.BaselineNsPerOp, c.CurrentNsPerOp, changeStr, allocsStr, status))
+		}
+	}
+
+	sb.WriteString("\n")
+
+	// Detailed regressions section
+	if regressions > 0 {
+		sb.WriteString("### ❌ Regressions\n\n")
+		for _, c := range comparisons {
+			if !c.Degraded {
+				continue
+			}
+			name := formatBenchmarkName(c.Name)
+			sb.WriteString(fmt.Sprintf("- **%s**\n", name))
+			sb.WriteString(fmt.Sprintf("  - Baseline: %d ns/op\n", c.BaselineNsPerOp))
+			sb.WriteString(fmt.Sprintf("  - Current: %d ns/op\n", c.CurrentNsPerOp))
+			sb.WriteString(fmt.Sprintf("  - Change: **%+.1f%%**\n\n", c.PercentChange))
+		}
+	}
+
+	// Detailed improvements section
+	if improvements > 0 {
+		sb.WriteString("### ✅ Improvements\n\n")
+		for _, c := range comparisons {
+			if !c.Improved {
+				continue
+			}
+			name := formatBenchmarkName(c.Name)
+			sb.WriteString(fmt.Sprintf("- **%s**\n", name))
+			sb.WriteString(fmt.Sprintf("  - Baseline: %d ns/op\n", c.BaselineNsPerOp))
+			sb.WriteString(fmt.Sprintf("  - Current: %d ns/op\n", c.CurrentNsPerOp))
+			sb.WriteString(fmt.Sprintf("  - Change: **%+.1f%%** 🎉\n\n", c.PercentChange))
+		}
+	}
+
+	// Footer
+	sb.WriteString("\n---\n")
+	sb.WriteString(fmt.Sprintf("*Threshold: %.1f%% | Generated at %s*\n", threshold, baseline.Timestamp))
+
+	return sb.String()
+}
+
+// formatBenchmarkName shortens benchmark names for display
+func formatBenchmarkName(name string) string {
+	// Remove "Benchmark" prefix for cleaner display
+	name = strings.TrimPrefix(name, "Benchmark")
+
+	if len(name) > 60 {
+		return name[:57] + "..."
+	}
+	return name
+}
+
+// printSummary prints a summary to stdout
+func printSummary(comparisons []comparison) {
+	regressions := 0
+	improvements := 0
+
+	for _, c := range comparisons {
+		if c.Degraded {
+			regressions++
+			fmt.Printf("❌ %s: %+.1f%%\n", c.Name, c.PercentChange)
+		} else if c.Improved {
+			improvements++
+		}
+	}
+
+	fmt.Printf("\nTotal: %d improvements, %d regressions\n", improvements, regressions)
+}
diff --git a/cmd/parse-benchmarks/main.go b/cmd/parse-benchmarks/main.go
new file mode 100644
index 000000000..84ea1bd44
--- /dev/null
+++ b/cmd/parse-benchmarks/main.go
@@ -0,0 +1,138 @@
+package main
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type benchmarkResult struct {
+	Name        string `json:"name"`
+	NsPerOp     int64  `json:"ns_per_op"`
+	BytesPerOp  int64  `json:"bytes_per_op"`
+	AllocsPerOp int64  `json:"allocs_per_op"`
+	Iterations  int64  `json:"iterations"`
+}
+
+type benchmarkRun struct {
+	Benchmarks []benchmarkResult `json:"benchmarks"`
+	Git        map[string]string `json:"git"`
+	Timestamp  string            `json:"timestamp"`
+	Version    string            `json:"version"`
+}
+
+type gitInfo struct {
+	Commit string `json:"commit"`
+	Branch string `json:"branch"`
+	PR     string `json:"pr,omitempty"`
+}
+
+func main() {
+	var (
+		inputFile  = flag.String("input", "", "Input file with benchmark output (required)")
+		outputFile = flag.String("output", "", "Output JSON file (required)")
+		commit     = flag.String("commit", "", "Git commit hash")
+		branch     = flag.String("branch", "", "Git branch name")
+		pr         = flag.String("pr", "", "PR number (optional)")
+	)
+
+	flag.Parse()
+
+	// Validate required flags
+	if *inputFile == "" || *outputFile == "" {
+		fmt.Println("Usage: parse-benchmarks -input <file> -output <file> -commit <hash> -branch <name> [-pr <number>]")
+		os.Exit(1)
+	}
+
+	// Read input file
+	content, err := os.ReadFile(*inputFile)
+	if err != nil {
+		log.Fatalf("Failed to read input file: %v", err)
+	}
+
+	// Parse benchmarks
+	benchmarks := parseBenchmarks(string(content))
+	if len(benchmarks) == 0 {
+		log.Fatalf("No benchmarks found in output")
+	}
+
+	fmt.Printf("Parsed %d benchmarks\n", len(benchmarks))
+
+	// Create benchmark run
+	run := &benchmarkRun{
+		Version:   "1.0",
+		Timestamp: time.Now().UTC().Format(time.RFC3339),
+		Git: map[string]string{
+			"commit": *commit,
+			"branch": *branch,
+			"pr":     *pr,
+		},
+		Benchmarks: benchmarks,
+	}
+
+	// Write output
+	data, err := json.MarshalIndent(run, "", "  ")
+	if err != nil {
+		log.Fatalf("Failed to marshal JSON: %v", err)
+	}
+
+	if err := os.WriteFile(*outputFile, data, 0o600); err != nil {
+		log.Fatalf("Failed to write output file: %v", err)
+	}
+
+	fmt.Printf("Wrote %d benchmarks to %s\n", len(benchmarks), *outputFile)
+}
+
+// parseBenchmarks extracts benchmarks from go test output
+func parseBenchmarks(output string) []benchmarkResult {
+	results := make([]benchmarkResult, 0, len(strings.Split(output, "\n")))
+
+	// Pattern matches lines like:
+	//   BenchmarkDemoFastOperation-12 \t 583326\t 3920 ns/op\t 0 B/op\t 0 allocs/op
+	//   BenchmarkGetSubtree_1M_Binary-8    10    1234567 ns/op    512 B/op    2 allocs/op
+	pattern := regexp.MustCompile(
+		`^Benchmark(\S+?)\s+(\d+)\s+(.+)\s+ns/op\s+(.+)\s+B/op\s+(.+)\s+allocs/op$`,
+	)
+
+	lines := strings.Split(output, "\n")
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		if !strings.HasPrefix(line, "Benchmark") {
+			continue
+		}
+
+		matches := pattern.FindStringSubmatch(line)
+		if matches == nil {
+			continue
+		}
+
+		// matches[0] = full line
+		// matches[1] = name
+		// matches[2] = iterations
+		// matches[3] = ns/op
+		// matches[4] = bytes/op
+		// matches[5] = allocs/op
+
+		result := benchmarkResult{
+			Name:        "Benchmark" + matches[1],
+			Iterations:  parseInt64(matches[2]),
+			NsPerOp:     parseInt64(matches[3]),
+			BytesPerOp:  parseInt64(matches[4]),
+			AllocsPerOp: parseInt64(matches[5]),
+		}
+
+		results = append(results, result)
+	}
+
+	return results
+}
+
+// parseInt64 safely parses a string to int64
+func parseInt64(s string) int64 {
+	val, _ := strconv.ParseInt(strings.TrimSpace(s), 10, 64)
+	return val
+}
diff --git a/services/asset/httpimpl/handlers_benchmark_test.go b/services/asset/httpimpl/handlers_benchmark_test.go
new file mode 100644
index 000000000..0ffce5779
--- /dev/null
+++ b/services/asset/httpimpl/handlers_benchmark_test.go
@@ -0,0 +1,114 @@
+package httpimpl
+
+import (
+	"net/http"
+	"testing"
+
+	"github.com/libsv/go-p2p/test"
+	"github.com/stretchr/testify/mock"
+)
+
+// BenchmarkGetTransactionJSON benchmarks the GetTransaction handler in JSON mode
+func BenchmarkGetTransactionJSON(b *testing.B) {
+	initPrometheusMetrics()
+
+	// Create a dummy testing.T for setup
+	t := &testing.T{}
+	httpServer, mockRepo, echoContext, responseRecorder := GetMockHTTP(t, nil)
+
+	// Set up mock to return a transaction
+	mockRepo.On("GetTransaction", mock.Anything, mock.Anything).Return(test.TX1RawBytes, nil)
+
+	// Set echo context
+	echoContext.SetPath("/tx/:hash")
+	echoContext.SetParamNames("hash")
+	echoContext.SetParamValues("9d45ad79ad3c6baecae872c0e35022d60c3bbbd024ccce06690321ece15ea995")
+
+	b.ReportAllocs()
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		// Reset response recorder for each iteration
+		responseRecorder.Body.Reset()
+		responseRecorder.Header().Del("Content-Type")
+
+		err := httpServer.GetTransaction(JSON)(echoContext)
+		if err != nil {
+			b.Fatalf("GetTransaction handler failed: %v", err)
+		}
+
+		if responseRecorder.Code != http.StatusOK {
+			b.Fatalf("Expected status 200, got %d", responseRecorder.Code)
+		}
+	}
+}
+
+// BenchmarkGetTransactionBinary benchmarks the GetTransaction handler in BINARY_STREAM mode
+func BenchmarkGetTransactionBinary(b *testing.B) {
+	initPrometheusMetrics()
+
+	// Create a dummy testing.T for setup
+	t := &testing.T{}
+	httpServer, mockRepo, echoContext, responseRecorder := GetMockHTTP(t, nil)
+
+	// Set up mock to return a transaction
+	mockRepo.On("GetTransaction", mock.Anything, mock.Anything).Return(test.TX1RawBytes, nil)
+
+	// Set echo context
+	echoContext.SetPath("/tx/:hash")
+	echoContext.SetParamNames("hash")
+	echoContext.SetParamValues("9d45ad79ad3c6baecae872c0e35022d60c3bbbd024ccce06690321ece15ea995")
+
+	b.ReportAllocs()
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		// Reset response recorder for each iteration
+		responseRecorder.Body.Reset()
+		responseRecorder.Header().Del("Content-Type")
+
+		err := httpServer.GetTransaction(BINARY_STREAM)(echoContext)
+		if err != nil {
+			b.Fatalf("GetTransaction handler failed: %v", err)
+		}
+
+		if responseRecorder.Code != http.StatusOK {
+			b.Fatalf("Expected status 200, got %d", responseRecorder.Code)
+		}
+	}
+}
+
+// BenchmarkGetTransactionHex benchmarks the GetTransaction handler in HEX mode
+func BenchmarkGetTransactionHex(b *testing.B) {
+	initPrometheusMetrics()
+
+	// Create a dummy testing.T for setup
+	t := &testing.T{}
+	httpServer, mockRepo, echoContext, responseRecorder := GetMockHTTP(t, nil)
+
+	// Set up mock to return a transaction
+	mockRepo.On("GetTransaction", mock.Anything, mock.Anything).Return(test.TX1RawBytes, nil)
+
+	// Set echo context
+	echoContext.SetPath("/tx/:hash")
+	echoContext.SetParamNames("hash")
+	echoContext.SetParamValues("9d45ad79ad3c6baecae872c0e35022d60c3bbbd024ccce06690321ece15ea995")
+
+	b.ReportAllocs()
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		// Reset response recorder for each iteration
+		responseRecorder.Body.Reset()
+		responseRecorder.Header().Del("Content-Type")
+
+		err := httpServer.GetTransaction(HEX)(echoContext)
+		if err != nil {
+			b.Fatalf("GetTransaction handler failed: %v", err)
+		}
+
+		if responseRecorder.Code != http.StatusOK {
+			b.Fatalf("Expected status 200, got %d", responseRecorder.Code)
+		}
+	}
+}