diff --git a/.gitignore b/.gitignore index fd50b6d07..d0686e9c3 100644 --- a/.gitignore +++ b/.gitignore @@ -39,5 +39,6 @@ REPORT.md .zed .superset -# Claude Code worktrees +# Claude Code .claude/worktrees/ +.claude/settings.local.json diff --git a/cmd/roborev/analyze.go b/cmd/roborev/analyze.go index 8eba8c731..366a8f9f2 100644 --- a/cmd/roborev/analyze.go +++ b/cmd/roborev/analyze.go @@ -295,17 +295,19 @@ func runAnalysis(cmd *cobra.Command, typeName string, filePatterns []string, opt cfg, _ := config.LoadGlobal() maxPromptSize := config.ResolveMaxPromptSize(repoRoot, cfg) + ep := getDaemonEndpoint() + // Per-file mode: create one job per file if opts.perFile { - return runPerFileAnalysis(cmd, serverAddr, repoRoot, analysisType, files, opts, maxPromptSize) + return runPerFileAnalysis(cmd, ep, repoRoot, analysisType, files, opts, maxPromptSize) } // Standard mode: all files in one job - return runSingleAnalysis(cmd, serverAddr, repoRoot, analysisType, files, opts, maxPromptSize) + return runSingleAnalysis(cmd, ep, repoRoot, analysisType, files, opts, maxPromptSize) } // runSingleAnalysis creates a single analysis job for all files -func runSingleAnalysis(cmd *cobra.Command, serverAddr string, repoRoot string, analysisType *analyze.AnalysisType, files map[string]string, opts analyzeOptions, maxPromptSize int) error { +func runSingleAnalysis(cmd *cobra.Command, ep daemon.DaemonEndpoint, repoRoot string, analysisType *analyze.AnalysisType, files map[string]string, opts analyzeOptions, maxPromptSize int) error { if !opts.quiet && !opts.jsonOutput { cmd.Printf("Analyzing %d file(s) with %q analysis...\n", len(files), analysisType.Name) } @@ -339,7 +341,7 @@ func runSingleAnalysis(cmd *cobra.Command, serverAddr string, repoRoot string, a } // Enqueue the job - job, err := enqueueAnalysisJob(serverAddr, repoRoot, fullPrompt, outputPrefix, analysisType.Name, opts) + job, err := enqueueAnalysisJob(ep, repoRoot, fullPrompt, outputPrefix, analysisType.Name, opts) if err != 
nil { return err } @@ -362,19 +364,19 @@ func runSingleAnalysis(cmd *cobra.Command, serverAddr string, repoRoot string, a // If --fix, we need to wait for analysis, run fixer, then mark closed if opts.fix { - return runAnalyzeAndFix(cmd, serverAddr, repoRoot, job.ID, analysisType, opts) + return runAnalyzeAndFix(cmd, ep, repoRoot, job.ID, analysisType, opts) } // If --wait, poll until job completes and show result if opts.wait { - return waitForPromptJob(cmd, serverAddr, job.ID, opts.quiet, promptPollInterval) + return waitForPromptJob(cmd, ep, job.ID, opts.quiet, promptPollInterval) } return nil } // runPerFileAnalysis creates one analysis job per file -func runPerFileAnalysis(cmd *cobra.Command, serverAddr string, repoRoot string, analysisType *analyze.AnalysisType, files map[string]string, opts analyzeOptions, maxPromptSize int) error { +func runPerFileAnalysis(cmd *cobra.Command, ep daemon.DaemonEndpoint, repoRoot string, analysisType *analyze.AnalysisType, files map[string]string, opts analyzeOptions, maxPromptSize int) error { // Sort files for deterministic order fileNames := make([]string, 0, len(files)) for name := range files { @@ -410,7 +412,7 @@ func runPerFileAnalysis(cmd *cobra.Command, serverAddr string, repoRoot string, } } - job, err := enqueueAnalysisJob(serverAddr, repoRoot, fullPrompt, outputPrefix, analysisType.Name, opts) + job, err := enqueueAnalysisJob(ep, repoRoot, fullPrompt, outputPrefix, analysisType.Name, opts) if err != nil { return fmt.Errorf("enqueue job for %s: %w", fileName, err) } @@ -451,7 +453,7 @@ func runPerFileAnalysis(cmd *cobra.Command, serverAddr string, repoRoot string, if !opts.quiet { cmd.Printf("\n=== Fixing job %d (%d/%d) ===\n", info.ID, i+1, len(jobInfos)) } - if err := runAnalyzeAndFix(cmd, serverAddr, repoRoot, info.ID, analysisType, opts); err != nil { + if err := runAnalyzeAndFix(cmd, ep, repoRoot, info.ID, analysisType, opts); err != nil { if !opts.quiet { cmd.Printf("Warning: fix for job %d failed: %v\n", 
info.ID, err) } @@ -470,7 +472,7 @@ func runPerFileAnalysis(cmd *cobra.Command, serverAddr string, repoRoot string, if !opts.quiet { cmd.Printf("\n=== Job %d (%d/%d) ===\n", info.ID, i+1, len(jobInfos)) } - if err := waitForPromptJob(cmd, serverAddr, info.ID, opts.quiet, promptPollInterval); err != nil { + if err := waitForPromptJob(cmd, ep, info.ID, opts.quiet, promptPollInterval); err != nil { if !opts.quiet { cmd.Printf("Warning: job %d failed: %v\n", info.ID, err) } @@ -496,7 +498,7 @@ func buildOutputPrefix(analysisType string, filePaths []string) string { } // enqueueAnalysisJob sends a job to the daemon -func enqueueAnalysisJob(serverAddr string, repoRoot, prompt, outputPrefix, label string, opts analyzeOptions) (*storage.ReviewJob, error) { +func enqueueAnalysisJob(ep daemon.DaemonEndpoint, repoRoot, prompt, outputPrefix, label string, opts analyzeOptions) (*storage.ReviewJob, error) { branch := git.GetCurrentBranch(repoRoot) if opts.branch != "" && opts.branch != "HEAD" { branch = opts.branch @@ -514,7 +516,7 @@ func enqueueAnalysisJob(serverAddr string, repoRoot, prompt, outputPrefix, label Agentic: true, // Agentic mode needed for reading files when prompt exceeds size limit }) - resp, err := http.Post(serverAddr+"/api/enqueue", "application/json", bytes.NewReader(reqBody)) + resp, err := ep.HTTPClient(10*time.Second).Post(ep.BaseURL()+"/api/enqueue", "application/json", bytes.NewReader(reqBody)) if err != nil { return nil, fmt.Errorf("failed to connect to daemon: %w", err) } @@ -538,7 +540,7 @@ func enqueueAnalysisJob(serverAddr string, repoRoot, prompt, outputPrefix, label } // runAnalyzeAndFix waits for analysis to complete, runs a fixer agent, then marks closed -func runAnalyzeAndFix(cmd *cobra.Command, serverAddr, repoRoot string, jobID int64, analysisType *analyze.AnalysisType, opts analyzeOptions) error { +func runAnalyzeAndFix(cmd *cobra.Command, ep daemon.DaemonEndpoint, repoRoot string, jobID int64, analysisType *analyze.AnalysisType, opts 
analyzeOptions) error { ctx := cmd.Context() if ctx == nil { ctx = context.Background() @@ -552,7 +554,7 @@ func runAnalyzeAndFix(cmd *cobra.Command, serverAddr, repoRoot string, jobID int ctx, cancel := context.WithTimeout(ctx, analyzeJobTimeout) defer cancel() - review, err := waitForAnalysisJob(ctx, serverAddr, jobID) + review, err := waitForAnalysisJob(ctx, ep, jobID) if err != nil { return fmt.Errorf("analysis failed: %w", err) } @@ -643,14 +645,14 @@ func runAnalyzeAndFix(cmd *cobra.Command, serverAddr, repoRoot string, jobID int // Ensure the fix commit gets a review enqueued if commitCreated { if head, err := git.ResolveSHA(repoRoot, "HEAD"); err == nil { - if err := enqueueIfNeeded(ctx, serverAddr, repoRoot, head); err != nil && !opts.quiet { + if err := enqueueIfNeeded(ctx, ep.BaseURL(), repoRoot, head); err != nil && !opts.quiet { cmd.Printf("Warning: could not enqueue review for fix commit: %v\n", err) } } } // Close the analysis job - if err := markJobClosed(ctx, serverAddr, jobID); err != nil { + if err := markJobClosed(ctx, ep.BaseURL(), jobID); err != nil { // Non-fatal - the fixes were applied, just couldn't update status if !opts.quiet { cmd.Printf("\nWarning: could not close job: %v\n", err) @@ -664,8 +666,9 @@ func runAnalyzeAndFix(cmd *cobra.Command, serverAddr, repoRoot string, jobID int // waitForAnalysisJob polls until the job completes and returns the review. // The context controls the maximum wait time. 
-func waitForAnalysisJob(ctx context.Context, serverAddr string, jobID int64) (*storage.Review, error) { - client := &http.Client{Timeout: 30 * time.Second} +func waitForAnalysisJob(ctx context.Context, ep daemon.DaemonEndpoint, jobID int64) (*storage.Review, error) { + client := ep.HTTPClient(30 * time.Second) + baseURL := ep.BaseURL() pollInterval := 1 * time.Second maxInterval := 5 * time.Second @@ -677,7 +680,7 @@ func waitForAnalysisJob(ctx context.Context, serverAddr string, jobID int64) (*s default: } - req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/api/jobs?id=%d", serverAddr, jobID), nil) + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/api/jobs?id=%d", baseURL, jobID), nil) if err != nil { return nil, fmt.Errorf("create request: %w", err) } @@ -710,7 +713,7 @@ func waitForAnalysisJob(ctx context.Context, serverAddr string, jobID int64) (*s switch job.Status { case storage.JobStatusDone: // Fetch the review - reviewReq, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/api/review?job_id=%d", serverAddr, jobID), nil) + reviewReq, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/api/review?job_id=%d", baseURL, jobID), nil) if err != nil { return nil, fmt.Errorf("create review request: %w", err) } diff --git a/cmd/roborev/analyze_integration_test.go b/cmd/roborev/analyze_integration_test.go index e3f3b54a2..5b73d4ea7 100644 --- a/cmd/roborev/analyze_integration_test.go +++ b/cmd/roborev/analyze_integration_test.go @@ -95,7 +95,7 @@ func TestWaitForAnalysisJob(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - review, err := waitForAnalysisJob(ctx, ts.URL, testJobID) + review, err := waitForAnalysisJob(ctx, mustParseEndpoint(t, ts.URL), testJobID) if tt.wantErr { require.Error(t, err) @@ -132,7 +132,7 @@ func TestRunAnalyzeAndFix_Integration(t *testing.T) { reasoning: "fast", } - err := runAnalyzeAndFix(cmd, ts.URL, repo.Dir, 99, 
analysisType, opts) + err := runAnalyzeAndFix(cmd, mustParseEndpoint(t, ts.URL), repo.Dir, 99, analysisType, opts) require.NoError(t, err, "runAnalyzeAndFix failed: %v") // Verify the workflow was executed diff --git a/cmd/roborev/analyze_test.go b/cmd/roborev/analyze_test.go index 9e4e5367b..c82ddb714 100644 --- a/cmd/roborev/analyze_test.go +++ b/cmd/roborev/analyze_test.go @@ -247,7 +247,7 @@ func TestWaitForAnalysisJob_Timeout(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() - _, err := waitForAnalysisJob(ctx, ts.URL, 42) + _, err := waitForAnalysisJob(ctx, mustParseEndpoint(t, ts.URL), 42) require.Error(t, err, "expected timeout error") require.ErrorContains(t, err, "context deadline exceeded", "expected context deadline error") } @@ -411,7 +411,7 @@ func TestPerFileAnalysis(t *testing.T) { cmd, output := newTestCmd(t) analysisType := analyze.GetType("refactor") - err = runPerFileAnalysis(cmd, ts.URL, tmpDir, analysisType, files, analyzeOptions{quiet: false}, config.DefaultMaxPromptSize) + err = runPerFileAnalysis(cmd, mustParseEndpoint(t, ts.URL), tmpDir, analysisType, files, analyzeOptions{quiet: false}, config.DefaultMaxPromptSize) require.NoError(t, err, "runPerFileAnalysis") // Should have created 3 jobs (one per file) @@ -440,7 +440,7 @@ func TestEnqueueAnalysisJob(t *testing.T) { }, }) - job, err := enqueueAnalysisJob(ts.URL, "/repo", "test prompt", "", "test-fixtures", analyzeOptions{agentName: "test"}) + job, err := enqueueAnalysisJob(mustParseEndpoint(t, ts.URL), "/repo", "test prompt", "", "test-fixtures", analyzeOptions{agentName: "test"}) require.NoError(t, err, "enqueueAnalysisJob") assert.Equal(t, int64(42), job.ID, "job.ID") @@ -470,7 +470,7 @@ func TestEnqueueAnalysisJobBranchName(t *testing.T) { t.Run("no branch flag uses current branch", func(t *testing.T) { ts, gotBranch := captureBranch(t) - _, err := enqueueAnalysisJob(ts.URL, repo.Dir, "prompt", "", "refactor", 
analyzeOptions{}) + _, err := enqueueAnalysisJob(mustParseEndpoint(t, ts.URL), repo.Dir, "prompt", "", "refactor", analyzeOptions{}) require.NoError(t, err, "enqueueAnalysisJob") assert.Equal(t, "test-current", *gotBranch, "expected branch 'test-current'") }) @@ -478,7 +478,7 @@ func TestEnqueueAnalysisJobBranchName(t *testing.T) { t.Run("branch=HEAD uses current branch", func(t *testing.T) { ts, gotBranch := captureBranch(t) - _, err := enqueueAnalysisJob(ts.URL, repo.Dir, "prompt", "", "refactor", analyzeOptions{branch: "HEAD"}) + _, err := enqueueAnalysisJob(mustParseEndpoint(t, ts.URL), repo.Dir, "prompt", "", "refactor", analyzeOptions{branch: "HEAD"}) require.NoError(t, err, "enqueueAnalysisJob") assert.Equal(t, "test-current", *gotBranch, "expected branch 'test-current'") }) @@ -486,7 +486,7 @@ func TestEnqueueAnalysisJobBranchName(t *testing.T) { t.Run("named branch overrides current branch", func(t *testing.T) { ts, gotBranch := captureBranch(t) - _, err := enqueueAnalysisJob(ts.URL, repo.Dir, "prompt", "", "refactor", analyzeOptions{branch: "feature-xyz"}) + _, err := enqueueAnalysisJob(mustParseEndpoint(t, ts.URL), repo.Dir, "prompt", "", "refactor", analyzeOptions{branch: "feature-xyz"}) require.NoError(t, err, "enqueueAnalysisJob") assert.Equal(t, "feature-xyz", *gotBranch, "expected branch 'feature-xyz'") }) @@ -611,7 +611,7 @@ func TestAnalyzeJSONOutput(t *testing.T) { cmd, output := newTestCmd(t) - err := runSingleAnalysis(cmd, ts.URL, tmpDir, analysisType, files, analyzeOptions{jsonOutput: true}, config.DefaultMaxPromptSize) + err := runSingleAnalysis(cmd, mustParseEndpoint(t, ts.URL), tmpDir, analysisType, files, analyzeOptions{jsonOutput: true}, config.DefaultMaxPromptSize) require.NoError(t, err, "runSingleAnalysis: %v") var result AnalyzeResult @@ -638,7 +638,7 @@ func TestAnalyzeJSONOutput(t *testing.T) { cmd, output := newTestCmd(t) - err := runPerFileAnalysis(cmd, ts.URL, tmpDir, analysisType, files, analyzeOptions{jsonOutput: true}, 
config.DefaultMaxPromptSize) + err := runPerFileAnalysis(cmd, mustParseEndpoint(t, ts.URL), tmpDir, analysisType, files, analyzeOptions{jsonOutput: true}, config.DefaultMaxPromptSize) require.NoError(t, err, "runPerFileAnalysis") var result AnalyzeResult diff --git a/cmd/roborev/comment.go b/cmd/roborev/comment.go index 26f10f998..a04638a43 100644 --- a/cmd/roborev/comment.go +++ b/cmd/roborev/comment.go @@ -10,6 +10,7 @@ import ( "os/exec" "strconv" "strings" + "time" "github.com/roborev-dev/roborev/internal/git" "github.com/spf13/cobra" @@ -134,8 +135,9 @@ Examples: reqBody, _ := json.Marshal(reqData) - addr := getDaemonAddr() - resp, err := http.Post(addr+"/api/comment", "application/json", bytes.NewReader(reqBody)) + ep := getDaemonEndpoint() + addr := ep.BaseURL() + resp, err := ep.HTTPClient(5*time.Second).Post(addr+"/api/comment", "application/json", bytes.NewReader(reqBody)) if err != nil { return fmt.Errorf("failed to connect to daemon: %w", err) } @@ -191,8 +193,9 @@ func closeCmd() *cobra.Command { "closed": closed, }) - addr := getDaemonAddr() - resp, err := http.Post(addr+"/api/review/close", "application/json", bytes.NewReader(reqBody)) + ep := getDaemonEndpoint() + addr := ep.BaseURL() + resp, err := ep.HTTPClient(5*time.Second).Post(addr+"/api/review/close", "application/json", bytes.NewReader(reqBody)) if err != nil { return fmt.Errorf("failed to connect to daemon: %w", err) } diff --git a/cmd/roborev/compact.go b/cmd/roborev/compact.go index 3f8f9cf71..7aa55d54b 100644 --- a/cmd/roborev/compact.go +++ b/cmd/roborev/compact.go @@ -183,13 +183,14 @@ func fetchJobBatch(ctx context.Context, ids []int64) (map[int64]storage.JobWithR return nil, fmt.Errorf("marshal batch request: %w", err) } - req, err := http.NewRequestWithContext(ctx, "POST", serverAddr+"/api/jobs/batch", bytes.NewReader(reqBody)) + ep := getDaemonEndpoint() + req, err := http.NewRequestWithContext(ctx, "POST", ep.BaseURL()+"/api/jobs/batch", bytes.NewReader(reqBody)) if err != nil { 
return nil, fmt.Errorf("create batch request: %w", err) } req.Header.Set("Content-Type", "application/json") - client := &http.Client{Timeout: 30 * time.Second} + client := ep.HTTPClient(30 * time.Second) resp, err := client.Do(req) if err != nil { return nil, fmt.Errorf("batch fetch: %w", err) @@ -285,7 +286,7 @@ func waitForConsolidation(ctx context.Context, cmd *cobra.Command, jobID int64, output = cmd.OutOrStdout() } - _, err := waitForJobCompletion(ctx, serverAddr, jobID, output) + _, err := waitForJobCompletion(ctx, getDaemonEndpoint().BaseURL(), jobID, output) if err != nil { return fmt.Errorf("verification failed: %w", err) } @@ -400,7 +401,7 @@ func runCompact(cmd *cobra.Command, opts compactOptions) error { // CRITICAL: This must succeed or source jobs will never be closed if err := writeCompactMetadata(consolidatedJobID, successfulJobIDs); err != nil { // Try to cancel the job we just created - if cancelErr := cancelJob(serverAddr, consolidatedJobID); cancelErr != nil { + if cancelErr := cancelJob(getDaemonEndpoint().BaseURL(), consolidatedJobID); cancelErr != nil { // Best effort - log but don't mask the original error log.Printf("Failed to cancel job %d after metadata write failure: %v", consolidatedJobID, cancelErr) } @@ -550,7 +551,8 @@ func enqueueCompactJob(repoRoot, prompt, outputPrefix, label, branch string, opt return nil, fmt.Errorf("marshal enqueue request: %w", err) } - resp, err := http.Post(serverAddr+"/api/enqueue", "application/json", bytes.NewReader(reqBody)) + ep := getDaemonEndpoint() + resp, err := ep.HTTPClient(10*time.Second).Post(ep.BaseURL()+"/api/enqueue", "application/json", bytes.NewReader(reqBody)) if err != nil { return nil, fmt.Errorf("connect to daemon: %w", err) } @@ -607,7 +609,7 @@ func cancelJob(serverAddr string, jobID int64) error { if err != nil { return fmt.Errorf("marshal cancel request: %w", err) } - resp, err := http.Post(serverAddr+"/api/job/cancel", "application/json", bytes.NewReader(reqBody)) + resp, err := 
getDaemonHTTPClient(10*time.Second).Post(serverAddr+"/api/job/cancel", "application/json", bytes.NewReader(reqBody)) if err != nil { return fmt.Errorf("connect to daemon: %w", err) } diff --git a/cmd/roborev/daemon_client.go b/cmd/roborev/daemon_client.go index 78efb8a7f..6f5d35004 100644 --- a/cmd/roborev/daemon_client.go +++ b/cmd/roborev/daemon_client.go @@ -18,8 +18,9 @@ import ( // waitForJob polls until a job completes and displays the review // Uses the provided serverAddr to ensure we poll the same daemon that received the job. -func waitForJob(cmd *cobra.Command, serverAddr string, jobID int64, quiet bool) error { - client := &http.Client{Timeout: 5 * time.Second} +func waitForJob(cmd *cobra.Command, ep daemon.DaemonEndpoint, jobID int64, quiet bool) error { + serverAddr := ep.BaseURL() + client := ep.HTTPClient(5 * time.Second) if !quiet { cmd.Printf("Waiting for review to complete...") @@ -65,7 +66,7 @@ func waitForJob(cmd *cobra.Command, serverAddr string, jobID int64, quiet bool) cmd.Printf(" done!\n\n") } // Fetch and display the review - return showReview(cmd, serverAddr, jobID, quiet) + return showReview(cmd, ep, jobID, quiet) case storage.JobStatusFailed: if !quiet { @@ -109,9 +110,9 @@ func waitForJob(cmd *cobra.Command, serverAddr string, jobID int64, quiet bool) // showReview fetches and displays a review by job ID // When quiet is true, suppresses output but still returns exit code based on verdict. 
-func showReview(cmd *cobra.Command, addr string, jobID int64, quiet bool) error { - client := &http.Client{Timeout: 5 * time.Second} - resp, err := client.Get(fmt.Sprintf("%s/api/review?job_id=%d", addr, jobID)) +func showReview(cmd *cobra.Command, ep daemon.DaemonEndpoint, jobID int64, quiet bool) error { + client := ep.HTTPClient(5 * time.Second) + resp, err := client.Get(fmt.Sprintf("%s/api/review?job_id=%d", ep.BaseURL(), jobID)) if err != nil { return fmt.Errorf("failed to fetch review: %w", err) } @@ -148,8 +149,9 @@ func showReview(cmd *cobra.Command, addr string, jobID int64, quiet bool) error // findJobForCommit finds a job for the given commit SHA in the specified repo func findJobForCommit(repoPath, sha string) (*storage.ReviewJob, error) { - addr := getDaemonAddr() - client := &http.Client{Timeout: 5 * time.Second} + ep := getDaemonEndpoint() + addr := ep.BaseURL() + client := ep.HTTPClient(5 * time.Second) // Normalize repo path to handle symlinks/relative paths consistently normalizedRepo := repoPath @@ -232,8 +234,9 @@ func waitForReview(jobID int64) (*storage.Review, error) { } func waitForReviewWithInterval(jobID int64, pollInterval time.Duration) (*storage.Review, error) { - addr := getDaemonAddr() - client := &http.Client{Timeout: 10 * time.Second} + ep := getDaemonEndpoint() + addr := ep.BaseURL() + client := ep.HTTPClient(10 * time.Second) for { resp, err := client.Get(fmt.Sprintf("%s/api/jobs?id=%d", addr, jobID)) @@ -288,7 +291,8 @@ func waitForReviewWithInterval(jobID int64, pollInterval time.Duration) (*storag // enqueueReview enqueues a review job and returns the job ID func enqueueReview(repoPath, gitRef, agentName string) (int64, error) { - addr := getDaemonAddr() + ep := getDaemonEndpoint() + addr := ep.BaseURL() reqBody, _ := json.Marshal(daemon.EnqueueRequest{ RepoPath: repoPath, @@ -296,7 +300,7 @@ func enqueueReview(repoPath, gitRef, agentName string) (int64, error) { Agent: agentName, }) - resp, err := 
http.Post(addr+"/api/enqueue", "application/json", bytes.NewReader(reqBody)) + resp, err := ep.HTTPClient(10*time.Second).Post(addr+"/api/enqueue", "application/json", bytes.NewReader(reqBody)) if err != nil { return 0, err } @@ -317,8 +321,9 @@ func enqueueReview(repoPath, gitRef, agentName string) (int64, error) { // getCommentsForJob fetches comments for a job func getCommentsForJob(jobID int64) ([]storage.Response, error) { - addr := getDaemonAddr() - client := &http.Client{Timeout: 5 * time.Second} + ep := getDaemonEndpoint() + addr := ep.BaseURL() + client := ep.HTTPClient(5 * time.Second) resp, err := client.Get(fmt.Sprintf("%s/api/comments?job_id=%d", addr, jobID)) if err != nil { diff --git a/cmd/roborev/daemon_integration_test.go b/cmd/roborev/daemon_integration_test.go index e96cd2320..f3dd6f862 100644 --- a/cmd/roborev/daemon_integration_test.go +++ b/cmd/roborev/daemon_integration_test.go @@ -154,7 +154,7 @@ func waitForDaemonReady(t *testing.T, timeout time.Duration, pid int) bool { runtimes, err := daemon.ListAllRuntimes() if err == nil { for _, rt := range runtimes { - if rt.PID == pid && daemon.IsDaemonAlive(rt.Addr) { + if rt.PID == pid && daemon.IsDaemonAlive(rt.Endpoint()) { return true } } diff --git a/cmd/roborev/daemon_lifecycle.go b/cmd/roborev/daemon_lifecycle.go index fab5333c8..adc948ad2 100644 --- a/cmd/roborev/daemon_lifecycle.go +++ b/cmd/roborev/daemon_lifecycle.go @@ -64,12 +64,49 @@ var ErrDaemonNotRunning = fmt.Errorf("daemon not running (no runtime file found) // ErrJobNotFound indicates a job ID was not found during polling var ErrJobNotFound = fmt.Errorf("job not found") -// getDaemonAddr returns the daemon address from runtime file or default -func getDaemonAddr() string { - if info, err := daemon.GetAnyRunningDaemon(); err == nil { - return fmt.Sprintf("http://%s", info.Addr) +// parsedServerEndpoint caches the validated endpoint from the --server flag. +// Set once by validateServerFlag, read by getDaemonEndpoint. 
+var parsedServerEndpoint *daemon.DaemonEndpoint + +// validateServerFlag parses and validates the --server flag value. +// Called from PersistentPreRunE so invalid values fail fast. +func validateServerFlag() error { + ep, err := daemon.ParseEndpoint(serverAddr) + if err != nil { + return fmt.Errorf("invalid --server address %q: %w", serverAddr, err) + } + parsedServerEndpoint = &ep + return nil +} + +// getDaemonEndpoint returns the daemon endpoint from runtime file or config. +// An explicit --server flag takes precedence over auto-discovered daemons. +func getDaemonEndpoint() daemon.DaemonEndpoint { + // Explicit --server flag takes precedence over auto-discovery + if serverAddr != "" { + if parsedServerEndpoint != nil { + return *parsedServerEndpoint + } + ep, err := daemon.ParseEndpoint(serverAddr) + if err != nil { + return daemon.DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:7373"} + } + return ep + } + // No explicit flag: discover running daemon + if info, err := getAnyRunningDaemon(); err == nil { + return info.Endpoint() + } + // Nothing running: use default + if parsedServerEndpoint != nil { + return *parsedServerEndpoint } - return serverAddr + return daemon.DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:7373"} +} + +// getDaemonHTTPClient returns an HTTP client configured for the daemon endpoint. 
+func getDaemonHTTPClient(timeout time.Duration) *http.Client { + return getDaemonEndpoint().HTTPClient(timeout) } // registerRepoError is a server-side error from the register endpoint @@ -110,8 +147,8 @@ func registerRepo(repoPath string) error { if err != nil { return err } - client := &http.Client{Timeout: 5 * time.Second} - resp, err := client.Post(getDaemonAddr()+"/api/repos/register", "application/json", bytes.NewReader(body)) + ep := getDaemonEndpoint() + resp, err := ep.HTTPClient(5*time.Second).Post(ep.BaseURL()+"/api/repos/register", "application/json", bytes.NewReader(body)) if err != nil { return err // connection error (*url.Error wrapping net.Error) } @@ -133,7 +170,7 @@ func ensureDaemon() error { // First check runtime files for any running daemon if info, err := getAnyRunningDaemon(); err == nil { if !skipVersionCheck { - probe, err := daemon.ProbeDaemon(info.Addr, 2*time.Second) + probe, err := daemon.ProbeDaemon(info.Endpoint(), 2*time.Second) if err != nil { if verbose { fmt.Printf("Daemon probe failed, restarting...\n") @@ -155,13 +192,13 @@ func ensureDaemon() error { } } - serverAddr = fmt.Sprintf("http://%s", info.Addr) return nil } // Try the configured default address for manual/legacy daemon runs that do // not have a runtime file yet. - if probe, err := probeDaemonServerURL(serverAddr, 2*time.Second); err == nil { + ep := getDaemonEndpoint() + if probe, err := daemon.ProbeDaemon(ep, 2*time.Second); err == nil { if !skipVersionCheck { if probe.Version == "" { if verbose { @@ -212,8 +249,7 @@ func startDaemon() error { // Wait for daemon to publish a responsive runtime entry. 
for range 30 { time.Sleep(100 * time.Millisecond) - if info, err := daemon.GetAnyRunningDaemon(); err == nil { - serverAddr = fmt.Sprintf("http://%s", info.Addr) + if _, err := daemon.GetAnyRunningDaemon(); err == nil { return nil } } @@ -221,17 +257,6 @@ func startDaemon() error { return fmt.Errorf("daemon failed to start") } -func probeDaemonServerURL(serverURL string, timeout time.Duration) (*daemon.PingInfo, error) { - parsed, err := url.Parse(serverURL) - if err != nil { - return nil, err - } - if parsed.Host == "" { - return nil, fmt.Errorf("invalid daemon server address %q", serverURL) - } - return daemon.ProbeDaemon(parsed.Host, timeout) -} - // stopDaemon stops any running daemons. // Returns ErrDaemonNotRunning if no daemon runtime files are found. func stopDaemon() error { diff --git a/cmd/roborev/fix.go b/cmd/roborev/fix.go index 6addff7f6..18e46228a 100644 --- a/cmd/roborev/fix.go +++ b/cmd/roborev/fix.go @@ -610,7 +610,7 @@ func queryOpenJobs( ctx context.Context, repoRoot, branch string, ) ([]storage.ReviewJob, error) { - jobs, err := withFixDaemonRetryContext(ctx, serverAddr, func(addr string) ([]storage.ReviewJob, error) { + jobs, err := withFixDaemonRetryContext(ctx, getDaemonEndpoint().BaseURL(), func(addr string) ([]storage.ReviewJob, error) { queryURL := fmt.Sprintf( "%s/api/jobs?status=done&repo=%s&closed=false&limit=0", addr, url.QueryEscape(repoRoot), @@ -715,13 +715,14 @@ func runFixList(cmd *cobra.Command, branch string, newestFirst bool) error { cmd.Printf("Found %d open job(s):\n\n", len(jobIDs)) + listAddr := getDaemonEndpoint().BaseURL() for _, id := range jobIDs { - job, err := fetchJob(ctx, serverAddr, id) + job, err := fetchJob(ctx, listAddr, id) if err != nil { fmt.Fprintf(cmd.ErrOrStderr(), "Warning: could not fetch job %d: %v\n", id, err) continue } - review, err := fetchReview(ctx, serverAddr, id) + review, err := fetchReview(ctx, listAddr, id) if err != nil { fmt.Fprintf(cmd.ErrOrStderr(), "Warning: could not fetch review for 
job %d: %v\n", id, err) continue @@ -815,8 +816,13 @@ func fixSingleJob(cmd *cobra.Command, repoRoot string, jobID int64, opts fixOpti ctx = context.Background() } + // Snapshot the daemon address once for the entire operation. + // The retry helpers (withFixDaemonRetryContext) handle re-resolution + // internally if a connection error triggers daemon recovery. + addr := getDaemonEndpoint().BaseURL() + // Fetch the job to check status - job, err := fetchJob(ctx, serverAddr, jobID) + job, err := fetchJob(ctx, addr, jobID) if err != nil { return fmt.Errorf("fetch job: %w", err) } @@ -826,7 +832,7 @@ func fixSingleJob(cmd *cobra.Command, repoRoot string, jobID int64, opts fixOpti } // Fetch the review/analysis output - review, err := fetchReview(ctx, serverAddr, jobID) + review, err := fetchReview(ctx, addr, jobID) if err != nil { return fmt.Errorf("fetch review: %w", err) } @@ -836,7 +842,7 @@ func fixSingleJob(cmd *cobra.Command, repoRoot string, jobID int64, opts fixOpti if !opts.quiet { cmd.Printf("Job %d: review passed, skipping fix\n", jobID) } - if err := markJobClosed(ctx, serverAddr, jobID); err != nil && !opts.quiet { + if err := markJobClosed(ctx, addr, jobID); err != nil && !opts.quiet { cmd.Printf("Warning: could not close job %d: %v\n", jobID, err) } return nil @@ -914,7 +920,7 @@ func fixSingleJob(cmd *cobra.Command, repoRoot string, jobID int64, opts fixOpti // Enqueue review for fix commit if result.CommitCreated { - if err := enqueueIfNeeded(ctx, serverAddr, repoRoot, result.NewCommitSHA); err != nil && !opts.quiet { + if err := enqueueIfNeeded(ctx, addr, repoRoot, result.NewCommitSHA); err != nil && !opts.quiet { cmd.Printf("Warning: could not enqueue review for fix commit: %v\n", err) } } @@ -925,13 +931,13 @@ func fixSingleJob(cmd *cobra.Command, repoRoot string, jobID int64, opts fixOpti responseText = fmt.Sprintf("Fix applied via `roborev fix` command (commit: %s)", git.ShortSHA(result.NewCommitSHA)) } - if err := addJobResponse(ctx, serverAddr, 
jobID, "roborev-fix", responseText); err != nil { + if err := addJobResponse(ctx, addr, jobID, "roborev-fix", responseText); err != nil { if !opts.quiet { cmd.Printf("Warning: could not add response to job: %v\n", err) } } - if err := markJobClosed(ctx, serverAddr, jobID); err != nil { + if err := markJobClosed(ctx, addr, jobID); err != nil { if !opts.quiet { cmd.Printf("Warning: could not close job: %v\n", err) } @@ -1006,9 +1012,10 @@ func runFixBatch(cmd *cobra.Command, jobIDs []int64, branch string, allBranches, } // Fetch all jobs and reviews + batchAddr := getDaemonEndpoint().BaseURL() var entries []batchEntry for _, id := range jobIDs { - job, err := fetchJob(ctx, serverAddr, id) + job, err := fetchJob(ctx, batchAddr, id) if err != nil { if !opts.quiet { cmd.Printf("Warning: skipping job %d: %v\n", id, err) @@ -1021,7 +1028,7 @@ func runFixBatch(cmd *cobra.Command, jobIDs []int64, branch string, allBranches, } continue } - review, err := fetchReview(ctx, serverAddr, id) + review, err := fetchReview(ctx, batchAddr, id) if err != nil { if !opts.quiet { cmd.Printf("Warning: skipping job %d: %v\n", id, err) @@ -1032,7 +1039,7 @@ func runFixBatch(cmd *cobra.Command, jobIDs []int64, branch string, allBranches, if !opts.quiet { cmd.Printf("Skipping job %d (review passed)\n", id) } - if err := markJobClosed(ctx, serverAddr, id); err != nil && !opts.quiet { + if err := markJobClosed(ctx, batchAddr, id); err != nil && !opts.quiet { cmd.Printf("Warning: could not close job %d: %v\n", id, err) } continue @@ -1135,7 +1142,7 @@ func runFixBatch(cmd *cobra.Command, jobIDs []int64, branch string, allBranches, // Enqueue review for fix commit if result.CommitCreated { - if enqErr := enqueueIfNeeded(ctx, serverAddr, repoRoot, result.NewCommitSHA); enqErr != nil && !opts.quiet { + if enqErr := enqueueIfNeeded(ctx, batchAddr, repoRoot, result.NewCommitSHA); enqErr != nil && !opts.quiet { cmd.Printf("Warning: could not enqueue review for fix commit: %v\n", enqErr) } } @@ 
-1146,10 +1153,10 @@ func runFixBatch(cmd *cobra.Command, jobIDs []int64, branch string, allBranches, responseText = fmt.Sprintf("Fix applied via `roborev fix --batch` (commit: %s)", git.ShortSHA(result.NewCommitSHA)) } for _, e := range batch { - if addErr := addJobResponse(ctx, serverAddr, e.jobID, "roborev-fix", responseText); addErr != nil && !opts.quiet { + if addErr := addJobResponse(ctx, batchAddr, e.jobID, "roborev-fix", responseText); addErr != nil && !opts.quiet { cmd.Printf("Warning: could not add response to job %d: %v\n", e.jobID, addErr) } - if markErr := markJobClosed(ctx, serverAddr, e.jobID); markErr != nil { + if markErr := markJobClosed(ctx, batchAddr, e.jobID); markErr != nil { if !opts.quiet { cmd.Printf("Warning: could not close job %d: %v\n", e.jobID, markErr) } @@ -1241,7 +1248,7 @@ func formatJobIDs(ids []int64) string { // fetchJob retrieves a job from the daemon func fetchJob(ctx context.Context, serverAddr string, jobID int64) (*storage.ReviewJob, error) { return withFixDaemonRetryContext(ctx, serverAddr, func(addr string) (*storage.ReviewJob, error) { - client := &http.Client{Timeout: 30 * time.Second} + client := getDaemonHTTPClient(30 * time.Second) req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/api/jobs?id=%d", addr, jobID), nil) if err != nil { @@ -1277,7 +1284,7 @@ func fetchJob(ctx context.Context, serverAddr string, jobID int64) (*storage.Rev // fetchReview retrieves the review output for a job func fetchReview(ctx context.Context, serverAddr string, jobID int64) (*storage.Review, error) { return withFixDaemonRetryContext(ctx, serverAddr, func(addr string) (*storage.Review, error) { - client := &http.Client{Timeout: 30 * time.Second} + client := getDaemonHTTPClient(30 * time.Second) req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/api/review?job_id=%d", addr, jobID), nil) if err != nil { @@ -1469,15 +1476,15 @@ func enqueueIfNeeded(ctx context.Context, serverAddr, repoPath, sha string) erro 
func refreshFixDaemonAddr(ctx context.Context) (string, error) { if shouldStopFixDaemonRetry(ctx) { - return serverAddr, ctx.Err() + return getDaemonEndpoint().BaseURL(), ctx.Err() } if err := fixDaemonEnsure(); err != nil { - return serverAddr, err + return getDaemonEndpoint().BaseURL(), err } if shouldStopFixDaemonRetry(ctx) { - return serverAddr, ctx.Err() + return getDaemonEndpoint().BaseURL(), ctx.Err() } - return serverAddr, nil + return getDaemonEndpoint().BaseURL(), nil } // hasJobForSHA checks if a review job already exists for the given commit SHA. @@ -1585,21 +1592,21 @@ func waitForFixDaemonRecovery(ctx context.Context) (string, error) { var lastErr error for { if ctx != nil && ctx.Err() != nil { - return serverAddr, ctx.Err() + return getDaemonEndpoint().BaseURL(), ctx.Err() } if err := fixDaemonEnsure(); err == nil { - return serverAddr, nil + return getDaemonEndpoint().BaseURL(), nil } else { lastErr = err } if time.Now().After(deadline) { if lastErr != nil { - return serverAddr, lastErr + return getDaemonEndpoint().BaseURL(), lastErr } - return serverAddr, fmt.Errorf("daemon recovery timed out") + return getDaemonEndpoint().BaseURL(), fmt.Errorf("daemon recovery timed out") } if ctx != nil && ctx.Err() != nil { - return serverAddr, ctx.Err() + return getDaemonEndpoint().BaseURL(), ctx.Err() } fixDaemonSleep(fixDaemonRecoveryPoll) } @@ -1620,5 +1627,5 @@ func doFixDaemonRequest(ctx context.Context, method, requestURL string, body []b if body != nil { req.Header.Set("Content-Type", "application/json") } - return http.DefaultClient.Do(req) + return getDaemonHTTPClient(30 * time.Second).Do(req) } diff --git a/cmd/roborev/fix_test.go b/cmd/roborev/fix_test.go index fb7ba4844..ba1f0d5c0 100644 --- a/cmd/roborev/fix_test.go +++ b/cmd/roborev/fix_test.go @@ -25,8 +25,10 @@ import ( "github.com/roborev-dev/roborev/internal/agent" "github.com/roborev-dev/roborev/internal/config" + "github.com/roborev-dev/roborev/internal/daemon" 
"github.com/roborev-dev/roborev/internal/storage" "github.com/roborev-dev/roborev/internal/testutil" + "github.com/roborev-dev/roborev/internal/version" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -550,6 +552,8 @@ func TestFixSingleJobRecoversPostFixDaemonCalls(t *testing.T) { case "/api/comment": commentCount.Add(1) w.WriteHeader(http.StatusCreated) + case "/api/comments": + writeJSON(w, map[string]any{"responses": []any{}}) case "/api/review/close": closeCount.Add(1) w.WriteHeader(http.StatusOK) @@ -566,8 +570,25 @@ func TestFixSingleJobRecoversPostFixDaemonCalls(t *testing.T) { return nil }) + var daemonDead atomic.Bool + hijackAndClose := func(w http.ResponseWriter) { + hj, ok := w.(http.Hijacker) + if ok { + conn, _, _ := hj.Hijack() + if conn != nil { + conn.Close() + } + } + } + deadHandler := func(w http.ResponseWriter, r *http.Request) { + hijackAndClose(w) + } _ = newMockDaemonBuilder(t). WithHandler("/api/jobs", func(w http.ResponseWriter, r *http.Request) { + if daemonDead.Load() { + hijackAndClose(w) + return + } writeJSON(w, map[string]any{ "jobs": []storage.ReviewJob{{ ID: 99, @@ -578,8 +599,15 @@ func TestFixSingleJobRecoversPostFixDaemonCalls(t *testing.T) { }). WithHandler("/api/review", func(w http.ResponseWriter, r *http.Request) { writeJSON(w, storage.Review{Output: "## Issues\n- Found minor issue"}) + // Simulate daemon death after responding + daemonDead.Store(true) + removeAllDaemonFiles(t) serverAddr = deadURL }). + WithHandler("/api/enqueue", deadHandler). + WithHandler("/api/comment", deadHandler). + WithHandler("/api/comments", deadHandler). + WithHandler("/api/review/close", deadHandler). 
Build() cmd, output := newTestCmd(t) @@ -1167,6 +1195,8 @@ func TestRunFixOpenRecoversFromDaemonRestartOnRequery(t *testing.T) { w.WriteHeader(http.StatusCreated) case "/api/review/close": w.WriteHeader(http.StatusOK) + case "/api/ping": + writeJSON(w, daemon.PingInfo{Service: "roborev", Version: version.Version}) default: http.NotFound(w, r) } @@ -1209,6 +1239,10 @@ func TestRunFixOpenRecoversFromDaemonRestartOnRequery(t *testing.T) { }). WithHandler("/api/review/close", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) + // Simulate daemon death: remove runtime file so + // getDaemonEndpoint falls back to serverAddr, then + // point serverAddr at a dead address. + removeAllDaemonFiles(t) serverAddr = deadURL }). Build() @@ -2306,8 +2340,20 @@ func TestRunFixWithSeenDiscoveryAbortsOnConnectionError(t *testing.T) { }) deadURL := "http://127.0.0.1:1" + var daemonDead atomic.Bool _ = newMockDaemonBuilder(t). WithHandler("/api/jobs", func(w http.ResponseWriter, r *http.Request) { + if daemonDead.Load() { + // Simulate dead daemon: hijack connection and close it + hj, ok := w.(http.Hijacker) + if ok { + conn, _, _ := hj.Hijack() + if conn != nil { + conn.Close() + } + } + return + } writeJSON(w, map[string]any{ "jobs": []storage.ReviewJob{{ ID: 10, @@ -2315,7 +2361,23 @@ func TestRunFixWithSeenDiscoveryAbortsOnConnectionError(t *testing.T) { Agent: "test", }}, }) + // Simulate daemon death after responding + daemonDead.Store(true) + removeAllDaemonFiles(t) serverAddr = deadURL + getAnyRunningDaemon = func() (*daemon.RuntimeInfo, error) { + return nil, os.ErrNotExist + } + }). + WithHandler("/api/review", func(w http.ResponseWriter, r *http.Request) { + // Daemon is dead — hijack and close connection + hj, ok := w.(http.Hijacker) + if ok { + conn, _, _ := hj.Hijack() + if conn != nil { + conn.Close() + } + } }). 
Build() diff --git a/cmd/roborev/helpers_test.go b/cmd/roborev/helpers_test.go index a337d70df..24727c482 100644 --- a/cmd/roborev/helpers_test.go +++ b/cmd/roborev/helpers_test.go @@ -16,6 +16,7 @@ import ( "github.com/spf13/cobra" "github.com/stretchr/testify/require" + "github.com/roborev-dev/roborev/internal/daemon" "github.com/roborev-dev/roborev/internal/storage" ) @@ -117,6 +118,15 @@ func patchServerAddr(t *testing.T, newURL string) { t.Cleanup(func() { serverAddr = old }) } +// mustParseEndpoint parses a server URL into a DaemonEndpoint, failing the +// test if parsing fails. Useful for converting httptest.Server.URL to an endpoint. +func mustParseEndpoint(t *testing.T, serverURL string) daemon.DaemonEndpoint { + t.Helper() + ep, err := daemon.ParseEndpoint(serverURL) + require.NoError(t, err, "parse endpoint %q", serverURL) + return ep +} + // createTestRepo creates a temporary git repository with the given files // committed. It returns the TestGitRepo. func createTestRepo(t *testing.T, files map[string]string) *TestGitRepo { diff --git a/cmd/roborev/job_helpers.go b/cmd/roborev/job_helpers.go index 778dd962a..6b989a62e 100644 --- a/cmd/roborev/job_helpers.go +++ b/cmd/roborev/job_helpers.go @@ -16,7 +16,7 @@ import ( // waitForJobCompletion polls a job until it completes, streaming output if provided. // This consolidates polling logic used across compact, analyze, fix, and run commands. 
func waitForJobCompletion(ctx context.Context, serverAddr string, jobID int64, output io.Writer) (*storage.Review, error) { - client := &http.Client{Timeout: 30 * time.Second} + client := getDaemonHTTPClient(30 * time.Second) pollInterval := 1 * time.Second maxInterval := 5 * time.Second lastOutputLen := 0 diff --git a/cmd/roborev/list.go b/cmd/roborev/list.go index 6f0313edc..2e50c1b00 100644 --- a/cmd/roborev/list.go +++ b/cmd/roborev/list.go @@ -49,7 +49,8 @@ Examples: return fmt.Errorf("daemon not running: %w", err) } - addr := getDaemonAddr() + ep := getDaemonEndpoint() + addr := ep.BaseURL() // Auto-resolve repo from cwd when not specified. // Use worktree root for branch detection, main repo root for API queries @@ -105,7 +106,7 @@ Examples: } params.Set("limit", strconv.Itoa(limit)) - client := &http.Client{Timeout: 5 * time.Second} + client := ep.HTTPClient(5 * time.Second) resp, err := client.Get(addr + "/api/jobs?" + params.Encode()) if err != nil { return fmt.Errorf("failed to connect to daemon (is it running?)") diff --git a/cmd/roborev/main.go b/cmd/roborev/main.go index 50af1c0f1..42232f62d 100644 --- a/cmd/roborev/main.go +++ b/cmd/roborev/main.go @@ -16,9 +16,12 @@ func main() { Use: "roborev", Short: "Automatic code review for git commits", Long: "roborev automatically reviews git commits using AI agents (Codex, Claude Code, Gemini, Copilot, OpenCode, Cursor, Kiro, Pi)", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + return validateServerFlag() + }, } - rootCmd.PersistentFlags().StringVar(&serverAddr, "server", "http://127.0.0.1:7373", "daemon server address") + rootCmd.PersistentFlags().StringVar(&serverAddr, "server", "", "daemon server address (e.g. 
127.0.0.1:7373 or unix://)") rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "verbose output") rootCmd.AddCommand(initCmd()) diff --git a/cmd/roborev/main_test_helpers_test.go b/cmd/roborev/main_test_helpers_test.go index 0822d47e8..0cc1c2c15 100644 --- a/cmd/roborev/main_test_helpers_test.go +++ b/cmd/roborev/main_test_helpers_test.go @@ -178,11 +178,13 @@ func NewMockDaemon(t *testing.T, hooks MockRefineHooks) *MockDaemon { require.NoError(t, os.WriteFile(filepath.Join(tmpDir, "daemon.json"), data, 0644), "failed to write daemon.json") origServerAddr := serverAddr + origGetAnyRunningDaemon := getAnyRunningDaemon serverAddr = ts.URL t.Cleanup(func() { ts.Close() serverAddr = origServerAddr + getAnyRunningDaemon = origGetAnyRunningDaemon }) m := &MockDaemon{ @@ -498,6 +500,27 @@ func daemonFromHandler(t *testing.T, handler http.Handler) *MockDaemon { }) } +// removeAllDaemonFiles removes all daemon runtime files from the data +// directory. Tests use this to simulate daemon death: once the runtime +// files are gone, getDaemonEndpoint falls back to the serverAddr global, +// which can be pointed at a dead address. +func removeAllDaemonFiles(t *testing.T) { + t.Helper() + dataDir := os.Getenv("ROBOREV_DATA_DIR") + if dataDir == "" { + return + } + entries, err := os.ReadDir(dataDir) + if err != nil { + return + } + for _, e := range entries { + if strings.HasPrefix(e.Name(), "daemon.") && strings.HasSuffix(e.Name(), ".json") { + os.Remove(filepath.Join(dataDir, e.Name())) + } + } +} + var chdirMutex sync.Mutex // runWithOutput runs a cobra command within a specific directory and returns its output. diff --git a/cmd/roborev/postcommit.go b/cmd/roborev/postcommit.go index ae697442c..e7885a0e4 100644 --- a/cmd/roborev/postcommit.go +++ b/cmd/roborev/postcommit.go @@ -16,9 +16,12 @@ import ( "github.com/spf13/cobra" ) -// hookHTTPClient is used for hook HTTP requests. Short timeout +// hookHTTPClient returns an HTTP client for hook requests. 
Short timeout // ensures hooks never block commits if the daemon stalls. -var hookHTTPClient = &http.Client{Timeout: 3 * time.Second} +// Tests can override this variable to inject custom transports. +var hookHTTPClient = func() *http.Client { + return getDaemonHTTPClient(3 * time.Second) +} // hookLogPath can be overridden in tests. var hookLogPath = "" @@ -77,8 +80,9 @@ func postCommitCmd() *cobra.Command { Branch: branchName, }) - resp, err := hookHTTPClient.Post( - serverAddr+"/api/enqueue", + ep := getDaemonEndpoint() + resp, err := hookHTTPClient().Post( + ep.BaseURL()+"/api/enqueue", "application/json", bytes.NewReader(reqBody), ) diff --git a/cmd/roborev/postcommit_test.go b/cmd/roborev/postcommit_test.go index d0ca71647..643eaa55f 100644 --- a/cmd/roborev/postcommit_test.go +++ b/cmd/roborev/postcommit_test.go @@ -275,9 +275,11 @@ func TestPostCommitTimesOutOnSlowDaemon(t *testing.T) { rt := &stallingRoundTripper{hit: make(chan struct{}, 1)} orig := hookHTTPClient - hookHTTPClient = &http.Client{ - Timeout: 50 * time.Millisecond, - Transport: rt, + hookHTTPClient = func() *http.Client { + return &http.Client{ + Timeout: 50 * time.Millisecond, + Transport: rt, + } } t.Cleanup(func() { hookHTTPClient = orig }) diff --git a/cmd/roborev/refine.go b/cmd/roborev/refine.go index a6057cd87..64d0d9da6 100644 --- a/cmd/roborev/refine.go +++ b/cmd/roborev/refine.go @@ -758,8 +758,9 @@ func runRefineList( cmd.Printf("Found %d failed review(s) to refine:\n\n", len(failed)) + refineListAddr := getDaemonEndpoint().BaseURL() for _, job := range failed { - review, err := fetchReview(ctx, serverAddr, job.ID) + review, err := fetchReview(ctx, refineListAddr, job.ID) if err != nil { fmt.Fprintf( cmd.ErrOrStderr(), diff --git a/cmd/roborev/remap.go b/cmd/roborev/remap.go index 4caf3ba24..4cdf5ce2f 100644 --- a/cmd/roborev/remap.go +++ b/cmd/roborev/remap.go @@ -104,8 +104,8 @@ new commits. 
Called automatically by the post-rewrite hook.`, return nil } - addr := getDaemonAddr() - client := daemon.NewHTTPClient(addr) + ep := getDaemonEndpoint() + client := daemon.NewHTTPClient(ep) result, err := client.Remap(daemon.RemapRequest{ RepoPath: repoRoot, diff --git a/cmd/roborev/review.go b/cmd/roborev/review.go index fdef2464f..d7e06b150 100644 --- a/cmd/roborev/review.go +++ b/cmd/roborev/review.go @@ -10,6 +10,7 @@ import ( "os" "path/filepath" "strings" + "time" "github.com/roborev-dev/roborev/internal/agent" "github.com/roborev-dev/roborev/internal/config" @@ -287,7 +288,8 @@ Examples: reqBody, _ := json.Marshal(reqFields) - resp, err := http.Post(serverAddr+"/api/enqueue", "application/json", bytes.NewReader(reqBody)) + ep := getDaemonEndpoint() + resp, err := ep.HTTPClient(10*time.Second).Post(ep.BaseURL()+"/api/enqueue", "application/json", bytes.NewReader(reqBody)) if err != nil { return fmt.Errorf("failed to connect to daemon: %w", err) } @@ -326,7 +328,7 @@ Examples: // If --wait, poll until job completes and show result if wait { - err := waitForJob(cmd, serverAddr, job.ID, quiet) + err := waitForJob(cmd, ep, job.ID, quiet) // Only silence Cobra's error output for exitError (verdict-based exit codes) // Keep error output for actual failures (network errors, job not found, etc.) 
if _, isExitErr := err.(*exitError); isExitErr { diff --git a/cmd/roborev/run.go b/cmd/roborev/run.go index c2fa64d42..6920cab40 100644 --- a/cmd/roborev/run.go +++ b/cmd/roborev/run.go @@ -154,7 +154,8 @@ func runPrompt(cmd *cobra.Command, args []string, agentName, modelStr, reasoning Agentic: agentic, }) - resp, err := http.Post(serverAddr+"/api/enqueue", "application/json", bytes.NewReader(reqBody)) + ep := getDaemonEndpoint() + resp, err := ep.HTTPClient(10*time.Second).Post(ep.BaseURL()+"/api/enqueue", "application/json", bytes.NewReader(reqBody)) if err != nil { return fmt.Errorf("failed to connect to daemon: %w", err) } @@ -180,7 +181,7 @@ func runPrompt(cmd *cobra.Command, args []string, agentName, modelStr, reasoning // If --wait, poll until job completes and show result if wait { - return waitForPromptJob(cmd, serverAddr, job.ID, quiet, promptPollInterval) + return waitForPromptJob(cmd, ep, job.ID, quiet, promptPollInterval) } return nil @@ -193,8 +194,9 @@ var promptPollInterval = 500 * time.Millisecond // waitForPromptJob waits for a prompt job to complete and displays the result. // Unlike waitForJob, this doesn't apply verdict-based exit codes since prompt // jobs don't have PASS/FAIL verdicts. -func waitForPromptJob(cmd *cobra.Command, serverAddr string, jobID int64, quiet bool, pollInterval time.Duration) error { - client := &http.Client{Timeout: 5 * time.Second} +func waitForPromptJob(cmd *cobra.Command, ep daemon.DaemonEndpoint, jobID int64, quiet bool, pollInterval time.Duration) error { + serverAddr := ep.BaseURL() + client := ep.HTTPClient(5 * time.Second) if pollInterval <= 0 { pollInterval = promptPollInterval @@ -283,7 +285,7 @@ func waitForPromptJob(cmd *cobra.Command, serverAddr string, jobID int64, quiet // Unlike showReview, this doesn't apply verdict-based exit codes. // The doneMsg parameter is printed before the result on success (used for "done!" message). 
func showPromptResult(cmd *cobra.Command, addr string, jobID int64, quiet bool, doneMsg string) error { - client := &http.Client{Timeout: 5 * time.Second} + client := getDaemonHTTPClient(5 * time.Second) resp, err := client.Get(fmt.Sprintf("%s/api/review?job_id=%d", addr, jobID)) if err != nil { return fmt.Errorf("failed to fetch result: %w", err) diff --git a/cmd/roborev/run_test.go b/cmd/roborev/run_test.go index 10d73deab..864eb00b3 100644 --- a/cmd/roborev/run_test.go +++ b/cmd/roborev/run_test.go @@ -344,7 +344,7 @@ func TestWaitForPromptJob(t *testing.T) { cmd, out := newTestCmd(t) // Use small poll interval for tests - err := waitForPromptJob(cmd, server.URL, 123, tt.quiet, 1*time.Millisecond) + err := waitForPromptJob(cmd, mustParseEndpoint(t, server.URL), 123, tt.quiet, 1*time.Millisecond) if tt.expectError != "" { require.Error(t, err) @@ -372,7 +372,7 @@ func TestWaitForPromptJob(t *testing.T) { }) cmd, _ := newTestCmd(t) - err := waitForPromptJob(cmd, server.URL, 123, true, 1*time.Millisecond) + err := waitForPromptJob(cmd, mustParseEndpoint(t, server.URL), 123, true, 1*time.Millisecond) require.NoError(t, err) @@ -387,7 +387,7 @@ func TestWaitForPromptJob(t *testing.T) { }) cmd, _ := newTestCmd(t) - err := waitForPromptJob(cmd, server.URL, 123, true, 1*time.Millisecond) + err := waitForPromptJob(cmd, mustParseEndpoint(t, server.URL), 123, true, 1*time.Millisecond) require.NoError(t, err) @@ -406,7 +406,7 @@ func TestWaitForPromptJob(t *testing.T) { t.Cleanup(server.Close) cmd, _ := newTestCmd(t) - err := waitForPromptJob(cmd, server.URL, 123, true, 1*time.Millisecond) + err := waitForPromptJob(cmd, mustParseEndpoint(t, server.URL), 123, true, 1*time.Millisecond) require.Error(t, err, "Expected error for max unknown retries") @@ -445,7 +445,7 @@ func TestWaitForPromptJob(t *testing.T) { t.Cleanup(server.Close) cmd, _ := newTestCmd(t) - err := waitForPromptJob(cmd, server.URL, 123, true, tt.interval) + err := waitForPromptJob(cmd, mustParseEndpoint(t, 
server.URL), 123, true, tt.interval) require.NoError(t, err, "Expected no error, got: %v") @@ -474,7 +474,7 @@ func TestWaitForPromptJob(t *testing.T) { server, pollCount := newPollingTestServer(t, statuses) cmd, _ := newTestCmd(t) - err := waitForPromptJob(cmd, server.URL, 123, true, 1*time.Millisecond) + err := waitForPromptJob(cmd, mustParseEndpoint(t, server.URL), 123, true, 1*time.Millisecond) require.NoError(t, err) diff --git a/cmd/roborev/show.go b/cmd/roborev/show.go index 7920ab69c..3a1c736ac 100644 --- a/cmd/roborev/show.go +++ b/cmd/roborev/show.go @@ -43,8 +43,9 @@ Examples: return fmt.Errorf("daemon not running: %w", err) } - addr := getDaemonAddr() - client := &http.Client{Timeout: 5 * time.Second} + ep := getDaemonEndpoint() + addr := ep.BaseURL() + client := ep.HTTPClient(5 * time.Second) var queryURL string var displayRef string diff --git a/cmd/roborev/status.go b/cmd/roborev/status.go index 006c9f1e6..8ddb3fbb4 100644 --- a/cmd/roborev/status.go +++ b/cmd/roborev/status.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "log" - "net/http" "os" "text/tabwriter" "time" @@ -28,8 +27,9 @@ func statusCmd() *cobra.Command { return nil } - addr := getDaemonAddr() - client := &http.Client{Timeout: 2 * time.Second} + ep := getDaemonEndpoint() + addr := ep.BaseURL() + client := ep.HTTPClient(2 * time.Second) resp, err := client.Get(addr + "/api/status") if err != nil { fmt.Println("Daemon: not running") diff --git a/cmd/roborev/stream.go b/cmd/roborev/stream.go index a6097c74d..21f967f47 100644 --- a/cmd/roborev/stream.go +++ b/cmd/roborev/stream.go @@ -44,8 +44,8 @@ Examples: } // Build URL with optional repo filter - addr := getDaemonAddr() - streamURL := addr + "/api/stream/events" + ep := getDaemonEndpoint() + streamURL := ep.BaseURL() + "/api/stream/events" if repoFilter != "" { streamURL += "?" 
+ url.Values{"repo": {repoFilter}}.Encode() } @@ -70,7 +70,7 @@ Examples: }() // Make request - client := &http.Client{Timeout: 0} // No timeout for streaming + client := ep.HTTPClient(0) // No timeout for streaming resp, err := client.Do(req) if err != nil { return fmt.Errorf("connect to daemon: %w", err) diff --git a/cmd/roborev/summary.go b/cmd/roborev/summary.go index 1870fe8ac..a1bdde852 100644 --- a/cmd/roborev/summary.go +++ b/cmd/roborev/summary.go @@ -46,7 +46,8 @@ Examples: return fmt.Errorf("daemon not running: %w", err) } - addr := getDaemonAddr() + ep := getDaemonEndpoint() + addr := ep.BaseURL() // Auto-resolve repo from cwd when not specified (unless --all) if !allRepos && repoPath == "" { @@ -75,7 +76,7 @@ Examples: params.Set("all", "true") } - client := &http.Client{Timeout: 10 * time.Second} + client := ep.HTTPClient(10 * time.Second) resp, err := client.Get(addr + "/api/summary?" + params.Encode()) if err != nil { return fmt.Errorf("failed to connect to daemon: %w", err) diff --git a/cmd/roborev/sync.go b/cmd/roborev/sync.go index de947064b..81cffa1da 100644 --- a/cmd/roborev/sync.go +++ b/cmd/roborev/sync.go @@ -133,9 +133,10 @@ func syncNowCmd() *cobra.Command { return fmt.Errorf("daemon not running: %w", err) } - addr := getDaemonAddr() + ep := getDaemonEndpoint() + addr := ep.BaseURL() // Use longer timeout since sync operations can take up to 5 minutes - client := &http.Client{Timeout: 6 * time.Minute} + client := ep.HTTPClient(6 * time.Minute) // Use streaming endpoint to show progress resp, err := client.Post(addr+"/api/sync/now?stream=1", "application/json", nil) diff --git a/cmd/roborev/tui/action_test.go b/cmd/roborev/tui/action_test.go index 388fc69e7..f11d77a70 100644 --- a/cmd/roborev/tui/action_test.go +++ b/cmd/roborev/tui/action_test.go @@ -482,7 +482,7 @@ func TestTUIClosedRollbackRestoresSelectionAfterLeavingQueue(t *testing.T) { } func TestTUISetJobClosedHelper(t *testing.T) { - m := newModel("http://localhost", 
withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(100), @@ -719,7 +719,7 @@ func TestTUIRespondSuccessClearsOnlyMatchingJob(t *testing.T) { } func TestTUIRespondBackspaceMultiByte(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewKindComment m.commentJobID = 1 @@ -755,7 +755,7 @@ func containsRune(s string, r rune) bool { } func TestTUIRespondViewTruncationMultiByte(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewKindComment m.commentJobID = 1 m.width = 30 @@ -789,7 +789,7 @@ func TestTUIRespondViewTruncationMultiByte(t *testing.T) { } func TestTUIRespondViewTabExpansion(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewKindComment m.commentJobID = 1 m.width = 40 @@ -880,7 +880,7 @@ func TestClosedKeyUpdatesStatsFromReviewView(t *testing.T) { } func setupTestModel(jobs []storage.ReviewJob, opts ...func(*model)) model { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = jobs for _, opt := range opts { opt(&m) diff --git a/cmd/roborev/tui/actions.go b/cmd/roborev/tui/actions.go index 21c81c2c6..3ae7dba66 100644 --- a/cmd/roborev/tui/actions.go +++ b/cmd/roborev/tui/actions.go @@ -312,7 +312,7 @@ func (m model) applyFixPatchInWorktree(jobID int64) tea.Cmd { // fetchPatchAndJob fetches the patch content and job details for a fix job. // Returns nil msg on success; a non-nil msg should be returned to the TUI immediately. 
func (m model) fetchPatchAndJob(jobID int64) (string, *storage.ReviewJob, *applyPatchResultMsg) { - url := m.serverAddr + fmt.Sprintf("/api/job/patch?job_id=%d", jobID) + url := m.endpoint.BaseURL() + fmt.Sprintf("/api/job/patch?job_id=%d", jobID) resp, err := m.client.Get(url) if err != nil { return "", nil, &applyPatchResultMsg{jobID: jobID, err: err} diff --git a/cmd/roborev/tui/api.go b/cmd/roborev/tui/api.go index 535c75a53..7cf7f491a 100644 --- a/cmd/roborev/tui/api.go +++ b/cmd/roborev/tui/api.go @@ -36,7 +36,7 @@ func readErrorBody(body io.Reader, status string) string { // getJSON performs a GET request and decodes the JSON response into out. // Returns errNotFound for 404 responses. Other errors include the server's message. func (m model) getJSON(path string, out any) error { - url := m.serverAddr + path + url := m.endpoint.BaseURL() + path resp, err := m.client.Get(url) if err != nil { return err @@ -65,7 +65,7 @@ func (m model) postJSON(path string, in any, out any) error { return fmt.Errorf("marshal request: %w", err) } - resp, err := m.client.Post(m.serverAddr+path, "application/json", bytes.NewReader(body)) + resp, err := m.client.Post(m.endpoint.BaseURL()+path, "application/json", bytes.NewReader(body)) if err != nil { return err } diff --git a/cmd/roborev/tui/control_test.go b/cmd/roborev/tui/control_test.go index cd03aad90..9ffdcdcbb 100644 --- a/cmd/roborev/tui/control_test.go +++ b/cmd/roborev/tui/control_test.go @@ -80,7 +80,7 @@ func TestIsControlCommand(t *testing.T) { // --- Unit tests for query handlers --- func TestBuildStateResponse(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoPath("/a")), makeJob(2, withRepoPath("/b")), @@ -99,7 +99,7 @@ func TestBuildStateResponse(t *testing.T) { } func TestBuildFilterResponse(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := 
newModel(testEndpoint, withExternalIODisabled()) m.activeRepoFilter = []string{"/repo"} m.activeBranchFilter = "main" m.lockedRepoFilter = true @@ -110,7 +110,7 @@ func TestBuildFilterResponse(t *testing.T) { } func TestBuildJobsResponse(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withAgent("claude-code"), withRepoPath("/r")), makeJob(2, withAgent("codex"), withRepoPath("/r")), @@ -126,7 +126,7 @@ func TestBuildJobsResponse(t *testing.T) { } func TestBuildSelectedResponse_NoSelection(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.selectedIdx = -1 resp := m.buildSelectedResponse() @@ -136,7 +136,7 @@ func TestBuildSelectedResponse_NoSelection(t *testing.T) { } func TestBuildSelectedResponse_WithSelection(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(42, withAgent("codex"), withClosed(boolPtr(false))), } @@ -154,7 +154,7 @@ func TestBuildSelectedResponse_WithSelection(t *testing.T) { // --- Unit tests for mutation handlers --- func TestHandleCtrlSetFilter(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) repo := "/test/repo" branch := "feature" @@ -170,7 +170,7 @@ func TestHandleCtrlSetFilter(t *testing.T) { } func TestHandleCtrlSetFilter_DisplayName(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.repoNames = map[string][]string{ "msgvault": {"/home/user/projects/msgvault"}, "roborev": {"/home/user/projects/roborev"}, @@ -187,7 +187,7 @@ func TestHandleCtrlSetFilter_DisplayName(t *testing.T) { } func TestHandleCtrlSetFilter_DisplayNameMultiplePaths(t *testing.T) { - m := 
newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.repoNames = map[string][]string{ "backend": { "/home/user/work/backend", @@ -209,7 +209,7 @@ func TestHandleCtrlSetFilter_DisplayNameMultiplePaths(t *testing.T) { } func TestHandleCtrlSetFilter_DisplayNameNotInJobs(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) // Simulate: jobs are loaded for "roborev" but "msgvault" has no // visible jobs. repoNames (from /api/repos) knows about both. m.jobs = []storage.ReviewJob{ @@ -231,7 +231,7 @@ func TestHandleCtrlSetFilter_DisplayNameNotInJobs(t *testing.T) { } func TestRepoNamesNotClobberedByBranchFilteredModal(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) // Simulate init fetch: repoNames knows both repos. m.repoNames = map[string][]string{ "msgvault": {"/home/user/projects/msgvault"}, @@ -282,7 +282,7 @@ func TestFetchRepos_BranchNoneIsUnfiltered(t *testing.T) { )) defer ts.Close() - m := newModel(ts.URL, withExternalIODisabled()) + m := newModel(testEndpointFromURL(ts.URL), withExternalIODisabled()) m.activeBranchFilter = branchNone cmd := m.fetchRepos() @@ -294,7 +294,7 @@ func TestFetchRepos_BranchNoneIsUnfiltered(t *testing.T) { } func TestRepoNamesRefreshedByUnfilteredModal(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.repoNames = map[string][]string{ "roborev": {"/home/user/projects/roborev"}, } @@ -317,7 +317,7 @@ func TestRepoNamesRefreshedByUnfilteredModal(t *testing.T) { } func TestSetFilterFallbackWhenRepoNamesEmpty(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) // Simulate startup fetch failure: repoNames is nil. 
m.repoNames = nil @@ -330,7 +330,7 @@ func TestSetFilterFallbackWhenRepoNamesEmpty(t *testing.T) { } func TestHandleCtrlSetFilter_LockedRepo(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.lockedRepoFilter = true params, _ := json.Marshal(map[string]string{"repo": "/test/repo"}) @@ -340,7 +340,7 @@ func TestHandleCtrlSetFilter_LockedRepo(t *testing.T) { } func TestHandleCtrlSetFilter_LockedBranch(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.lockedBranchFilter = true params, _ := json.Marshal(map[string]string{"branch": "main"}) @@ -349,7 +349,7 @@ func TestHandleCtrlSetFilter_LockedBranch(t *testing.T) { } func TestHandleCtrlSetFilter_LockedBranchNoRepoMutation(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.lockedBranchFilter = true m.activeRepoFilter = []string{"/original"} @@ -365,7 +365,7 @@ func TestHandleCtrlSetFilter_LockedBranchNoRepoMutation(t *testing.T) { } func TestHandleCtrlSetFilter_ClearRepo(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.activeRepoFilter = []string{"/old"} m.filterStack = []string{"repo"} @@ -377,7 +377,7 @@ func TestHandleCtrlSetFilter_ClearRepo(t *testing.T) { } func TestHandleCtrlClearFilter(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.activeRepoFilter = []string{"/repo"} m.activeBranchFilter = "main" m.filterStack = []string{"repo", "branch"} @@ -392,7 +392,7 @@ func TestHandleCtrlClearFilter(t *testing.T) { } func TestHandleCtrlClearFilter_LockedBranchNoRepoMutation(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) 
m.lockedBranchFilter = true m.activeRepoFilter = []string{"/repo"} m.activeBranchFilter = "main" @@ -413,7 +413,7 @@ func TestHandleCtrlClearFilter_LockedBranchNoRepoMutation(t *testing.T) { } func TestHandleCtrlSetHideClosed(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.hideClosed = false params, _ := json.Marshal(map[string]bool{"hide_closed": true}) @@ -423,7 +423,7 @@ func TestHandleCtrlSetHideClosed(t *testing.T) { } func TestHandleCtrlSelectJob(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(10), makeJob(20), makeJob(30), } @@ -436,7 +436,7 @@ func TestHandleCtrlSelectJob(t *testing.T) { } func TestHandleCtrlSelectJob_NotFound(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{makeJob(1)} params, _ := json.Marshal(map[string]int64{"job_id": 999}) @@ -445,7 +445,7 @@ func TestHandleCtrlSelectJob_NotFound(t *testing.T) { } func TestHandleCtrlSelectJob_HiddenByFilter(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(10, withRepoPath("/visible")), makeJob(20, withRepoPath("/hidden")), @@ -458,7 +458,7 @@ func TestHandleCtrlSelectJob_HiddenByFilter(t *testing.T) { } func TestHandleCtrlSelectJob_HiddenByClosed(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.hideClosed = true m.jobs = []storage.ReviewJob{ makeJob(10, withClosed(boolPtr(false))), @@ -471,7 +471,7 @@ func TestHandleCtrlSelectJob_HiddenByClosed(t *testing.T) { } func TestHandleCtrlSetView(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, 
withExternalIODisabled()) m.currentView = viewQueue params, _ := json.Marshal(map[string]string{"view": "queue"}) @@ -481,7 +481,7 @@ func TestHandleCtrlSetView(t *testing.T) { } func TestHandleCtrlSetView_Invalid(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) params, _ := json.Marshal(map[string]string{"view": "review"}) _, resp, _ := m.handleCtrlSetView(params) @@ -489,7 +489,7 @@ func TestHandleCtrlSetView_Invalid(t *testing.T) { } func TestHandleCtrlSetView_TasksDisabled(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.tasksEnabled = false params, _ := json.Marshal(map[string]string{"view": "tasks"}) @@ -499,7 +499,7 @@ func TestHandleCtrlSetView_TasksDisabled(t *testing.T) { } func TestHandleCtrlCloseReview(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) closed := false m.jobs = []storage.ReviewJob{ makeJob(5, withClosed(&closed)), @@ -516,7 +516,7 @@ func TestHandleCtrlCloseReview(t *testing.T) { } func TestHandleCtrlCloseReview_NonSelectedNoReflow(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.hideClosed = true closed := false m.jobs = []storage.ReviewJob{ @@ -541,7 +541,7 @@ func TestHandleCtrlCloseReview_NonSelectedNoReflow(t *testing.T) { } func TestHandleCtrlCloseReview_NoReview(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(5, withStatus(storage.JobStatusRunning)), } @@ -552,7 +552,7 @@ func TestHandleCtrlCloseReview_NoReview(t *testing.T) { } func TestHandleCtrlCloseReview_ClearsSelectionWhenNoneVisible(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, 
withExternalIODisabled()) m.hideClosed = true // Only one visible job — closing it leaves no visible jobs. m.jobs = []storage.ReviewJob{ @@ -574,7 +574,7 @@ func TestHandleCtrlCloseReview_ClearsSelectionWhenNoneVisible(t *testing.T) { } func TestHandleCtrlCloseReview_RollbackRestoresSelection(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.hideClosed = true m.jobs = []storage.ReviewJob{ makeJob(1, withClosed(boolPtr(false))), @@ -603,7 +603,7 @@ func TestHandleCtrlCloseReview_RollbackRestoresSelection(t *testing.T) { } func TestHandleCtrlCancelJob(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(7, withStatus(storage.JobStatusRunning)), } @@ -616,7 +616,7 @@ func TestHandleCtrlCancelJob(t *testing.T) { } func TestHandleCtrlCancelJob_NonSelectedNoReflow(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.hideClosed = true m.jobs = []storage.ReviewJob{ makeJob(1, withClosed(boolPtr(false))), @@ -637,7 +637,7 @@ func TestHandleCtrlCancelJob_NonSelectedNoReflow(t *testing.T) { } func TestHandleCtrlCancelJob_ClearsSelectionWhenNoneVisible(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.hideClosed = true // Only one visible job — canceling it hides it under hideClosed. 
m.jobs = []storage.ReviewJob{ @@ -656,7 +656,7 @@ func TestHandleCtrlCancelJob_ClearsSelectionWhenNoneVisible(t *testing.T) { } func TestHandleCtrlCancelJob_RollbackRestoresSelection(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.hideClosed = true m.jobs = []storage.ReviewJob{ makeJob(1, withStatus(storage.JobStatusRunning)), @@ -683,7 +683,7 @@ func TestHandleCtrlCancelJob_RollbackRestoresSelection(t *testing.T) { } func TestHandleCtrlCancelJob_WrongStatus(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(7, withStatus(storage.JobStatusDone)), } @@ -694,7 +694,7 @@ func TestHandleCtrlCancelJob_WrongStatus(t *testing.T) { } func TestHandleCancelKey_ClearsSelectionWhenNoneVisible(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.hideClosed = true m.jobs = []storage.ReviewJob{ @@ -714,7 +714,7 @@ func TestHandleCancelKey_ClearsSelectionWhenNoneVisible(t *testing.T) { } func TestHandleCancelKey_RollbackRestoresSelection(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.hideClosed = true m.jobs = []storage.ReviewJob{ @@ -740,7 +740,7 @@ func TestHandleCancelKey_RollbackRestoresSelection(t *testing.T) { } func TestHandleCtrlRerunJob(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(8, withStatus(storage.JobStatusFailed)), } @@ -754,7 +754,7 @@ func TestHandleCtrlRerunJob(t *testing.T) { func TestHandleCtrlRerunJob_ClearsClosedAndVerdict(t *testing.T) { verdict := "FAIL" - m := newModel(testServerAddr, withExternalIODisabled()) + m := 
newModel(testEndpoint, withExternalIODisabled()) m.hideClosed = true m.jobs = []storage.ReviewJob{ makeJob(8, @@ -780,7 +780,7 @@ func TestHandleCtrlRerunJob_ClearsClosedAndVerdict(t *testing.T) { } func TestHandleCtrlRerunJob_WrongStatus(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(8, withStatus(storage.JobStatusRunning)), } @@ -792,7 +792,7 @@ func TestHandleCtrlRerunJob_WrongStatus(t *testing.T) { func TestHandleRerunKey_ClearsClosedAndVerdict(t *testing.T) { verdict := "FAIL" - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.hideClosed = true m.jobs = []storage.ReviewJob{ @@ -819,7 +819,7 @@ func TestHandleRerunKey_ClearsClosedAndVerdict(t *testing.T) { func TestRerunResultMsg_RestoresClosedOnFailure(t *testing.T) { verdict := "FAIL" closed := true - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(10, withStatus(storage.JobStatusQueued)), } @@ -845,7 +845,7 @@ func TestRerunResultMsg_RestoresClosedOnFailure(t *testing.T) { } func TestHandleCtrlQuit(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) updated, resp, cmd := m.handleCtrlQuit() assert.True(t, resp.OK, "expected OK response") @@ -854,7 +854,7 @@ func TestHandleCtrlQuit(t *testing.T) { } func TestNoQuit_QKeyInQueueView(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled(), withNoQuit()) + m := newModel(testEndpoint, withExternalIODisabled(), withNoQuit()) m.currentView = viewQueue result, cmd := m.Update(tea.KeyMsg{Type: tea.KeyRunes, Runes: []rune{'q'}}) @@ -865,7 +865,7 @@ func TestNoQuit_QKeyInQueueView(t *testing.T) { } func TestNoQuit_CtrlCStillQuits(t *testing.T) { - m := 
newModel(testServerAddr, withExternalIODisabled(), withNoQuit()) + m := newModel(testEndpoint, withExternalIODisabled(), withNoQuit()) m.currentView = viewQueue _, cmd := m.Update(tea.KeyMsg{Type: tea.KeyCtrlC}) @@ -874,7 +874,7 @@ func TestNoQuit_CtrlCStillQuits(t *testing.T) { } func TestNoQuit_QStillClosesModal(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled(), withNoQuit()) + m := newModel(testEndpoint, withExternalIODisabled(), withNoQuit()) m.currentView = viewReview m.currentReview = &storage.Review{Agent: "test", Output: "test"} @@ -885,8 +885,8 @@ func TestNoQuit_QStillClosesModal(t *testing.T) { } func TestNoQuit_QueueHelpOmitsQuit(t *testing.T) { - normal := newModel(testServerAddr, withExternalIODisabled()) - noQuit := newModel(testServerAddr, withExternalIODisabled(), withNoQuit()) + normal := newModel(testEndpoint, withExternalIODisabled()) + noQuit := newModel(testEndpoint, withExternalIODisabled(), withNoQuit()) normalRows := normal.queueHelpRows() noQuitRows := noQuit.queueHelpRows() @@ -928,12 +928,12 @@ func TestNoQuit_HelpViewOmitsQuit(t *testing.T) { } func TestNoQuit_TasksEmptyHelpOmitsQuit(t *testing.T) { - normal := newModel(testServerAddr, withExternalIODisabled()) + normal := newModel(testEndpoint, withExternalIODisabled()) normal.currentView = viewTasks normal.tasksEnabled = true normal.fixJobs = nil // empty tasks view - noQuitM := newModel(testServerAddr, withExternalIODisabled(), withNoQuit()) + noQuitM := newModel(testEndpoint, withExternalIODisabled(), withNoQuit()) noQuitM.currentView = viewTasks noQuitM.tasksEnabled = true noQuitM.fixJobs = nil @@ -950,7 +950,7 @@ func TestNoQuit_TasksEmptyHelpOmitsQuit(t *testing.T) { // --- Control message routing through Update() --- func TestUpdateRoutesControlQuery(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{makeJob(1)} respCh := make(chan controlResponse, 
1) @@ -972,7 +972,7 @@ func TestUpdateRoutesControlQuery(t *testing.T) { } func TestUpdateRoutesControlMutation(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{makeJob(1), makeJob(2)} params, _ := json.Marshal(map[string]int64{"job_id": 2}) @@ -1004,7 +1004,7 @@ func TestControlSocketRoundtrip(t *testing.T) { ts := controlTestServer(t) - m := newModel(ts.URL, withExternalIODisabled()) + m := newModel(testEndpointFromURL(ts.URL), withExternalIODisabled()) // Provide a pipe for stdin so the program doesn't block on TTY. r, w, _ := os.Pipe() @@ -1117,7 +1117,7 @@ func newTestProgram(t *testing.T) *tea.Program { }, )) t.Cleanup(ts.Close) - m := newModel(ts.URL, withExternalIODisabled()) + m := newModel(testEndpointFromURL(ts.URL), withExternalIODisabled()) p := tea.NewProgram(m, tea.WithoutRenderer()) go func() { _, _ = p.Run() }() t.Cleanup(func() { p.Kill() }) diff --git a/cmd/roborev/tui/control_unix_test.go b/cmd/roborev/tui/control_unix_test.go index f711a5595..44a57b859 100644 --- a/cmd/roborev/tui/control_unix_test.go +++ b/cmd/roborev/tui/control_unix_test.go @@ -104,7 +104,7 @@ func newTestProgramUnix(t *testing.T) *tea.Program { }, )) t.Cleanup(ts.Close) - m := newModel(ts.URL, withExternalIODisabled()) + m := newModel(testEndpointFromURL(ts.URL), withExternalIODisabled()) p := tea.NewProgram(m, tea.WithoutRenderer()) go func() { _, _ = p.Run() }() t.Cleanup(func() { p.Kill() }) diff --git a/cmd/roborev/tui/fetch.go b/cmd/roborev/tui/fetch.go index ce94993ed..82e6d37d0 100644 --- a/cmd/roborev/tui/fetch.go +++ b/cmd/roborev/tui/fetch.go @@ -100,7 +100,7 @@ func (m model) fetchJobs() tea.Cmd { params.Set("limit", fmt.Sprintf("%d", limit)) } - url := fmt.Sprintf("%s/api/jobs?%s", m.serverAddr, params.Encode()) + url := fmt.Sprintf("%s/api/jobs?%s", m.endpoint.BaseURL(), params.Encode()) resp, err := m.client.Get(url) if err != nil { return 
jobsErrMsg{err: err, seq: seq} @@ -144,7 +144,7 @@ func (m model) fetchMoreJobs() tea.Cmd { params.Set("closed", "false") } params.Set("exclude_job_type", "fix") - url := fmt.Sprintf("%s/api/jobs?%s", m.serverAddr, params.Encode()) + url := fmt.Sprintf("%s/api/jobs?%s", m.endpoint.BaseURL(), params.Encode()) resp, err := m.client.Get(url) if err != nil { return paginationErrMsg{err: err, seq: seq} @@ -194,8 +194,7 @@ func (m model) tryReconnect() tea.Cmd { if err != nil { return reconnectMsg{err: err} } - newAddr := fmt.Sprintf("http://%s", info.Addr) - return reconnectMsg{newAddr: newAddr, version: info.Version} + return reconnectMsg{endpoint: info.Endpoint(), version: info.Version} } } @@ -203,10 +202,10 @@ func (m model) tryReconnect() tea.Cmd { // display-name-to-root-paths mapping for control socket resolution. func (m model) fetchRepoNames() tea.Cmd { client := m.client - serverAddr := m.serverAddr + baseURL := m.endpoint.BaseURL() return func() tea.Msg { - resp, err := client.Get(serverAddr + "/api/repos") + resp, err := client.Get(baseURL + "/api/repos") if err != nil { return repoNamesMsg{} // non-fatal; map stays nil } @@ -240,13 +239,13 @@ func (m model) fetchRepoNames() tea.Cmd { func (m model) fetchRepos() tea.Cmd { // Capture values for use in goroutine client := m.client - serverAddr := m.serverAddr + baseURL := m.endpoint.BaseURL() activeBranchFilter := m.activeBranchFilter // Constrain repos by active branch filter return func() tea.Msg { // Build URL with optional branch filter (URL-encoded) // Skip sending branch for branchNone sentinel - it's a client-side filter - reposURL := serverAddr + "/api/repos" + reposURL := baseURL + "/api/repos" if activeBranchFilter != "" && activeBranchFilter != branchNone { params := neturl.Values{} params.Set("branch", activeBranchFilter) @@ -314,7 +313,7 @@ func (m model) fetchBranchesForRepo( rootPaths []string, repoIdx int, expand bool, searchSeq int, ) tea.Cmd { client := m.client - serverAddr := m.serverAddr + 
baseURL := m.endpoint.BaseURL() errMsg := func(err error) repoBranchesMsg { return repoBranchesMsg{ @@ -327,7 +326,7 @@ func (m model) fetchBranchesForRepo( } return func() tea.Msg { - branchURL := serverAddr + "/api/branches" + branchURL := baseURL + "/api/branches" if len(rootPaths) > 0 { params := neturl.Values{} for _, repoPath := range rootPaths { @@ -385,13 +384,13 @@ func (m model) backfillBranches() tea.Cmd { // Capture values for use in goroutine machineID := m.status.MachineID client := m.client - serverAddr := m.serverAddr + baseURL := m.endpoint.BaseURL() return func() tea.Msg { var backfillCount int // First, check if there are any NULL branches via the API - resp, err := client.Get(serverAddr + "/api/branches") + resp, err := client.Get(baseURL + "/api/branches") if err != nil { return errMsg(err) } @@ -410,7 +409,7 @@ func (m model) backfillBranches() tea.Cmd { // If there are NULL branches, fetch all jobs to backfill if checkResult.NullsRemaining > 0 { - resp, err := client.Get(serverAddr + "/api/jobs") + resp, err := client.Get(baseURL + "/api/jobs") if err != nil { return errMsg(err) } @@ -466,7 +465,7 @@ func (m model) backfillBranches() tea.Cmd { "job_id": bf.id, "branch": bf.branch, }) - resp, err := client.Post(serverAddr+"/api/job/update-branch", "application/json", bytes.NewReader(reqBody)) + resp, err := client.Post(baseURL+"/api/job/update-branch", "application/json", bytes.NewReader(reqBody)) if err == nil { if resp.StatusCode == http.StatusOK { var updateResult struct { @@ -589,7 +588,7 @@ func (m model) fetchReviewForPrompt(jobID int64) tea.Cmd { // Uses incremental fetching: only new bytes since logOffset are // downloaded and rendered, reusing the persistent logFmtr state. 
func (m model) fetchJobLog(jobID int64) tea.Cmd { - addr := m.serverAddr + baseURL := m.endpoint.BaseURL() width := m.width client := m.client style := m.glamourStyle @@ -599,7 +598,7 @@ func (m model) fetchJobLog(jobID int64) tea.Cmd { return func() tea.Msg { url := fmt.Sprintf( "%s/api/job/log?job_id=%d&offset=%d", - addr, jobID, offset, + baseURL, jobID, offset, ) resp, err := client.Get(url) if err != nil { @@ -810,7 +809,7 @@ func (m model) fetchCommitMsg(job *storage.ReviewJob) tea.Cmd { } func (m model) fetchPatch(jobID int64) tea.Cmd { return func() tea.Msg { - url := m.serverAddr + fmt.Sprintf("/api/job/patch?job_id=%d", jobID) + url := m.endpoint.BaseURL() + fmt.Sprintf("/api/job/patch?job_id=%d", jobID) resp, err := m.client.Get(url) if err != nil { return patchMsg{jobID: jobID, err: err} diff --git a/cmd/roborev/tui/filter_queue_test.go b/cmd/roborev/tui/filter_queue_test.go index a6840b308..4a57f0e2f 100644 --- a/cmd/roborev/tui/filter_queue_test.go +++ b/cmd/roborev/tui/filter_queue_test.go @@ -82,7 +82,7 @@ func TestTUIFilterToZeroVisibleJobs(t *testing.T) { } func TestTUIMultiPathFilterStatusCounts(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.height = 20 m.daemonVersion = "test" @@ -108,7 +108,7 @@ func TestTUIMultiPathFilterStatusCounts(t *testing.T) { func TestTUIBranchFilterApplied(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a"), withBranch("main")), @@ -131,7 +131,7 @@ func TestTUIBranchFilterApplied(t *testing.T) { func TestTUIBranchFilterNone(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a"), withBranch("main")), @@ -153,7 +153,7 @@ func 
TestTUIBranchFilterNone(t *testing.T) { func TestTUIBranchFilterCombinedWithRepoFilter(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a"), withRepoPath("/path/to/repo-a"), withBranch("main")), @@ -175,7 +175,7 @@ func TestTUIBranchFilterCombinedWithRepoFilter(t *testing.T) { func TestTUINavigateDownNoLoadMoreWhenBranchFiltered(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{makeJob(1, withBranch("feature"))} m.selectedIdx = 0 @@ -193,7 +193,7 @@ func TestTUINavigateDownNoLoadMoreWhenBranchFiltered(t *testing.T) { func TestTUINavigateJKeyNoLoadMoreWhenBranchFiltered(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{makeJob(1, withBranch("feature"))} m.selectedIdx = 0 @@ -211,7 +211,7 @@ func TestTUINavigateJKeyNoLoadMoreWhenBranchFiltered(t *testing.T) { func TestTUIPageDownNoLoadMoreWhenBranchFiltered(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{makeJob(1, withBranch("feature"))} m.selectedIdx = 0 @@ -230,7 +230,7 @@ func TestTUIPageDownNoLoadMoreWhenBranchFiltered(t *testing.T) { func TestTUIBranchFilterClearTriggersRefetch(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.activeBranchFilter = "feature" @@ -246,7 +246,7 @@ func TestTUIBranchFilterClearTriggersRefetch(t *testing.T) { } func TestTUIQueueNavigationWithFilter(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, 
withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a"), withRepoPath("/path/to/repo-a")), @@ -276,7 +276,7 @@ func TestTUIQueueNavigationWithFilter(t *testing.T) { } func TestTUIJobsRefreshWithFilter(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a"), withRepoPath("/path/to/repo-a")), @@ -310,7 +310,7 @@ func TestTUIJobsRefreshWithFilter(t *testing.T) { } func TestTUIRefreshWithZeroVisibleJobs(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a"), withRepoPath("/path/to/repo-a")), @@ -330,7 +330,7 @@ func TestTUIRefreshWithZeroVisibleJobs(t *testing.T) { } func TestTUIActionsNoOpWithZeroVisibleJobs(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a"), withRepoPath("/path/to/repo-a")), @@ -352,7 +352,7 @@ func TestTUIActionsNoOpWithZeroVisibleJobs(t *testing.T) { } func TestTUIBKeyOpensBranchFilter(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.jobs = []storage.ReviewJob{makeJob(1, withRepoName("repo-a"))} m.selectedIdx = 0 @@ -365,7 +365,7 @@ func TestTUIBKeyOpensBranchFilter(t *testing.T) { } func TestTUIFilterOpenBatchesBackfill(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.branchBackfillDone = false m.jobs = []storage.ReviewJob{makeJob(1)} @@ -378,7 +378,7 @@ func TestTUIFilterOpenBatchesBackfill(t *testing.T) { } func 
TestTUIFilterCwdRepoSortsFirst(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter m.cwdRepoRoot = "/path/to/repo-b" @@ -398,7 +398,7 @@ func TestTUIFilterCwdRepoSortsFirst(t *testing.T) { } func TestTUIFilterNoCwdNoReorder(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter repos := []repoFilterItem{ @@ -416,7 +416,7 @@ func TestTUIFilterNoCwdNoReorder(t *testing.T) { } func TestTUIBKeyNoOpOutsideQueue(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewReview m2, cmd := pressKey(m, 'b') diff --git a/cmd/roborev/tui/filter_search_test.go b/cmd/roborev/tui/filter_search_test.go index a478ea192..4bd94d898 100644 --- a/cmd/roborev/tui/filter_search_test.go +++ b/cmd/roborev/tui/filter_search_test.go @@ -361,7 +361,7 @@ func TestTUILateErrorAfterSearchClear(t *testing.T) { // session. Scenario: type "f" → clear → type "m" → old error // arrives with stale searchSeq. func TestTUIStaleSearchErrorIgnored(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter setupFilterTree(&m, []treeFilterNode{ {name: "repo-a", rootPaths: []string{"/a"}, count: 3}, @@ -395,7 +395,7 @@ func TestTUIStaleSearchErrorIgnored(t *testing.T) { // search text before repos have loaded, fetchUnloadedBranches is // triggered once repos arrive via reposMsg. 
func TestTUISearchBeforeReposLoad(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter m, _ = pressKey(m, 'f') @@ -426,7 +426,7 @@ func TestTUISearchBeforeReposLoad(t *testing.T) { // text (non-empty → non-empty) clears fetchFailed so previously // failed repos are retried with the new search. func TestTUISearchEditClearsFetchFailed(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter setupFilterTree(&m, []treeFilterNode{ {name: "repo-a", rootPaths: []string{"/a"}, count: 3}, diff --git a/cmd/roborev/tui/filter_stack_test.go b/cmd/roborev/tui/filter_stack_test.go index c5b10a77d..a0ba10742 100644 --- a/cmd/roborev/tui/filter_stack_test.go +++ b/cmd/roborev/tui/filter_stack_test.go @@ -10,7 +10,7 @@ import ( ) func TestTUIFilterClearWithEsc(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a"), withRepoPath("/path/to/repo-a")), @@ -31,7 +31,7 @@ func TestTUIFilterClearWithEsc(t *testing.T) { func TestTUIFilterClearWithEscLayered(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a"), withRepoPath("/path/to/repo-a")), @@ -56,7 +56,7 @@ func TestTUIFilterClearWithEscLayered(t *testing.T) { func TestTUIFilterClearHideClosedOnly(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a")), @@ -74,7 +74,7 @@ func TestTUIFilterClearHideClosedOnly(t *testing.T) { func TestTUIFilterEscapeWhileLoadingFiresNewFetch(t 
*testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a"), withRepoPath("/path/to/repo-a")), @@ -100,7 +100,7 @@ func TestTUIFilterEscapeWhileLoadingFiresNewFetch(t *testing.T) { func TestTUIFilterEscapeWhilePaginationDiscardsAppend(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a"), withRepoPath("/path/to/repo-a")), @@ -223,7 +223,7 @@ func TestTUIFilterStackEscapeOrder(t *testing.T) { func TestTUIFilterStackTitleBarOrder(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("myrepo"), withRepoPath("/path/to/myrepo"), withBranch("feature")), @@ -247,7 +247,7 @@ func TestTUIFilterStackTitleBarOrder(t *testing.T) { func TestTUIFilterStackReverseOrder(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("myrepo"), withRepoPath("/path/to/myrepo"), withBranch("develop")), @@ -267,7 +267,7 @@ func TestTUIFilterStackReverseOrder(t *testing.T) { } func TestTUIRemoveFilterFromStack(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.filterStack = []string{"repo", "branch", "other"} @@ -282,7 +282,7 @@ func TestTUIRemoveFilterFromStack(t *testing.T) { // daemon reconnect clears fetchFailed and retriggers branch // fetches when search is active. 
func TestTUIReconnectClearsFetchFailed(t *testing.T) { - m := newModel("http://localhost:7373", withExternalIODisabled()) + m := newModel(testEndpointFromURL("http://localhost:7373"), withExternalIODisabled()) m.currentView = viewFilter setupFilterTree(&m, []treeFilterNode{ {name: "repo-a", rootPaths: []string{"/a"}, count: 3}, @@ -292,7 +292,7 @@ func TestTUIReconnectClearsFetchFailed(t *testing.T) { m.filterTree[0].fetchFailed = true m2, cmd := updateModel(t, m, reconnectMsg{ - newAddr: "http://localhost:7374", + endpoint: testEndpointFromURL("http://localhost:7374"), }) assert.False(t, m2.filterTree[0].fetchFailed) @@ -300,7 +300,7 @@ func TestTUIReconnectClearsFetchFailed(t *testing.T) { assert.True(t, m2.filterTree[0].loading) - m3 := newModel("http://localhost:7373", withExternalIODisabled()) + m3 := newModel(testEndpointFromURL("http://localhost:7373"), withExternalIODisabled()) m3.currentView = viewFilter setupFilterTree(&m3, []treeFilterNode{ {name: "repo-b", rootPaths: []string{"/b"}, count: 2}, @@ -308,7 +308,7 @@ func TestTUIReconnectClearsFetchFailed(t *testing.T) { m3.filterTree[0].fetchFailed = true m4, _ := updateModel(t, m3, reconnectMsg{ - newAddr: "http://localhost:7374", + endpoint: testEndpointFromURL("http://localhost:7374"), }) assert.False(t, m4.filterTree[0].fetchFailed) @@ -367,7 +367,7 @@ func TestTUIPopFilterAllLocked(t *testing.T) { func TestTUIEscapeWithLockedFilters(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("r"), withRepoPath("/r"), withBranch("b")), diff --git a/cmd/roborev/tui/filter_test.go b/cmd/roborev/tui/filter_test.go index 12c9c38c9..92a9d4851 100644 --- a/cmd/roborev/tui/filter_test.go +++ b/cmd/roborev/tui/filter_test.go @@ -11,7 +11,7 @@ import ( // Helper function to initialize model for filter tests func initFilterModel(nodes []treeFilterNode) 
model { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter if nodes != nil { setupFilterTree(&m, nodes) @@ -29,7 +29,7 @@ func makeNode(name string, count int) treeFilterNode { } func TestTUIFilterOpenModal(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a")), @@ -291,7 +291,7 @@ func TestTUIRightArrowRetriesAfterFailedLoad(t *testing.T) { func TestTUIWindowResizeNoLoadMoreWhenMultiRepoFiltered(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{makeJob(1, withRepoPath("/repo1"))} m.hasMore = true @@ -313,7 +313,7 @@ func TestTUIWindowResizeNoLoadMoreWhenMultiRepoFiltered(t *testing.T) { } func TestTUIBKeyFallsBackToFirstRepo(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter m.filterBranchMode = true @@ -330,7 +330,7 @@ func TestTUIBKeyFallsBackToFirstRepo(t *testing.T) { } func TestTUIBKeyUsesActiveRepoFilter(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter m.filterBranchMode = true m.activeRepoFilter = []string{"/path/to/repo-b"} @@ -350,7 +350,7 @@ func TestTUIBKeyUsesActiveRepoFilter(t *testing.T) { } func TestTUIBKeyUsesMultiPathActiveRepoFilter(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter m.filterBranchMode = true m.activeRepoFilter = []string{"/path/a", "/path/b"} @@ -370,7 +370,7 @@ func TestTUIBKeyUsesMultiPathActiveRepoFilter(t *testing.T) { } func 
TestTUIFilterOpenSkipsBackfillWhenDone(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.branchBackfillDone = true m.jobs = []storage.ReviewJob{makeJob(1)} diff --git a/cmd/roborev/tui/filter_test_helpers.go b/cmd/roborev/tui/filter_test_helpers.go index 88fcc5d7c..9739d06d1 100644 --- a/cmd/roborev/tui/filter_test_helpers.go +++ b/cmd/roborev/tui/filter_test_helpers.go @@ -1,6 +1,7 @@ package tui import ( + "github.com/roborev-dev/roborev/internal/daemon" "github.com/roborev-dev/roborev/internal/storage" "testing" ) @@ -46,7 +47,7 @@ func withFilterTree(nodes []treeFilterNode) testModelOption { } func initTestModel(opts ...testModelOption) model { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(daemon.DaemonEndpoint{Network: "tcp", Address: "localhost"}, withExternalIODisabled()) for _, opt := range opts { opt(&m) } diff --git a/cmd/roborev/tui/filter_tree_test.go b/cmd/roborev/tui/filter_tree_test.go index 67f98c3d3..39fd42b00 100644 --- a/cmd/roborev/tui/filter_tree_test.go +++ b/cmd/roborev/tui/filter_tree_test.go @@ -140,7 +140,7 @@ func TestTUITreeFilterBranchFetchFailureClearsLoading(t *testing.T) { } func TestTUITreeFilterBranchFetchFailureOutOfView(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.filterBranchMode = true setupFilterTree(&m, []treeFilterNode{ @@ -441,7 +441,7 @@ func TestTUIManualExpandFailureDoesNotBlockSearch(t *testing.T) { func TestTUITreeFilterSelectBranchTriggersRefetch(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter setupFilterTree(&m, []treeFilterNode{ { @@ -471,7 +471,7 @@ func TestTUITreeFilterSelectBranchTriggersRefetch(t *testing.T) { func 
TestTUIBranchBackfillDoneSetWhenNoNullsRemain(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.branchBackfillDone = false m2, _ := updateModel(t, m, branchesMsg{ @@ -483,7 +483,7 @@ func TestTUIBranchBackfillDoneSetWhenNoNullsRemain(t *testing.T) { func TestTUIBranchBackfillDoneSetEvenWhenNullsRemain(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.branchBackfillDone = false m2, _ := updateModel(t, m, branchesMsg{ @@ -495,7 +495,7 @@ func TestTUIBranchBackfillDoneSetEvenWhenNullsRemain(t *testing.T) { func TestTUIBranchBackfillIsOneTimeOperation(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.branchBackfillDone = false m2, _ := updateModel(t, m, branchesMsg{ @@ -513,7 +513,7 @@ func TestTUIBranchBackfillIsOneTimeOperation(t *testing.T) { func TestTUIBranchBackfillDoneStaysTrueAfterNewJobs(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.branchBackfillDone = true m2, _ := updateModel(t, m, branchesMsg{ @@ -524,7 +524,7 @@ func TestTUIBranchBackfillDoneStaysTrueAfterNewJobs(t *testing.T) { } func TestTUITreeFilterCollapseOnExpandedRepo(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter setupFilterTree(&m, []treeFilterNode{ { @@ -549,7 +549,7 @@ func TestTUITreeFilterCollapseOnExpandedRepo(t *testing.T) { } func TestTUIBKeyAutoExpandsCwdRepo(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter m.filterBranchMode = true m.cwdRepoRoot = "/path/to/repo-b" @@ -569,7 +569,7 @@ func 
TestTUIBKeyAutoExpandsCwdRepo(t *testing.T) { } func TestTUIBKeyPositionsCursorOnBranch(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter m.filterBranchMode = true setupFilterTree(&m, []treeFilterNode{ @@ -592,7 +592,7 @@ func TestTUIBKeyPositionsCursorOnBranch(t *testing.T) { } func TestTUIBKeyEscapeClearsBranchMode(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter m.filterBranchMode = true setupFilterTree(&m, []treeFilterNode{ @@ -606,7 +606,7 @@ func TestTUIBKeyEscapeClearsBranchMode(t *testing.T) { } func TestTUIFilterCwdBranchSortsFirst(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter m.cwdRepoRoot = "/path/to/repo-a" m.cwdBranch = "feature" @@ -635,7 +635,7 @@ func TestTUIFilterCwdBranchSortsFirst(t *testing.T) { } func TestTUIFilterEnterClearsBranchMode(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewFilter m.filterBranchMode = true setupFilterTree(&m, []treeFilterNode{ diff --git a/cmd/roborev/tui/handlers_msg.go b/cmd/roborev/tui/handlers_msg.go index 05e31617a..5f9b3a1df 100644 --- a/cmd/roborev/tui/handlers_msg.go +++ b/cmd/roborev/tui/handlers_msg.go @@ -706,8 +706,9 @@ func (m model) handleSavePatchResultMsg(msg savePatchResultMsg) (tea.Model, tea. // handleReconnectMsg processes daemon reconnection attempts. 
func (m model) handleReconnectMsg(msg reconnectMsg) (tea.Model, tea.Cmd) { m.reconnecting = false - if msg.err == nil && msg.newAddr != "" && msg.newAddr != m.serverAddr { - m.serverAddr = msg.newAddr + if msg.err == nil && msg.endpoint != m.endpoint { + m.endpoint = msg.endpoint + m.client = msg.endpoint.HTTPClient(10 * time.Second) m.consecutiveErrors = 0 m.err = nil if msg.version != "" { @@ -717,7 +718,7 @@ func (m model) handleReconnectMsg(msg reconnectMsg) (tea.Model, tea.Cmd) { // new daemon address after reconnect. if m.controlSocket != "" { rtInfo := buildTUIRuntimeInfo( - m.controlSocket, m.serverAddr, + m.controlSocket, m.endpoint.BaseURL(), ) if err := WriteTUIRuntime(rtInfo); err != nil { log.Printf( diff --git a/cmd/roborev/tui/helpers_test.go b/cmd/roborev/tui/helpers_test.go index 9de62a861..e0ad3b23e 100644 --- a/cmd/roborev/tui/helpers_test.go +++ b/cmd/roborev/tui/helpers_test.go @@ -519,7 +519,7 @@ func TestRenderHelpTableLinesWithinWidth(t *testing.T) { } func TestQueueHelpRowsTasksWorkflowToggle(t *testing.T) { - disabled := newModel(testServerAddr, withExternalIODisabled()).queueHelpRows() + disabled := newModel(testEndpoint, withExternalIODisabled()).queueHelpRows() assert.NotEmpty(t, disabled, "expected queue help rows") for _, row := range disabled { for _, item := range row { @@ -527,7 +527,7 @@ func TestQueueHelpRowsTasksWorkflowToggle(t *testing.T) { } } - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.tasksEnabled = true enabled := m.queueHelpRows() foundF := false diff --git a/cmd/roborev/tui/queue_test.go b/cmd/roborev/tui/queue_test.go index 27beb4525..9ce85f503 100644 --- a/cmd/roborev/tui/queue_test.go +++ b/cmd/roborev/tui/queue_test.go @@ -40,7 +40,7 @@ func mouseWheelUp() tea.MouseMsg { } func newTuiModel(serverAddr string) model { - return newModel(serverAddr, withExternalIODisabled()) + return newModel(testEndpointFromURL(serverAddr), withExternalIODisabled()) 
} const ( @@ -596,7 +596,7 @@ func TestTUIQueueTableRendersWithinWidth(t *testing.T) { widths := []int{80, 100, 120, 200} for _, w := range widths { t.Run(fmt.Sprintf("width=%d", w), func(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = w m.height = 30 m.jobs = []storage.ReviewJob{ @@ -639,7 +639,7 @@ func TestStatusColumnAutoWidth(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 200 m.height = 30 @@ -676,7 +676,7 @@ func TestStatusColumnAutoWidth(t *testing.T) { } func TestTUIPaginationAppendMode(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) initialJobs := make([]storage.ReviewJob, 50) for i := range 50 { @@ -705,7 +705,7 @@ func TestTUIPaginationAppendMode(t *testing.T) { } func TestTUIPaginationRefreshMaintainsView(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) jobs := make([]storage.ReviewJob, 100) for i := range 100 { @@ -730,7 +730,7 @@ func TestTUIPaginationRefreshMaintainsView(t *testing.T) { } func TestTUILoadingMoreClearedOnPaginationError(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.loadingMore = true errMsg := paginationErrMsg{err: fmt.Errorf("network error")} @@ -742,7 +742,7 @@ func TestTUILoadingMoreClearedOnPaginationError(t *testing.T) { } func TestTUILoadingMoreNotClearedOnGenericError(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.loadingMore = true errMsg := errMsg(fmt.Errorf("some other error")) @@ -754,7 +754,7 @@ 
func TestTUILoadingMoreNotClearedOnGenericError(t *testing.T) { } func TestTUIPaginationBlockedWhileLoadingJobs(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.loadingJobs = true m.hasMore = true @@ -771,7 +771,7 @@ func TestTUIPaginationBlockedWhileLoadingJobs(t *testing.T) { } func TestTUIPaginationAllowedWhenNotLoadingJobs(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.loadingJobs = false m.hasMore = true @@ -788,7 +788,7 @@ func TestTUIPaginationAllowedWhenNotLoadingJobs(t *testing.T) { } func TestTUIPageDownBlockedWhileLoadingJobs(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.loadingJobs = true m.hasMore = true @@ -807,7 +807,7 @@ func TestTUIPageDownBlockedWhileLoadingJobs(t *testing.T) { func TestTUIPageUpDownMovesSelection(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.hideClosed = true m.height = 15 @@ -965,7 +965,7 @@ func TestTUIResizeBehavior(t *testing.T) { } func TestTUIJobsMsgHideClosedUnderfilledViewportAutoPaginates(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.hideClosed = true m.height = 29 @@ -994,7 +994,7 @@ func TestTUIJobsMsgHideClosedUnderfilledViewportAutoPaginates(t *testing.T) { } func TestTUIJobsMsgHideClosedFilledViewportDoesNotAutoPaginate(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.hideClosed = true m.height = 29 @@ -1024,7 
+1024,7 @@ func TestTUIJobsMsgHideClosedFilledViewportDoesNotAutoPaginate(t *testing.T) { func TestTUIEmptyQueueRendersPaddedHeight(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 100 m.height = 20 m.jobs = []storage.ReviewJob{} @@ -1041,7 +1041,7 @@ func TestTUIEmptyQueueRendersPaddedHeight(t *testing.T) { func TestTUIEmptyQueueWithFilterRendersPaddedHeight(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 100 m.height = 20 m.jobs = []storage.ReviewJob{} @@ -1058,7 +1058,7 @@ func TestTUIEmptyQueueWithFilterRendersPaddedHeight(t *testing.T) { func TestTUILoadingJobsShowsLoadingMessage(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 100 m.height = 20 m.jobs = []storage.ReviewJob{} @@ -1073,7 +1073,7 @@ func TestTUILoadingJobsShowsLoadingMessage(t *testing.T) { func TestTUILoadingShowsForLoadingMore(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 100 m.height = 20 m.jobs = []storage.ReviewJob{} @@ -1087,7 +1087,7 @@ func TestTUILoadingShowsForLoadingMore(t *testing.T) { func TestTUIQueueNoScrollIndicatorPads(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 100 m.height = 30 @@ -1418,7 +1418,7 @@ func withQueueTestFlags(hasMore, loadingMore, loadingJobs bool) queueTestModelOp } func newQueueTestModel(opts ...queueTestModelOption) model { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue for _, opt := range opts { opt(&m) @@ -1437,7 +1437,7 @@ func 
TestTUIQueueNarrowWidthFlexAllocation(t *testing.T) { for _, w := range []int{20, 30, 40} { t.Run(fmt.Sprintf("width=%d", w), func(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = w m.height = 20 m.jobs = []storage.ReviewJob{ @@ -1453,7 +1453,7 @@ func TestTUIQueueNarrowWidthFlexAllocation(t *testing.T) { func TestTUIQueueLongCellContent(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 80 m.height = 20 m.jobs = []storage.ReviewJob{ @@ -1480,7 +1480,7 @@ func TestTUIQueueLongCellContent(t *testing.T) { func TestTUIQueueLongAgentName(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 100 m.height = 20 m.jobs = []storage.ReviewJob{ @@ -1506,7 +1506,7 @@ func TestTUIQueueLongAgentName(t *testing.T) { func TestTUIQueueWideCharacterWidth(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 100 m.height = 20 m.jobs = []storage.ReviewJob{ @@ -1533,7 +1533,7 @@ func TestTUIQueueWideCharacterWidth(t *testing.T) { func TestTUIQueueAgentColumnCapped(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 120 m.height = 20 longAgent := strings.Repeat("x", 30) @@ -1608,7 +1608,7 @@ func TestTUIQueueFlexOvershootHandled(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = tt.width m.height = 20 m.jobs = []storage.ReviewJob{ @@ -1636,7 +1636,7 @@ func TestTUIQueueFlexOvershootHandled(t *testing.T) { func TestTUIQueueFlexColumnsGetContentWidth(t 
*testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 120 m.height = 20 m.jobs = []storage.ReviewJob{ diff --git a/cmd/roborev/tui/review_addressed_test.go b/cmd/roborev/tui/review_addressed_test.go index f7b6d0a18..e0aee2b6d 100644 --- a/cmd/roborev/tui/review_addressed_test.go +++ b/cmd/roborev/tui/review_addressed_test.go @@ -10,7 +10,7 @@ import ( ) func TestTUIReviewViewClosedRollbackOnError(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) // Initial state with review view showing an open review m.currentView = viewReview @@ -39,7 +39,7 @@ func TestTUIReviewViewClosedRollbackOnError(t *testing.T) { } func TestTUIReviewViewClosedSuccessNoRollback(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) // Initial state with review view m.currentView = viewReview @@ -64,7 +64,7 @@ func TestTUIReviewViewClosedSuccessNoRollback(t *testing.T) { } func TestTUIReviewViewNavigateAwayBeforeError(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) // Setup: jobs in queue with closed=false addrA := false @@ -108,7 +108,7 @@ func TestTUIReviewViewNavigateAwayBeforeError(t *testing.T) { } func TestTUIReviewViewToggleSyncsQueueJob(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) // Setup: job in queue with closed=false addr := false @@ -134,7 +134,7 @@ func TestTUIReviewViewToggleSyncsQueueJob(t *testing.T) { func TestTUIReviewViewErrorWithoutJobID(t *testing.T) { // Test that review-view errors without jobID are still handled if // pendingReviewClosed matches - m := newModel("http://localhost", withExternalIODisabled()) + m := 
newModel(localhostEndpoint, withExternalIODisabled()) // Review without an associated job (Job is nil) m.currentView = viewReview @@ -170,7 +170,7 @@ func TestTUIReviewViewErrorWithoutJobID(t *testing.T) { func TestTUIReviewViewStaleErrorWithoutJobID(t *testing.T) { // Test that stale review-view errors without jobID are ignored - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) // Review without an associated job m.currentView = viewReview @@ -209,7 +209,7 @@ func TestTUIReviewViewSameStateLateError(t *testing.T) { // Test: true (seq 1) -> false (seq 2) -> true (seq 3), with late error from first true // The late error has newState=true which matches current pending newState, // but sequence numbers now distinguish same-state toggles. - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) // Review without an associated job m.currentView = viewReview diff --git a/cmd/roborev/tui/review_branch_test.go b/cmd/roborev/tui/review_branch_test.go index cf1d8d5e2..f5dbb1b06 100644 --- a/cmd/roborev/tui/review_branch_test.go +++ b/cmd/roborev/tui/review_branch_test.go @@ -10,7 +10,7 @@ import ( ) func TestTUIReviewMsgSetsBranchName(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1), } @@ -81,7 +81,7 @@ func TestReviewBranchName(t *testing.T) { } func TestTUIReviewMsgEmptyBranchForRange(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRef("abc123..def456")), } @@ -103,7 +103,7 @@ func TestTUIReviewMsgEmptyBranchForRange(t *testing.T) { func TestTUIBranchClearedOnFailedJobNavigation(t *testing.T) { // Test that navigating from a successful review with branch to a failed job 
clears the branch - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 100 m.height = 30 m.currentView = viewReview @@ -131,7 +131,7 @@ func TestTUIBranchClearedOnFailedJobNavigation(t *testing.T) { func TestTUIBranchClearedOnFailedJobEnter(t *testing.T) { // Test that pressing Enter on a failed job clears the branch - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 100 m.height = 30 m.currentView = viewQueue @@ -156,7 +156,7 @@ func TestTUIBranchClearedOnFailedJobEnter(t *testing.T) { func TestTUIRenderQueueViewBranchFilterOnlyNoPanic(t *testing.T) { // Test that renderQueueView doesn't panic when branch filter is active // but repo filter is empty (regression test for index out of range) - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.width = 100 m.height = 30 m.currentView = viewQueue diff --git a/cmd/roborev/tui/review_clipboard_test.go b/cmd/roborev/tui/review_clipboard_test.go index 0805cdeac..046438507 100644 --- a/cmd/roborev/tui/review_clipboard_test.go +++ b/cmd/roborev/tui/review_clipboard_test.go @@ -27,7 +27,7 @@ func (m *mockClipboard) WriteText(text string) error { func TestTUIYankCopyFromReviewView(t *testing.T) { mock := &mockClipboard{} - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.clipboard = mock m.currentView = viewReview m.currentReview = makeReview(1, &storage.ReviewJob{ID: 1}, withReviewAgent("test"), withReviewOutput("This is the review content to copy")) @@ -47,7 +47,7 @@ func TestTUIYankCopyFromReviewView(t *testing.T) { } func TestTUIYankCopyShowsFlashMessage(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewReview 
m.currentReview = makeReview(1, &storage.ReviewJob{ID: 1}, withReviewAgent("test"), withReviewOutput("Review content")) m.width = 80 @@ -66,7 +66,7 @@ func TestTUIYankCopyShowsFlashMessage(t *testing.T) { } func TestTUIYankCopyShowsErrorOnFailure(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m, _ = updateModel(t, m, clipboardResultMsg{err: fmt.Errorf("clipboard not available"), view: viewQueue}) @@ -78,7 +78,7 @@ func TestTUIYankCopyShowsErrorOnFailure(t *testing.T) { func TestTUIYankFlashViewNotAffectedByViewChange(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.width = 80 m.height = 24 @@ -95,7 +95,7 @@ func TestTUIYankFlashViewNotAffectedByViewChange(t *testing.T) { } func TestTUIYankFromQueueRequiresCompletedJob(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.jobs = []storage.ReviewJob{ makeJob(1, withRef("abc123"), withAgent("test"), withStatus(storage.JobStatusRunning)), diff --git a/cmd/roborev/tui/review_log_test.go b/cmd/roborev/tui/review_log_test.go index 9316ef4e0..9912fa2dc 100644 --- a/cmd/roborev/tui/review_log_test.go +++ b/cmd/roborev/tui/review_log_test.go @@ -41,7 +41,7 @@ func hasMsgType(msgs []tea.Msg, typeName string) bool { func TestTUILogVisibleLinesWithCommandHeader(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.height = 30 m.logJobID = 1 @@ -60,7 +60,7 @@ func TestTUILogVisibleLinesWithCommandHeader(t *testing.T) { func TestTUILogPagingUsesLogVisibleLines(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) 
m.currentView = viewLog m.logJobID = 1 m.height = 20 @@ -99,7 +99,7 @@ func TestTUILogPagingUsesLogVisibleLines(t *testing.T) { func TestTUILogPagingNoHeader(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewLog m.logJobID = 1 m.height = 20 @@ -127,7 +127,7 @@ func TestTUILogPagingNoHeader(t *testing.T) { func TestTUILogLoadingGuard(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewLog m.logJobID = 1 m.logStreaming = true @@ -142,7 +142,7 @@ func TestTUILogLoadingGuard(t *testing.T) { func TestTUILogErrorDroppedOutsideLogView(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.logFetchSeq = 3 m.logLoading = true @@ -160,7 +160,7 @@ func TestTUILogErrorDroppedOutsideLogView(t *testing.T) { func TestTUILogViewLookupFixJob(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewLog m.logJobID = 42 m.logFromView = viewTasks @@ -185,7 +185,7 @@ func TestTUILogViewLookupFixJob(t *testing.T) { func TestTUILogCancelFixJob(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewLog m.logJobID = 42 m.logFromView = viewTasks @@ -210,7 +210,7 @@ func TestTUILogCancelFixJob(t *testing.T) { func TestTUILogVisibleLinesFixJob(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewLog m.logJobID = 42 m.logFromView = viewTasks @@ -243,7 +243,7 @@ func TestTUILogVisibleLinesFixJob(t *testing.T) { func TestTUILogNavFromTasks(t *testing.T) { - m := 
newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewLog m.logJobID = 20 m.logFromView = viewTasks @@ -469,7 +469,7 @@ func TestTUILogOutputTable(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = tt.initialView m.logJobID = 42 m.logFromView = viewQueue @@ -576,7 +576,7 @@ func TestMouseDisabledInContentViews(t *testing.T) { if tc.enterKey != 0 { t.Run(tc.name+" enter disables mouse", func(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.height = 30 m.width = 80 @@ -608,7 +608,7 @@ func TestMouseDisabledInContentViews(t *testing.T) { } t.Run(tc.name+" exit enables mouse", func(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = tc.view m.height = 30 m.width = 80 @@ -652,7 +652,7 @@ func TestMouseDisabledInContentViews(t *testing.T) { func TestMouseNotToggledWithinContentViews(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewReview m.height = 30 m.width = 80 diff --git a/cmd/roborev/tui/review_navigation_test.go b/cmd/roborev/tui/review_navigation_test.go index e0c7b4ddc..1dc6c6c60 100644 --- a/cmd/roborev/tui/review_navigation_test.go +++ b/cmd/roborev/tui/review_navigation_test.go @@ -134,7 +134,7 @@ func TestTUIReviewNavigation(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = tt.initialJobs m.selectedIdx = tt.initialIdx 
m.selectedJobID = tt.initialID @@ -178,7 +178,7 @@ func TestTUIReviewNavigation(t *testing.T) { func TestTUIReviewStaleResponseIgnored(t *testing.T) { // Test that stale review responses are ignored (race condition fix) - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1), @@ -204,7 +204,7 @@ func TestTUIReviewStaleResponseIgnored(t *testing.T) { func TestTUIReviewMsgWithMatchingJobID(t *testing.T) { // Test that review responses with matching job ID are accepted - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1), @@ -228,7 +228,7 @@ func TestTUIReviewMsgWithMatchingJobID(t *testing.T) { func TestTUISelectionSyncInReviewView(t *testing.T) { // Test that selectedIdx syncs with currentReview.Job.ID when jobs refresh - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) // Initial state: viewing review for job 2 m.jobs = []storage.ReviewJob{ @@ -261,7 +261,7 @@ func TestTUIJobsRefreshDuringReviewNavigation(t *testing.T) { // This tests the race condition fix: user navigates to job 3, but jobs refresh // arrives before the review loads. Selection should stay on job 3, not revert // to the currently displayed review's job (job 2). - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1), @@ -310,7 +310,7 @@ func TestTUIEmptyRefreshWhileViewingReview(t *testing.T) { // Test that transient empty jobs refresh doesn't break selection // when viewing a review. Selection should restore to displayed review // when jobs repopulate. 
- m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1), @@ -347,7 +347,7 @@ func TestTUIEmptyRefreshWhileViewingReview(t *testing.T) { func TestTUIEmptyRefreshSeedsFromCurrentReview(t *testing.T) { // Test that if selectedJobID somehow becomes 0 while viewing a review, // it gets seeded from the current review when jobs repopulate - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{} m.selectedIdx = 0 diff --git a/cmd/roborev/tui/review_views_test.go b/cmd/roborev/tui/review_views_test.go index 592cc395e..1c1919961 100644 --- a/cmd/roborev/tui/review_views_test.go +++ b/cmd/roborev/tui/review_views_test.go @@ -9,7 +9,7 @@ import ( ) func TestTUIEscapeFromReviewTriggersRefreshWithHideClosed(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewReview m.hideClosed = true m.loadingJobs = false @@ -40,7 +40,7 @@ func TestTUIEscapeFromReviewTriggersRefreshWithHideClosed(t *testing.T) { } func TestTUIEscapeFromReviewNoRefreshWithoutHideClosed(t *testing.T) { - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewReview m.hideClosed = false m.loadingJobs = false @@ -71,7 +71,7 @@ func TestTUIEscapeFromReviewNoRefreshWithoutHideClosed(t *testing.T) { func TestTUICommitMsgViewNavigationFromQueue(t *testing.T) { // Test that pressing escape in commit message view returns to the originating view (queue) - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{makeJob(1, withRef("abc123"))} m.selectedIdx = 0 m.selectedJobID = 1 @@ -105,7 +105,7 @@ func 
TestTUICommitMsgViewNavigationFromQueue(t *testing.T) { func TestTUICommitMsgViewNavigationFromReview(t *testing.T) { // Test that pressing escape in commit message view returns to the originating view (review) - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) j := makeJob(1, withRef("abc123")) m.jobs = []storage.ReviewJob{j} m.currentReview = makeReview(1, &j) @@ -126,7 +126,7 @@ func TestTUICommitMsgViewNavigationFromReview(t *testing.T) { func TestTUICommitMsgViewNavigationWithQ(t *testing.T) { // Test that pressing 'q' in commit message view also returns to originating view - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewCommitMsg m.commitMsgFromView = viewReview m.commitMsgContent = "test message" @@ -198,7 +198,7 @@ func TestFetchCommitMsgJobTypeDetection(t *testing.T) { // This is critical: Prompt field is populated for ALL jobs (stores review prompt), // so we must use IsTaskJob() to identify task jobs, not Prompt != "" - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) tests := []struct { name string @@ -350,7 +350,7 @@ func TestFetchCommitMsgJobTypeDetection(t *testing.T) { func TestTUIHelpViewToggleFromQueue(t *testing.T) { // Test that '?' opens help from queue and pressing '?' again returns to queue - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) m.currentView = viewQueue // Press '?' to open help @@ -379,7 +379,7 @@ func TestTUIHelpViewToggleFromQueue(t *testing.T) { func TestTUIHelpViewToggleFromReview(t *testing.T) { // Test that '?' 
opens help from review and escape returns to review - m := newModel("http://localhost", withExternalIODisabled()) + m := newModel(localhostEndpoint, withExternalIODisabled()) j := makeJob(1, withRef("abc123")) m.currentReview = makeReview(1, &j) m.currentView = viewReview diff --git a/cmd/roborev/tui/tui.go b/cmd/roborev/tui/tui.go index 1a0a2325a..f1adab223 100644 --- a/cmd/roborev/tui/tui.go +++ b/cmd/roborev/tui/tui.go @@ -254,7 +254,7 @@ func sanitizeForDisplay(s string) string { } type model struct { - serverAddr string + endpoint daemon.DaemonEndpoint daemonVersion string client *http.Client glamourStyle gansi.StyleConfig // detected once at init @@ -438,7 +438,7 @@ func isConnectionError(err error) bool { return errors.As(err, &netErr) } -func newModel(serverAddr string, opts ...option) model { +func newModel(ep daemon.DaemonEndpoint, opts ...option) model { var opt options for _, o := range opts { o(&opt) @@ -528,9 +528,9 @@ func newModel(serverAddr string, opts ...option) model { } return model{ - serverAddr: serverAddr, + endpoint: ep, daemonVersion: daemonVersion, - client: &http.Client{Timeout: 10 * time.Second}, + client: ep.HTTPClient(10 * time.Second), glamourStyle: streamfmt.GlamourStyle(), jobs: []storage.ReviewJob{}, currentView: viewQueue, @@ -855,7 +855,7 @@ func (m model) View() string { // Config holds resolved parameters for running the TUI. type Config struct { - ServerAddr string + Endpoint daemon.DaemonEndpoint RepoFilter string BranchFilter string ControlSocket string // Unix socket path for external control (default: auto) @@ -904,7 +904,7 @@ func Run(cfg Config) error { } } - m := newModel(cfg.ServerAddr, opts...) + m := newModel(cfg.Endpoint, opts...) 
p := tea.NewProgram( m, programOptionsForModel(m)..., @@ -951,7 +951,7 @@ func Run(cfg Config) error { }) rtInfo := buildTUIRuntimeInfo( - socketPath, cfg.ServerAddr, + socketPath, cfg.Endpoint.BaseURL(), ) if err := WriteTUIRuntime(rtInfo); err != nil { log.Printf( diff --git a/cmd/roborev/tui/tui_test.go b/cmd/roborev/tui/tui_test.go index ccf3215a6..669475648 100644 --- a/cmd/roborev/tui/tui_test.go +++ b/cmd/roborev/tui/tui_test.go @@ -7,6 +7,7 @@ import ( tea "github.com/charmbracelet/bubbletea" "github.com/roborev-dev/roborev/internal/config" + "github.com/roborev-dev/roborev/internal/daemon" "github.com/roborev-dev/roborev/internal/storage" "github.com/roborev-dev/roborev/internal/version" "github.com/stretchr/testify/assert" @@ -28,6 +29,26 @@ import ( const testServerAddr = "http://test.invalid:9999" +// testEndpoint is a DaemonEndpoint parsed from testServerAddr. +var testEndpoint = testEndpointFromURL(testServerAddr) + +// localhostEndpoint is a convenience DaemonEndpoint for tests using "http://localhost". +var localhostEndpoint = daemon.DaemonEndpoint{Network: "tcp", Address: "localhost"} + +// testEndpointFromURL parses a URL string into a DaemonEndpoint for tests. +// It delegates to daemon.ParseEndpoint; if parsing fails, it falls back to +// stripping the URL scheme and using the remaining host:port as a TCP address. +func testEndpointFromURL(rawURL string) daemon.DaemonEndpoint { + ep, err := daemon.ParseEndpoint(rawURL) + if err != nil { + // Fallback for test URLs that ParseEndpoint may reject + // (e.g. non-loopback hosts like test.invalid) + addr := strings.TrimPrefix(rawURL, "https://") + addr = strings.TrimPrefix(addr, "http://") + return daemon.DaemonEndpoint{Network: "tcp", Address: addr} + } + return ep +} + // setupTuiTestEnv isolates the test from the production roborev environment // by setting ROBOREV_DATA_DIR to a temp directory. This prevents tests from // reading production daemon.json or affecting production state. 
@@ -63,7 +84,7 @@ func mockServerModel(t *testing.T, handler http.HandlerFunc) (*httptest.Server, t.Helper() ts := httptest.NewServer(handler) t.Cleanup(ts.Close) - return ts, newModel(ts.URL, withExternalIODisabled()) + return ts, newModel(testEndpointFromURL(ts.URL), withExternalIODisabled()) } // pressKey simulates pressing a rune key and returns the updated model. @@ -233,7 +254,7 @@ func TestTUIHTTPTimeout(t *testing.T) { } func TestTUIGetVisibleJobs(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = []storage.ReviewJob{ makeJob(1, withRepoName("repo-a"), withRepoPath("/path/to/repo-a")), @@ -296,7 +317,7 @@ func TestTUIGetVisibleSelectedIdx(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.jobs = jobs m.selectedIdx = tt.selectedIdx if tt.filter != nil { @@ -313,7 +334,7 @@ func TestTUIGetVisibleSelectedIdx(t *testing.T) { } func TestTUITickNoRefreshWhileLoadingJobs(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) // Set up with loadingJobs true m.jobs = []storage.ReviewJob{makeJob(1), makeJob(2), makeJob(3)} @@ -331,7 +352,7 @@ func TestTUITickNoRefreshWhileLoadingJobs(t *testing.T) { } func TestTUIDisplayTickDoesNotTriggerRefresh(t *testing.T) { - m := newModel("http://localhost") + m := newModel(testEndpointFromURL("http://localhost")) m.loadingJobs = false m.loadingMore = false @@ -389,7 +410,7 @@ func TestTUITickInterval(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.statusFetchedOnce = tt.statusFetchedOnce m.status.RunningJobs = tt.runningJobs m.status.QueuedJobs = tt.queuedJobs @@ -405,7 +426,7 @@ 
func TestTUITickInterval(t *testing.T) { } func TestTUIJobsMsgClearsLoadingJobs(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) // Set up with loadingJobs true m.loadingJobs = true @@ -426,7 +447,7 @@ func TestTUIJobsMsgClearsLoadingJobs(t *testing.T) { } func TestTUIJobsMsgAppendKeepsLoadingJobs(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) // Set up with loadingJobs true (shouldn't normally happen with append, but test the logic) m.jobs = []storage.ReviewJob{makeJob(1)} @@ -449,7 +470,7 @@ func TestTUIJobsMsgAppendKeepsLoadingJobs(t *testing.T) { func TestTUINewModelLoadingJobsTrue(t *testing.T) { // newModel should initialize loadingJobs to true since Init() calls fetchJobs - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) if !m.loadingJobs { assert.Condition(t, func() bool { return false @@ -458,7 +479,7 @@ func TestTUINewModelLoadingJobsTrue(t *testing.T) { } func TestTUIJobsErrMsgClearsLoadingJobs(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.loadingJobs = true // Simulate job fetch error @@ -488,7 +509,7 @@ func TestTUIHideClosedMalformedConfigNotOverwritten(t *testing.T) { }, "write malformed config: %v", err) } - m := newModel(testServerAddr) + m := newModel(testEndpoint) m.currentView = viewQueue // Toggle hide closed ON @@ -524,7 +545,7 @@ func TestTUIHideClosedMalformedConfigNotOverwritten(t *testing.T) { } func TestTUIIsJobVisibleRespectsPendingClosed(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.hideClosed = true // Job with Closed=false but pendingClosed=true should be hidden @@ -565,7 +586,7 @@ func TestTUIIsJobVisibleRespectsPendingClosed(t *testing.T) { 
} func TestTUIUpdateNotificationInQueueView(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.width = 80 m.height = 24 @@ -600,7 +621,7 @@ func TestTUIUpdateNotificationInQueueView(t *testing.T) { } func TestTUIUpdateNotificationDevBuild(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.width = 80 m.height = 24 @@ -621,7 +642,7 @@ func TestTUIUpdateNotificationDevBuild(t *testing.T) { } func TestTUIUpdateNotificationNotInReviewView(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewReview m.width = 80 m.height = 24 @@ -640,7 +661,7 @@ func TestTUIVersionMismatchDetection(t *testing.T) { _ = setupTuiTestEnv(t) t.Run("detects version mismatch", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) // Simulate receiving status with different version status := statusMsg(storage.DaemonStatus{ @@ -662,7 +683,7 @@ func TestTUIVersionMismatchDetection(t *testing.T) { }) t.Run("no mismatch when versions match", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) // Simulate receiving status with same version as TUI status := statusMsg(storage.DaemonStatus{ @@ -679,7 +700,7 @@ func TestTUIVersionMismatchDetection(t *testing.T) { }) t.Run("displays error banner in queue view when mismatched", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.width = 100 m.height = 30 m.currentView = viewQueue @@ -706,7 +727,7 @@ func TestTUIVersionMismatchDetection(t *testing.T) { }) t.Run("does not display daemon status in review view 
when mismatched", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.width = 100 m.height = 30 m.currentView = viewReview @@ -738,7 +759,7 @@ func TestTUIVersionMismatchDetection(t *testing.T) { }) t.Run("review flash still renders when mismatch is present", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.width = 100 m.height = 30 m.currentView = viewReview @@ -778,7 +799,7 @@ func TestTUIVersionMismatchDetection(t *testing.T) { func TestTUIConfigReloadFlash(t *testing.T) { _ = setupTuiTestEnv(t) - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) t.Run("no flash on first status fetch", func(t *testing.T) { // First status fetch with a ConfigReloadCounter should NOT flash @@ -808,7 +829,7 @@ func TestTUIConfigReloadFlash(t *testing.T) { t.Run("flash on config reload after first fetch", func(t *testing.T) { // Start with a model that has already fetched status once - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.statusFetchedOnce = true m.lastConfigReloadCounter = 1 @@ -834,7 +855,7 @@ func TestTUIConfigReloadFlash(t *testing.T) { t.Run("flash when ConfigReloadCounter changes from zero to non-zero", func(t *testing.T) { // Model has fetched status once but daemon hadn't reloaded yet - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.statusFetchedOnce = true m.lastConfigReloadCounter = 0 // No reload had occurred @@ -854,7 +875,7 @@ func TestTUIConfigReloadFlash(t *testing.T) { }) t.Run("no flash when ConfigReloadCounter unchanged", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.statusFetchedOnce = true 
m.lastConfigReloadCounter = 1 @@ -886,7 +907,7 @@ func TestTUIReconnectOnConsecutiveErrors(t *testing.T) { wantErrors int wantReconnecting bool wantCmd bool - wantServerAddr string + wantBaseURL string wantDaemonVersion string wantErrNil bool } @@ -969,11 +990,11 @@ func TestTUIReconnectOnConsecutiveErrors(t *testing.T) { initialErrors: 0, reconnecting: true, initialErr: errors.New("connection refused"), - msg: reconnectMsg{newAddr: "http://127.0.0.1:7374", version: "2.0.0"}, + msg: reconnectMsg{endpoint: testEndpointFromURL("http://127.0.0.1:7374"), version: "2.0.0"}, wantErrors: 0, wantReconnecting: false, wantCmd: true, - wantServerAddr: "http://127.0.0.1:7374", + wantBaseURL: "http://127.0.0.1:7374", wantDaemonVersion: "2.0.0", wantErrNil: true, }, @@ -981,11 +1002,11 @@ func TestTUIReconnectOnConsecutiveErrors(t *testing.T) { name: "handles reconnection to same address", initialErrors: 3, reconnecting: true, - msg: reconnectMsg{newAddr: testServerAddr}, + msg: reconnectMsg{endpoint: testEndpoint}, wantErrors: 3, wantReconnecting: false, wantCmd: false, - wantServerAddr: testServerAddr, + wantBaseURL: testEndpoint.BaseURL(), }, { name: "handles failed reconnection", @@ -1000,7 +1021,7 @@ func TestTUIReconnectOnConsecutiveErrors(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.consecutiveErrors = tt.initialErrors m.reconnecting = tt.reconnecting m.err = tt.initialErr @@ -1022,10 +1043,10 @@ func TestTUIReconnectOnConsecutiveErrors(t *testing.T) { return false }, "cmd returned = %v, want %v", cmd != nil, tt.wantCmd) } - if tt.wantServerAddr != "" && m2.serverAddr != tt.wantServerAddr { + if tt.wantBaseURL != "" && m2.endpoint.BaseURL() != tt.wantBaseURL { assert.Condition(t, func() bool { return false - }, "serverAddr = %q, want %q", m2.serverAddr, tt.wantServerAddr) + }, "endpoint.BaseURL() = %q, want %q", 
m2.endpoint.BaseURL(), tt.wantBaseURL) } if tt.wantDaemonVersion != "" && m2.daemonVersion != tt.wantDaemonVersion { assert.Condition(t, func() bool { @@ -1043,7 +1064,7 @@ func TestTUIReconnectOnConsecutiveErrors(t *testing.T) { func TestTUIStatusDisplaysCorrectly(t *testing.T) { // Test that the queue view renders status correctly - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.width = 200 m.height = 30 m.currentView = viewQueue @@ -1075,7 +1096,7 @@ func TestTUIStatusDisplaysCorrectly(t *testing.T) { } func TestHandleFixKeyRejectsFixJob(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.tasksEnabled = true m.jobs = []storage.ReviewJob{ @@ -1113,7 +1134,7 @@ func TestHandleFixKeyRejectsFixJob(t *testing.T) { func TestTUIFixTriggerResultMsg(t *testing.T) { t.Run("warning shows flash and triggers refresh", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewTasks m.width = 80 m.height = 24 @@ -1144,7 +1165,7 @@ func TestTUIFixTriggerResultMsg(t *testing.T) { }) t.Run("success shows enqueued flash and triggers refresh", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewTasks m.width = 80 m.height = 24 @@ -1169,7 +1190,7 @@ func TestTUIFixTriggerResultMsg(t *testing.T) { }) t.Run("error shows failure flash with no refresh", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewTasks m.width = 80 m.height = 24 @@ -1197,7 +1218,7 @@ func TestTUIFixTriggerResultMsg(t *testing.T) { func TestTUIColumnOptionsCanEnableTasksWorkflow(t *testing.T) { setupTuiTestEnv(t) - m := newModel(testServerAddr, 
withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue result, _ := m.Update(tea.KeyMsg{Type: tea.KeyRunes, Runes: []rune{'o'}}) @@ -1266,7 +1287,7 @@ func TestTUIColumnOptionsCanEnableTasksWorkflow(t *testing.T) { func TestTUIColumnOptionsCanDisableMouse(t *testing.T) { setupTuiTestEnv(t) - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue result, _ := m.Update(tea.KeyMsg{Type: tea.KeyRunes, Runes: []rune{'o'}}) @@ -1349,7 +1370,7 @@ func TestTUIColumnOptionsCanDisableMouse(t *testing.T) { func TestTUIColumnOptionsCanReEnableMouse(t *testing.T) { setupTuiTestEnv(t) - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.width = 120 m.height = 20 @@ -1431,7 +1452,7 @@ func TestNewModelLoadsMouseDisabledFromConfig(t *testing.T) { }, "write config: %v", err) } - m := newModel(testServerAddr) + m := newModel(testEndpoint) if m.mouseEnabled { require.Condition(t, func() bool { return false @@ -1445,7 +1466,7 @@ func TestNewModelLoadsMouseDisabledFromConfig(t *testing.T) { } func TestTUISelection(t *testing.T) { t.Run("MaintainedOnInsert", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) // Initial state with 3 jobs, select the middle one (ID=2) m.jobs = []storage.ReviewJob{ @@ -1466,7 +1487,7 @@ func TestTUISelection(t *testing.T) { }) t.Run("ClampsOnRemoval", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) // Initial state with 3 jobs, select the last one (ID=1) m.jobs = []storage.ReviewJob{ @@ -1487,7 +1508,7 @@ func TestTUISelection(t *testing.T) { }) t.Run("FirstJobOnEmpty", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := 
newModel(testEndpoint, withExternalIODisabled()) // No prior selection (empty jobs list, zero selectedJobID) m.jobs = []storage.ReviewJob{} @@ -1506,7 +1527,7 @@ func TestTUISelection(t *testing.T) { }) t.Run("EmptyList", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) // Had jobs, now empty m.jobs = []storage.ReviewJob{makeJob(1)} @@ -1522,7 +1543,7 @@ func TestTUISelection(t *testing.T) { }) t.Run("MaintainedOnLargeBatch", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) // Initial state with 1 job selected m.jobs = []storage.ReviewJob{makeJob(1)} @@ -1555,7 +1576,7 @@ func TestTUIHideClosed(t *testing.T) { }, "write config: %v", err) } - m := newModel(testServerAddr) + m := newModel(testEndpoint) if !m.hideClosed { assert.Condition(t, func() bool { return false @@ -1564,7 +1585,7 @@ func TestTUIHideClosed(t *testing.T) { }) t.Run("Toggle", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue // Initial state: hideClosed is false (TestMain isolates from real config) @@ -1594,7 +1615,7 @@ func TestTUIHideClosed(t *testing.T) { }) t.Run("FiltersJobs", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.hideClosed = true @@ -1648,7 +1669,7 @@ func TestTUIHideClosed(t *testing.T) { }) t.Run("SelectionMovesToVisible", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.jobs = []storage.ReviewJob{ @@ -1669,7 +1690,7 @@ func TestTUIHideClosed(t *testing.T) { }) t.Run("RefreshRevalidatesSelection", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + 
m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.hideClosed = true @@ -1695,7 +1716,7 @@ func TestTUIHideClosed(t *testing.T) { }) t.Run("NavigationSkipsHidden", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.hideClosed = true @@ -1715,7 +1736,7 @@ func TestTUIHideClosed(t *testing.T) { }) t.Run("withRepoFilter", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.hideClosed = true m.activeRepoFilter = []string{"/repo/a"} @@ -1744,7 +1765,7 @@ func TestTUIHideClosed(t *testing.T) { t.Run("ClearRepoFilterRefetches", func(t *testing.T) { // Scenario: hide closed enabled, then filter by repo, then press escape // to clear the repo filter. Should trigger a refetch to show all open reviews. - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.hideClosed = true m.activeRepoFilter = []string{"/repo/a"} @@ -1804,7 +1825,7 @@ func TestTUIHideClosed(t *testing.T) { }) t.Run("EnableTriggersRefetch", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.hideClosed = false @@ -1833,7 +1854,7 @@ func TestTUIHideClosed(t *testing.T) { }) t.Run("DisableRefetches", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.hideClosed = true // Already enabled @@ -1873,7 +1894,7 @@ func TestTUIHideClosed(t *testing.T) { }, "write config: %v", err) } - m := newModel(testServerAddr) + m := newModel(testEndpoint) m.currentView = viewQueue // Verify the default was loaded @@ -1917,7 +1938,7 @@ func TestTUIHideClosed(t *testing.T) { 
func TestTUIFlashMessage(t *testing.T) { t.Run("AppearsInQueueView", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewQueue m.width = 80 m.height = 24 @@ -1934,7 +1955,7 @@ func TestTUIFlashMessage(t *testing.T) { }) t.Run("NotShownInDifferentView", func(t *testing.T) { - m := newModel(testServerAddr, withExternalIODisabled()) + m := newModel(testEndpoint, withExternalIODisabled()) m.currentView = viewReview m.width = 80 m.height = 24 @@ -2038,7 +2059,7 @@ func TestNewTuiModelOptions(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - m := newModel(testServerAddr, tc.opts...) + m := newModel(testEndpoint, tc.opts...) if !reflect.DeepEqual(m.activeRepoFilter, tc.expectedRepoFilter) { assert.Condition(t, func() bool { diff --git a/cmd/roborev/tui/types.go b/cmd/roborev/tui/types.go index 2b77dd9e8..78a6c4bbb 100644 --- a/cmd/roborev/tui/types.go +++ b/cmd/roborev/tui/types.go @@ -5,6 +5,7 @@ import ( "time" "github.com/atotto/clipboard" + "github.com/roborev-dev/roborev/internal/daemon" "github.com/roborev-dev/roborev/internal/storage" "github.com/roborev-dev/roborev/internal/streamfmt" ) @@ -215,9 +216,9 @@ type commitMsgMsg struct { err error } type reconnectMsg struct { - newAddr string // New daemon address if found, empty if not found - version string // Daemon version (to avoid sync call in Update) - err error + endpoint daemon.DaemonEndpoint // New daemon endpoint if found + version string // Daemon version (to avoid sync call in Update) + err error } type fixJobsMsg struct { diff --git a/cmd/roborev/tui_cmd.go b/cmd/roborev/tui_cmd.go index 96b003a3b..a71fe1b7b 100644 --- a/cmd/roborev/tui_cmd.go +++ b/cmd/roborev/tui_cmd.go @@ -2,9 +2,9 @@ package main import ( "fmt" - "strings" "github.com/roborev-dev/roborev/cmd/roborev/tui" + "github.com/roborev-dev/roborev/internal/daemon" "github.com/spf13/cobra" ) @@ -37,10 +37,15 @@ to 
the current branch. Use = syntax for explicit values: return fmt.Errorf("daemon error: %w", err) } + var ep daemon.DaemonEndpoint if addr == "" { - addr = getDaemonAddr() - } else if !strings.HasPrefix(addr, "http://") && !strings.HasPrefix(addr, "https://") { - addr = "http://" + addr + ep = getDaemonEndpoint() + } else { + var err error + ep, err = daemon.ParseEndpoint(addr) + if err != nil { + return fmt.Errorf("--addr: %w", err) + } } if cmd.Flags().Changed("repo") { @@ -65,7 +70,7 @@ to the current branch. Use = syntax for explicit values: } return tui.Run(tui.Config{ - ServerAddr: addr, + Endpoint: ep, RepoFilter: repoFilter, BranchFilter: branchFilter, ControlSocket: controlSocket, diff --git a/cmd/roborev/wait.go b/cmd/roborev/wait.go index c958a32b2..ecac4d74c 100644 --- a/cmd/roborev/wait.go +++ b/cmd/roborev/wait.go @@ -138,8 +138,8 @@ Examples: jobID = job.ID } - addr := getDaemonAddr() - err := waitForJob(cmd, addr, jobID, quiet) + ep := getDaemonEndpoint() + err := waitForJob(cmd, ep, jobID, quiet) if err != nil { // Map ErrJobNotFound to exit 1 with a user-facing message // (waitForJob returns a plain error to stay compatible with reviewCmd) @@ -246,7 +246,7 @@ func waitMultiple( jobIDs = append(jobIDs, job.ID) } - addr := getDaemonAddr() + ep := getDaemonEndpoint() // Wait for all jobs concurrently. 
// Always poll in quiet mode to avoid interleaved output from @@ -261,7 +261,7 @@ func waitMultiple( wg.Add(1) go func(idx int, jobID int64) { defer wg.Done() - err := waitForJob(cmd, addr, jobID, true) + err := waitForJob(cmd, ep, jobID, true) results[idx] = result{jobID: jobID, err: err} }(i, id) } diff --git a/docs/plans/2026-03-18-unix-socket-transport-design.md b/docs/plans/2026-03-18-unix-socket-transport-design.md new file mode 100644 index 000000000..586ef406a --- /dev/null +++ b/docs/plans/2026-03-18-unix-socket-transport-design.md @@ -0,0 +1,364 @@ +# Unix Socket Transport for Daemon Communication + +## Motivation + +The roborev daemon currently only listens on TCP loopback addresses (127.0.0.1, [::1], localhost). This is enforced throughout the codebase as an SSRF mitigation since there is no authentication or TLS on the HTTP API. + +Unix domain sockets provide two advantages over TCP loopback: + +1. **Security hardening** -- filesystem permissions (owner-only `0600` on the socket, `0700` on the parent directory) give per-user access control without needing TLS/mTLS. Other local users cannot connect. +2. **Container isolation** -- NixOS containers with network isolation can share the daemon via a bind-mounted socket path, without exposing a TCP port or adding HTTPS/mTLS. + +## Design Summary + +Introduce a `DaemonEndpoint` type that encapsulates the transport (TCP or Unix) and address. Thread it through RuntimeInfo, the server, the client, probe functions, and CLI commands -- replacing raw `host:port` strings. TCP loopback remains the default. Users opt in to Unix sockets via config. 
+ +## Config Surface + +The existing `server_addr` config key determines the transport based on its value: + +| Value | Transport | Address | +|-------|-----------|---------| +| `""` (empty/default) | TCP | `127.0.0.1:7373` | +| `"127.0.0.1:7373"` | TCP | As specified | +| `"[::1]:7373"` | TCP | As specified | +| `"unix://"` | Unix | Auto-generated: `os.TempDir()/roborev-<uid>/daemon.sock` | +| `"unix:///explicit/path.sock"` | Unix | Explicit absolute path | + +Relative paths (`unix://relative.sock`) are rejected. Paths exceeding the platform socket limit (104 bytes on macOS, 108 on Linux) are rejected at parse time. + +The `--server` CLI flag accepts the same values and takes precedence over the config file. + +## DaemonEndpoint Type + +New file: `internal/daemon/endpoint.go` + +```go +type DaemonEndpoint struct { + Network string // "tcp" or "unix" + Address string // "127.0.0.1:7373" or "/tmp/roborev-1000/daemon.sock" +} +``` + +### Construction + +```go +func ParseEndpoint(serverAddr string) (DaemonEndpoint, error) +``` + +Parses a `server_addr` config value: +- Empty string returns TCP default `127.0.0.1:7373` +- Bare `host:port` returns TCP with loopback validation +- `http://host:port` strips the scheme and treats as bare `host:port` (for backwards compatibility with the old `--server` flag format) +- `unix://` returns Unix with `DefaultSocketPath()` +- `unix:///absolute/path` returns Unix with the given path +- Anything else (relative path, non-loopback TCP) returns an error + +```go +func DefaultSocketPath() string +``` + +Returns `filepath.Join(os.TempDir(), fmt.Sprintf("roborev-%d", os.Getuid()), "daemon.sock")`. Example paths: +- Linux: `/tmp/roborev-1000/daemon.sock` (~31 chars) +- macOS: `/var/folders/zz/.../T/roborev-501/daemon.sock` (~73 chars) + +Both are well within the platform socket path limits.
 The macOS path length varies by system (the `os.TempDir()` segment can be longer on some setups), but `ParseEndpoint` validates the length at parse time and returns a clear error if it exceeds the limit. Users can fall back to `unix:///short/path.sock` if needed. + +### Methods + +```go +func (e DaemonEndpoint) Listener() (net.Listener, error) +``` + +Calls `net.Listen(e.Network, e.Address)`. For Unix, the caller must ensure the parent directory exists and any stale socket is removed before calling this. + +```go +func (e DaemonEndpoint) HTTPClient(timeout time.Duration) *http.Client +``` + +For TCP: standard `http.Client` with the given timeout. +For Unix: `http.Client` with a custom transport: + +```go +&http.Transport{ + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + return net.DialContext(ctx, "unix", e.Address) + }, +} +``` + +```go +func (e DaemonEndpoint) BaseURL() string +``` + +TCP: `"http://<host>:<port>"`. Unix: `"http://localhost"` (host is ignored by the Unix dialer, but `net/http` requires a valid URL). + +```go +func (e DaemonEndpoint) IsUnix() bool +func (e DaemonEndpoint) String() string // For logging: "tcp:127.0.0.1:7373" or "unix:/tmp/..."
+``` + +### Validation + +Performed inside `ParseEndpoint`: +- TCP: loopback check (same `isLoopbackAddr` logic as today) +- Unix: must be absolute path, must not exceed platform socket path limit, must not contain null bytes + +## RuntimeInfo Changes + +Current JSON format: + +```json +{"pid": 1234, "addr": "127.0.0.1:7373", "port": 7373, "version": "0.47.0"} +``` + +New format adds a `network` field: + +```json +{"pid": 1234, "addr": "/tmp/roborev-1000/daemon.sock", "port": 0, "network": "unix", "version": "0.47.0"} +``` + +Struct changes: + +```go +type RuntimeInfo struct { + PID int `json:"pid"` + Addr string `json:"addr"` + Port int `json:"port"` // 0 for unix + Network string `json:"network"` // "tcp" or "unix"; empty treated as "tcp" + Version string `json:"version"` + SourcePath string `json:"-"` +} +``` + +**Backwards compatibility:** +- Reading: missing `network` field defaults to `"tcp"`. Old daemon.json files work as before. +- Writing: always includes `network`. +- An old CLI reading a Unix runtime file will fail to probe (it will try `http:///tmp/roborev-1000/daemon.sock/api/ping`), which fails gracefully -- it reports "daemon not running" rather than crashing. + +New helper: + +```go +func (r *RuntimeInfo) Endpoint() DaemonEndpoint +``` + +Returns a `DaemonEndpoint` derived from the runtime info, defaulting `Network` to `"tcp"` if empty. + +`WriteRuntime` signature changes: + +```go +// Before: +func WriteRuntime(addr string, port int, version string) error + +// After: +func WriteRuntime(ep DaemonEndpoint, version string) error +``` + +## Server Changes + +`server.go` `Start()` flow: + +1. `ParseEndpoint(cfg.ServerAddr)` -- parse and validate config +2. For TCP: `FindAvailablePort` scans for open port (unchanged) +3. For Unix: create parent directory with `0700`, remove stale socket file if present +4. `ep.Listener()` -- bind +5. For Unix: `os.Chmod(socketPath, 0600)` after bind +6. 
`WriteRuntime(ep, version)` -- persist with network field + +`FindAvailablePort` remains TCP-only. Not called for Unix endpoints. For TCP, `FindAvailablePort` returns a resolved `addr, port` (potentially with an incremented port); a new `DaemonEndpoint` is constructed from this resolved address for the rest of the `Start()` flow. + +`validateDaemonBindAddr` is replaced by validation inside `ParseEndpoint`. + +**`waitForServerReady`** signature changes from `waitForServerReady(ctx, addr string, ...)` to `waitForServerReady(ctx, ep DaemonEndpoint, ...)`. It passes the endpoint to `ProbeDaemon(ep, ...)` instead of a raw address string. This is critical -- without this change, `ProbeDaemon`'s loopback check would reject Unix socket addresses and the daemon would fail to start. + +**Shutdown cleanup:** `Server.Stop()` removes the socket file for Unix endpoints. `CleanupZombieDaemons` detects stale Unix sockets by checking whether the owning PID (from RuntimeInfo) is alive -- if the process is dead, it removes the socket file and runtime file directly, rather than attempting an HTTP probe on a potentially unresponsive socket. + +## Client Changes + +`HTTPClient` struct: + +```go +// Before: +type HTTPClient struct { + addr string + httpClient *http.Client + pollInterval time.Duration +} + +// After: +type HTTPClient struct { + baseURL string + httpClient *http.Client + pollInterval time.Duration +} +``` + +`NewHTTPClient` takes a `DaemonEndpoint`: + +```go +func NewHTTPClient(ep DaemonEndpoint) *HTTPClient { + return &HTTPClient{ + baseURL: ep.BaseURL(), + httpClient: ep.HTTPClient(10 * time.Second), + pollInterval: DefaultPollInterval, + } +} +``` + +All endpoint methods change `c.addr` to `c.baseURL` -- pure rename, no logic changes. + +`NewHTTPClientFromRuntime`: + +```go +func NewHTTPClientFromRuntime() (*HTTPClient, error) { + // ... retry loop ... + info, err := GetAnyRunningDaemon() + if err == nil { + return NewHTTPClient(info.Endpoint()), nil + } + // ... 
+} +``` + +## Probe Function Changes + +`ProbeDaemon`, `IsDaemonAlive`, `probeDaemonPing`, `probeLegacyDaemonStatus` change from `addr string` to `DaemonEndpoint`: + +```go +func ProbeDaemon(ep DaemonEndpoint, timeout time.Duration) (*PingInfo, error) +func IsDaemonAlive(ep DaemonEndpoint) bool +``` + +TCP probes still validate loopback (via the endpoint's validation at parse time). Unix probes skip loopback checks (filesystem permissions handle access control). + +URL construction uses `ep.BaseURL() + "/api/ping"` instead of `fmt.Sprintf("http://%s/api/ping", addr)`. + +HTTP clients use `ep.HTTPClient(timeout)` to get the right transport. + +`KillDaemon` gets the endpoint from `info.Endpoint()` and uses `ep.HTTPClient()` + `ep.BaseURL()` for the shutdown POST. The inline `isLoopbackAddr` guard is replaced by the endpoint's own safety: Unix endpoints are inherently local, TCP endpoints had loopback validated at parse time. Both `GetAnyRunningDaemon` and `CleanupZombieDaemons` call `IsDaemonAlive(info.Endpoint())` instead of `IsDaemonAlive(info.Addr)`. + +## CLI Changes + +### Central principle + +**All `http.Client` instances in `cmd/roborev/` that communicate with the daemon must be created through `DaemonEndpoint.HTTPClient()`.** A plain `&http.Client{}` uses the default TCP transport, which cannot dial Unix sockets. + +### Module-level variable + +The global `serverAddr string` is replaced by a `daemonEndpoint daemon.DaemonEndpoint`. A convenience helper provides an HTTP client: + +```go +func getDaemonHTTPClient(timeout time.Duration) *http.Client { + return daemonEndpoint.HTTPClient(timeout) +} +``` + +### Specific changes + +**`cmd/roborev/main.go`:** + +`--server` flag default changes from `"http://127.0.0.1:7373"` to `""`. Empty means "use config file, which defaults to TCP `127.0.0.1:7373`". This prevents the flag from always overriding config. The flag is parsed into `daemonEndpoint` via `ParseEndpoint` in a `PersistentPreRun` hook. 
+ +**`cmd/roborev/daemon_lifecycle.go`:** + +`getDaemonAddr() string` becomes `getDaemonEndpoint() DaemonEndpoint`: + +```go +func getDaemonEndpoint() DaemonEndpoint { + if info, err := daemon.GetAnyRunningDaemon(); err == nil { + return info.Endpoint() + } + ep, err := daemon.ParseEndpoint(serverAddr) + if err != nil { + // Fall back to TCP default if parsing fails + return daemon.DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:7373"} + } + return ep +} +``` + +`ensureDaemon()` and `startDaemon()` set `daemonEndpoint` instead of `serverAddr`. The fallback path in `ensureDaemon()` (which currently calls `probeDaemonServerURL(serverAddr, ...)` when no runtime file exists) changes to `ProbeDaemon(daemonEndpoint, ...)` directly -- the endpoint already encapsulates the transport. + +`probeDaemonServerURL` is removed -- its URL-parsing logic is no longer needed since `DaemonEndpoint` encapsulates both transport and address. + +**`cmd/roborev/daemon_client.go`:** + +`waitForJob` and `showReview` use `getDaemonEndpoint().BaseURL()` for URLs and `getDaemonHTTPClient(timeout)` for HTTP clients. All 5 locally-constructed `&http.Client{}` instances and the bare `http.Post()` call are replaced. + +**`cmd/roborev/stream.go`:** + +Uses `getDaemonEndpoint()` for both the stream URL and the HTTP client (to get the right transport). + +**All other CLI command files** (`analyze.go`, `compact.go`, `review.go`, `run.go`, `fix.go`, `list.go`, `summary.go`, `status.go`, `show.go`, `wait.go`, `comment.go`, `sync.go`, `remap.go`, `postcommit.go`, `refine.go`, `job_helpers.go`): Replace `getDaemonAddr()` + raw `&http.Client{}` with `getDaemonEndpoint().BaseURL()` + `getDaemonHTTPClient(timeout)`. These are mechanical changes. Notable special cases: + +- `review.go` and `run.go` use bare `http.Post(serverAddr+...)` with the global variable directly -- these change to `getDaemonHTTPClient(timeout).Post(getDaemonEndpoint().BaseURL()+...)`. 
+- `postcommit.go` has a package-level `var hookHTTPClient = &http.Client{...}` which cannot be initialized at package load time (the endpoint isn't parsed yet). Change to lazy initialization: `hookHTTPClient` becomes a function `getHookHTTPClient()` that calls `getDaemonHTTPClient(3 * time.Second)` on first use. + +**`cmd/roborev/daemon_cmd.go`:** + +The `daemon run --addr` flag feeds into `cfg.ServerAddr` which is then parsed by `ParseEndpoint` inside `Server.Start()`. The `--addr` flag accepts the same `unix://` values as `server_addr` in config. No separate parsing needed -- `Start()` handles it. + +## TUI Changes + +`tui/tui.go`: model struct `serverAddr string` becomes `endpoint DaemonEndpoint`. `newModel(serverAddr string, ...)` becomes `newModel(ep DaemonEndpoint, ...)`. The `http.Client` on the model is created via `ep.HTTPClient(10 * time.Second)` instead of `&http.Client{Timeout: 10 * time.Second}`. + +`tui/api.go`: `m.serverAddr + path` becomes `m.endpoint.BaseURL() + path`. + +`tui/fetch.go`: `tryReconnect()` currently returns a `reconnectMsg{newAddr: string}`. Changes to return `reconnectMsg{endpoint: DaemonEndpoint}` derived from `info.Endpoint()`. The `reconnectMsg` struct in `tui/types.go` (or equivalent) changes its field from `newAddr string` to `endpoint DaemonEndpoint`. + +`tui/handlers_msg.go`: `handleReconnectMsg` must update both `m.endpoint` and `m.client` (via `ep.HTTPClient(10 * time.Second)`) since the reconnected daemon may be on a different transport than the original. + +`tui_cmd.go`: passes `DaemonEndpoint` to `newModel` instead of a URL string. The TUI's own `--addr` flag is updated to accept `unix://` values and is parsed via `ParseEndpoint` before being passed to `newModel`. The existing `http://` prefix logic is removed (handled by `ParseEndpoint`'s backwards-compatible `http://` stripping). 
+
+## Testing
+
+### Existing test impact
+
+- **`httptest.Server` tests** (mock daemon, CLI): `patchServerAddr` sets a `DaemonEndpoint` parsed from the test server URL. Mechanical change.
+- **`newWorkerTestContext`** (daemon): no networking, no changes.
+- **`RuntimeInfo` tests**: add `Network` field. Add compat test: JSON without `network` field defaults to TCP.
+
+### New tests
+
+- **`ParseEndpoint`** -- table-driven: empty, TCP, `unix://`, `unix:///path`, invalid (relative path, too-long path, non-loopback TCP, null bytes).
+- **`DaemonEndpoint.Listener()`** -- TCP and Unix listeners bind and accept connections.
+- **`DaemonEndpoint.HTTPClient()`** -- round-trip through a Unix socket. Start a server on a Unix listener in a short temp path, verify the client connects and gets a response.
+- **`DefaultSocketPath()`** -- verify format includes uid, verify length is under `MaxUnixPathLen` (the platform socket path limit: 104 bytes on macOS, 108 on Linux).
+- **RuntimeInfo compat** -- read old-format JSON (no `network` field), verify `Endpoint()` returns TCP.
+
+### Socket path length in tests
+
+`t.TempDir()` on macOS produces long paths that can exceed the 104-byte Unix socket limit. Tests that create Unix sockets should use short explicit paths under `/tmp` with `t.Cleanup` for removal, not `t.TempDir()`.
+
+## Files Changed
+
+| File | Change |
+|------|--------|
+| `internal/daemon/endpoint.go` | **New.** `DaemonEndpoint` type, `ParseEndpoint`, `DefaultSocketPath` |
+| `internal/daemon/runtime.go` | Add `Network` field to `RuntimeInfo`, add `Endpoint()` method, change `WriteRuntime` signature, update probe/kill functions to use `DaemonEndpoint` |
+| `internal/daemon/server.go` | `Start()` uses `ParseEndpoint`, Unix listener setup, socket cleanup on stop |
+| `internal/daemon/client.go` | `addr` -> `baseURL`, `NewHTTPClient` takes `DaemonEndpoint` |
+| `internal/config/config.go` | No struct changes (ServerAddr stays string). 
Default stays `"127.0.0.1:7373"` | +| `cmd/roborev/main.go` | `--server` flag default to `""` | +| `cmd/roborev/daemon_lifecycle.go` | `getDaemonEndpoint()` returns `DaemonEndpoint` | +| `cmd/roborev/daemon_cmd.go` | `--addr` flag accepts `unix://` values, feeds into `cfg.ServerAddr` | +| `cmd/roborev/daemon_client.go` | Replace raw `http.Client{}` with `getDaemonHTTPClient()`, use `getDaemonEndpoint().BaseURL()` | +| `cmd/roborev/stream.go` | Use `getDaemonEndpoint()` for URL and transport | +| `cmd/roborev/*.go` (15+ command files) | Replace `getDaemonAddr()` + raw `http.Client{}` with `getDaemonEndpoint().BaseURL()` + `getDaemonHTTPClient()` | +| `cmd/roborev/tui_cmd.go` | Pass `DaemonEndpoint` to TUI | +| `cmd/roborev/tui/tui.go` | `serverAddr` -> `endpoint DaemonEndpoint` | +| `cmd/roborev/tui/api.go` | Use `m.endpoint.BaseURL()` | +| `cmd/roborev/tui/fetch.go` | Use `m.endpoint.BaseURL()`, `tryReconnect()` returns `DaemonEndpoint` | +| `cmd/roborev/tui/handlers_msg.go` | `handleReconnectMsg` updates both endpoint and HTTP client | +| Various `*_test.go` | Update to use `DaemonEndpoint`, add new transport tests | + +## Container Bind-Mount Note + +The auto-generated socket path (`unix://`) uses `os.TempDir()` which resolves inside the current environment. For container bind-mount use cases, the explicit path form (`unix:///shared/roborev.sock`) is recommended, since the host's temp directory may not be accessible or may resolve differently inside a container. 
+ +## Out of Scope + +- Making Unix socket the default transport (TCP remains default) +- Remote/non-loopback TCP (still requires auth/TLS, separate effort) +- Relative socket paths (rejected; can be added later if needed) +- Windows named pipes (Unix sockets only; Windows support not a current goal) diff --git a/docs/superpowers/plans/2026-03-18-unix-socket-transport.md b/docs/superpowers/plans/2026-03-18-unix-socket-transport.md new file mode 100644 index 000000000..e401fd4f8 --- /dev/null +++ b/docs/superpowers/plans/2026-03-18-unix-socket-transport.md @@ -0,0 +1,1246 @@ +# Unix Socket Transport Implementation Plan + +> **For agentic workers:** REQUIRED: Use superpowers:subagent-driven-development (if subagents available) or superpowers:executing-plans to implement this plan. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Add Unix domain socket support alongside TCP loopback for the daemon HTTP API, controlled via `server_addr` config. + +**Architecture:** Introduce a `DaemonEndpoint` type (`internal/daemon/endpoint.go`) that encapsulates transport (TCP/Unix) and address. Thread it through RuntimeInfo, server, client, probes, CLI, and TUI -- replacing raw `host:port` strings. TCP remains the default. + +**Tech Stack:** Go stdlib (`net`, `net/http`, `os`), no new dependencies. 
+ +**Spec:** `docs/plans/2026-03-18-unix-socket-transport-design.md` + +--- + +## File Structure + +| File | Responsibility | +|------|---------------| +| `internal/daemon/endpoint.go` | **New.** `DaemonEndpoint` type, `ParseEndpoint`, `DefaultSocketPath`, `MaxUnixPathLen` | +| `internal/daemon/endpoint_test.go` | **New.** Tests for parsing, validation, listener, HTTP round-trip | +| `internal/daemon/runtime.go` | Add `Network` to `RuntimeInfo`, `Endpoint()` method, change `WriteRuntime`/probe/kill signatures | +| `internal/daemon/runtime_test.go` | Update for new signatures, add compat test | +| `internal/daemon/server.go` | `Start()` uses `DaemonEndpoint`, Unix listener setup, socket cleanup | +| `internal/daemon/server_test.go` | Update `waitForServerReady` tests | +| `internal/daemon/client.go` | `addr` -> `baseURL`, `NewHTTPClient(DaemonEndpoint)` | +| `cmd/roborev/main.go` | `--server` flag default to `""`, add `PersistentPreRunE` | +| `cmd/roborev/daemon_lifecycle.go` | `getDaemonEndpoint()`, remove `probeDaemonServerURL`, update `ensureDaemon`/`startDaemon` | +| `cmd/roborev/daemon_client.go` | Replace raw `http.Client{}` with `getDaemonHTTPClient()` | +| `cmd/roborev/stream.go` | Use endpoint for URL and transport | +| `cmd/roborev/*.go` (16 command files) | Mechanical: `getDaemonAddr()` -> `getDaemonEndpoint().BaseURL()`, raw clients -> `getDaemonHTTPClient()` | +| `cmd/roborev/tui_cmd.go` | Parse `--addr` via `ParseEndpoint`, pass `DaemonEndpoint` to TUI | +| `cmd/roborev/tui/tui.go` | `serverAddr` -> `endpoint`, `newModel(DaemonEndpoint)` | +| `cmd/roborev/tui/api.go` | `m.serverAddr` -> `m.endpoint.BaseURL()` | +| `cmd/roborev/tui/fetch.go` | `tryReconnect` returns `DaemonEndpoint` | +| `cmd/roborev/tui/handlers_msg.go` | `handleReconnectMsg` updates endpoint + client | + +--- + +### Task 1: DaemonEndpoint type and tests + +**Files:** +- Create: `internal/daemon/endpoint.go` +- Create: `internal/daemon/endpoint_test.go` + +- [ ] **Step 1: Write 
endpoint_test.go with ParseEndpoint table tests**
+
+```go
+package daemon
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestParseEndpoint(t *testing.T) {
+	tests := []struct {
+		name    string
+		input   string
+		network string
+		wantErr string
+	}{
+		{"empty defaults to TCP", "", "tcp", ""},
+		{"bare host:port", "127.0.0.1:7373", "tcp", ""},
+		{"ipv6 loopback", "[::1]:7373", "tcp", ""},
+		{"localhost", "localhost:7373", "tcp", ""},
+		{"http prefix stripped", "http://127.0.0.1:7373", "tcp", ""},
+		{"unix auto", "unix://", "unix", ""},
+		{"unix explicit path", "unix:///tmp/test.sock", "unix", ""},
+		{"non-loopback rejected", "192.168.1.1:7373", "", "loopback"},
+		{"relative unix rejected", "unix://relative.sock", "", "absolute"},
+		{"http non-loopback rejected", "http://192.168.1.1:7373", "", "loopback"},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ep, err := ParseEndpoint(tt.input)
+			if tt.wantErr != "" {
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tt.wantErr)
+				return
+			}
+			require.NoError(t, err)
+			assert.Equal(t, tt.network, ep.Network)
+			if tt.network == "tcp" {
+				assert.NotEmpty(t, ep.Address)
+			}
+		})
+	}
+}
+
+func TestParseEndpoint_UnixPathTooLong(t *testing.T) {
+	long := "unix:///" + strings.Repeat("a", MaxUnixPathLen)
+	_, err := ParseEndpoint(long)
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), "exceeds")
+}
+
+func TestParseEndpoint_UnixNullByte(t *testing.T) {
+	_, err := ParseEndpoint("unix:///tmp/bad\x00.sock")
+	require.Error(t, err)
+}
+
+func TestDefaultSocketPath(t *testing.T) {
+	path := DefaultSocketPath()
+	assert.True(t, len(path) < MaxUnixPathLen,
+		"default socket path %q (%d bytes) exceeds limit %d", path, len(path), MaxUnixPathLen)
+	assert.Contains(t, path, "roborev-")
+	assert.True(t, strings.HasSuffix(path, "daemon.sock"))
+}
+
+func TestDaemonEndpoint_BaseURL(t *testing.T) {
+	assert 
:= assert.New(t) + + tcp := DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:7373"} + assert.Equal("http://127.0.0.1:7373", tcp.BaseURL()) + + unix := DaemonEndpoint{Network: "unix", Address: "/tmp/test.sock"} + assert.Equal("http://localhost", unix.BaseURL()) +} + +func TestDaemonEndpoint_String(t *testing.T) { + assert := assert.New(t) + + tcp := DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:7373"} + assert.Equal("tcp:127.0.0.1:7373", tcp.String()) + + unix := DaemonEndpoint{Network: "unix", Address: "/tmp/test.sock"} + assert.Equal("unix:/tmp/test.sock", unix.String()) +} +``` + +- [ ] **Step 2: Run tests to verify they fail** + +Run: `nix develop -c go test ./internal/daemon/ -run TestParseEndpoint -v` +Expected: FAIL (types not defined yet) + +- [ ] **Step 3: Write endpoint.go** + +```go +package daemon + +import ( + "context" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +// MaxUnixPathLen is the platform socket path length limit. +// macOS/BSD: 104, Linux: 108. +var MaxUnixPathLen = func() int { + if runtime.GOOS == "darwin" { + return 104 + } + return 108 +}() + +// DaemonEndpoint encapsulates the transport type and address for the daemon. +type DaemonEndpoint struct { + Network string // "tcp" or "unix" + Address string // "127.0.0.1:7373" or "/tmp/roborev-1000/daemon.sock" +} + +// ParseEndpoint parses a server_addr config value into a DaemonEndpoint. +func ParseEndpoint(serverAddr string) (DaemonEndpoint, error) { + if serverAddr == "" { + return DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:7373"}, nil + } + + // Strip http:// prefix for backwards compatibility with old --server flag format. 
+ if strings.HasPrefix(serverAddr, "http://") { + serverAddr = strings.TrimPrefix(serverAddr, "http://") + return parseTCPEndpoint(serverAddr) + } + + if strings.HasPrefix(serverAddr, "unix://") { + return parseUnixEndpoint(serverAddr) + } + + return parseTCPEndpoint(serverAddr) +} + +func parseTCPEndpoint(addr string) (DaemonEndpoint, error) { + if !isLoopbackAddr(addr) { + return DaemonEndpoint{}, fmt.Errorf( + "daemon address %q must use a loopback host (127.0.0.1, localhost, or [::1])", addr) + } + return DaemonEndpoint{Network: "tcp", Address: addr}, nil +} + +func parseUnixEndpoint(raw string) (DaemonEndpoint, error) { + path := strings.TrimPrefix(raw, "unix://") + + if path == "" { + return DaemonEndpoint{Network: "unix", Address: DefaultSocketPath()}, nil + } + + if !filepath.IsAbs(path) { + return DaemonEndpoint{}, fmt.Errorf( + "unix socket path %q must be absolute", path) + } + + if strings.ContainsRune(path, 0) { + return DaemonEndpoint{}, fmt.Errorf( + "unix socket path contains null byte") + } + + if len(path) >= MaxUnixPathLen { + return DaemonEndpoint{}, fmt.Errorf( + "unix socket path %q (%d bytes) exceeds platform limit of %d bytes", + path, len(path), MaxUnixPathLen) + } + + return DaemonEndpoint{Network: "unix", Address: path}, nil +} + +// DefaultSocketPath returns the auto-generated socket path under os.TempDir(). +func DefaultSocketPath() string { + return filepath.Join(os.TempDir(), fmt.Sprintf("roborev-%d", os.Getuid()), "daemon.sock") +} + +// IsUnix returns true if this endpoint uses a Unix domain socket. +func (e DaemonEndpoint) IsUnix() bool { + return e.Network == "unix" +} + +// BaseURL returns the HTTP base URL for constructing API requests. +// For Unix sockets, the host is "localhost" (ignored by the custom dialer). +func (e DaemonEndpoint) BaseURL() string { + if e.IsUnix() { + return "http://localhost" + } + return "http://" + e.Address +} + +// HTTPClient returns an http.Client configured for this endpoint's transport. 
+func (e DaemonEndpoint) HTTPClient(timeout time.Duration) *http.Client { + if e.IsUnix() { + return &http.Client{ + Timeout: timeout, + Transport: &http.Transport{ + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "unix", e.Address) + }, + }, + } + } + return &http.Client{Timeout: timeout} +} + +// Listener creates a net.Listener bound to this endpoint. +// For Unix sockets, the caller must ensure the parent directory exists +// and any stale socket file is removed before calling this. +func (e DaemonEndpoint) Listener() (net.Listener, error) { + return net.Listen(e.Network, e.Address) +} + +// String returns a human-readable representation for logging. +func (e DaemonEndpoint) String() string { + return e.Network + ":" + e.Address +} + +// Port returns the TCP port, or 0 for Unix sockets. +func (e DaemonEndpoint) Port() int { + if e.IsUnix() { + return 0 + } + _, portStr, err := net.SplitHostPort(e.Address) + if err != nil { + return 0 + } + port := 0 + fmt.Sscanf(portStr, "%d", &port) + return port +} +``` + +- [ ] **Step 4: Run tests to verify they pass** + +Run: `nix develop -c go test ./internal/daemon/ -run "TestParseEndpoint|TestDefaultSocketPath|TestDaemonEndpoint" -v` +Expected: PASS + +- [ ] **Step 5: Add Listener and HTTPClient round-trip tests** + +Append to `endpoint_test.go`: + +```go +func TestDaemonEndpoint_Listener_TCP(t *testing.T) { + ep := DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:0"} + ln, err := ep.Listener() + require.NoError(t, err) + defer ln.Close() + assert.Contains(t, ln.Addr().String(), "127.0.0.1:") +} + +func TestDaemonEndpoint_Listener_Unix(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Unix sockets not supported on Windows") + } + // Use short path to stay under socket limit + sockPath := filepath.Join("/tmp", fmt.Sprintf("roborev-test-%d.sock", os.Getpid())) + t.Cleanup(func() { os.Remove(sockPath) }) + + ep := DaemonEndpoint{Network: "unix", 
Address: sockPath} + ln, err := ep.Listener() + require.NoError(t, err) + defer ln.Close() + assert.Equal(t, sockPath, ln.Addr().String()) +} + +func TestDaemonEndpoint_HTTPClient_UnixRoundTrip(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Unix sockets not supported on Windows") + } + sockPath := filepath.Join("/tmp", fmt.Sprintf("roborev-test-%d.sock", os.Getpid())) + t.Cleanup(func() { os.Remove(sockPath) }) + + // Start server on Unix socket + ln, err := net.Listen("unix", sockPath) + require.NoError(t, err) + defer ln.Close() + + srv := &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("ok")) + })} + go srv.Serve(ln) + defer srv.Close() + + // Connect via DaemonEndpoint + ep := DaemonEndpoint{Network: "unix", Address: sockPath} + client := ep.HTTPClient(2 * time.Second) + resp, err := client.Get(ep.BaseURL() + "/test") + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, http.StatusOK, resp.StatusCode) +} +``` + +- [ ] **Step 6: Run all endpoint tests** + +Run: `nix develop -c go test ./internal/daemon/ -run "TestParseEndpoint|TestDefaultSocketPath|TestDaemonEndpoint" -v` +Expected: PASS + +- [ ] **Step 7: Commit** + +```bash +git add internal/daemon/endpoint.go internal/daemon/endpoint_test.go +git commit -m "Add DaemonEndpoint type with ParseEndpoint, Listener, HTTPClient" +``` + +--- + +### Task 2: RuntimeInfo changes and WriteRuntime + +**Files:** +- Modify: `internal/daemon/runtime.go` +- Modify: `internal/daemon/runtime_test.go` + +- [ ] **Step 1: Write test for RuntimeInfo.Endpoint() and backwards compat** + +Add to `runtime_test.go`: + +```go +func TestRuntimeInfo_Endpoint(t *testing.T) { + assert := assert.New(t) + + // TCP with explicit network + info := RuntimeInfo{PID: 1, Addr: "127.0.0.1:7373", Port: 7373, Network: "tcp"} + ep := info.Endpoint() + assert.Equal("tcp", ep.Network) + assert.Equal("127.0.0.1:7373", ep.Address) + + // Unix + info = RuntimeInfo{PID: 1, 
Addr: "/tmp/test.sock", Port: 0, Network: "unix"} + ep = info.Endpoint() + assert.Equal("unix", ep.Network) + assert.Equal("/tmp/test.sock", ep.Address) + + // Empty network defaults to TCP (backwards compat) + info = RuntimeInfo{PID: 1, Addr: "127.0.0.1:7373", Port: 7373, Network: ""} + ep = info.Endpoint() + assert.Equal("tcp", ep.Network) +} + +func TestRuntimeInfo_BackwardsCompat_NoNetworkField(t *testing.T) { + // Simulate old JSON without "network" field + data := []byte(`{"pid": 1234, "addr": "127.0.0.1:7373", "port": 7373, "version": "0.47.0"}`) + var info RuntimeInfo + require.NoError(t, json.Unmarshal(data, &info)) + assert.Equal(t, "", info.Network) + ep := info.Endpoint() + assert.Equal(t, "tcp", ep.Network) + assert.Equal(t, "127.0.0.1:7373", ep.Address) +} +``` + +- [ ] **Step 2: Run tests to verify they fail** + +Run: `nix develop -c go test ./internal/daemon/ -run "TestRuntimeInfo_Endpoint|TestRuntimeInfo_BackwardsCompat" -v` +Expected: FAIL (Endpoint method and Network field don't exist) + +- [ ] **Step 3: Add Network field and Endpoint() to RuntimeInfo** + +In `runtime.go`, update `RuntimeInfo`: + +```go +type RuntimeInfo struct { + PID int `json:"pid"` + Addr string `json:"addr"` + Port int `json:"port"` + Network string `json:"network"` // "tcp" or "unix"; empty treated as "tcp" on read + Version string `json:"version"` + SourcePath string `json:"-"` +} + +func (r *RuntimeInfo) Endpoint() DaemonEndpoint { + network := r.Network + if network == "" { + network = "tcp" + } + return DaemonEndpoint{Network: network, Address: r.Addr} +} +``` + +- [ ] **Step 4: Update WriteRuntime signature** + +Change `WriteRuntime` to accept `DaemonEndpoint`: + +```go +func WriteRuntime(ep DaemonEndpoint, version string) error { + info := RuntimeInfo{ + PID: os.Getpid(), + Addr: ep.Address, + Port: ep.Port(), + Network: ep.Network, + Version: version, + } + // ... 
rest unchanged +} +``` + +- [ ] **Step 5: Update the WriteRuntime call in server.go** + +In `server.go:203`, change: + +```go +// Before: +if err := WriteRuntime(addr, port, version.Version); err != nil { + +// After: +if err := WriteRuntime(ep, version.Version); err != nil { +``` + +(The `ep` variable will be fully wired in Task 4. For now, construct it inline to keep the build green.) + +Temporary in `server.go:203`: + +```go +if err := WriteRuntime(DaemonEndpoint{Network: "tcp", Address: addr}, version.Version); err != nil { +``` + +- [ ] **Step 6: Fix any other callers of WriteRuntime in the codebase** + +Run: `nix develop -c go build ./...` +Expected: PASS (or fix compile errors from signature change) + +- [ ] **Step 7: Run tests** + +Run: `nix develop -c go test ./internal/daemon/ -run "TestRuntimeInfo" -v` +Expected: PASS + +- [ ] **Step 8: Commit** + +```bash +git add internal/daemon/runtime.go internal/daemon/runtime_test.go internal/daemon/server.go +git commit -m "Add Network field to RuntimeInfo, Endpoint() method, update WriteRuntime signature" +``` + +--- + +### Task 3: Update probe and kill functions to use DaemonEndpoint + +**Files:** +- Modify: `internal/daemon/runtime.go` +- Modify: `internal/daemon/runtime_test.go` + +- [ ] **Step 1: Change ProbeDaemon signature** + +```go +// Before: +func ProbeDaemon(addr string, timeout time.Duration) (*PingInfo, error) + +// After: +func ProbeDaemon(ep DaemonEndpoint, timeout time.Duration) (*PingInfo, error) { + if ep.Address == "" { + return nil, fmt.Errorf("empty daemon address") + } + + // TCP endpoints must be loopback (validated at parse time, but defense in depth) + if !ep.IsUnix() && !isLoopbackAddr(ep.Address) { + return nil, fmt.Errorf("non-loopback daemon address: %s", ep.Address) + } + + client := ep.HTTPClient(timeout) + baseURL := ep.BaseURL() + if info, shouldFallback, err := probeDaemonPing(client, baseURL); !shouldFallback { + return info, err + } + + return probeLegacyDaemonStatus(client, 
baseURL) +} +``` + +- [ ] **Step 2: Update probeDaemonPing and probeLegacyDaemonStatus to use baseURL** + +```go +// Before: +func probeDaemonPing(client *http.Client, addr string) (*PingInfo, bool, error) { + resp, err := client.Get(fmt.Sprintf("http://%s/api/ping", addr)) + +// After: +func probeDaemonPing(client *http.Client, baseURL string) (*PingInfo, bool, error) { + resp, err := client.Get(baseURL + "/api/ping") +``` + +Same pattern for `probeLegacyDaemonStatus`. + +- [ ] **Step 3: Change IsDaemonAlive signature** + +```go +// Before: +func IsDaemonAlive(addr string) bool + +// After: +func IsDaemonAlive(ep DaemonEndpoint) bool { + if ep.Address == "" { + return false + } + for attempt := range 2 { + if attempt > 0 { + time.Sleep(200 * time.Millisecond) + } + if _, err := ProbeDaemon(ep, 1*time.Second); err == nil { + return true + } + } + return false +} +``` + +- [ ] **Step 4: Update all callers of IsDaemonAlive and ProbeDaemon** + +In `runtime.go`: +- `GetAnyRunningDaemon()`: `IsDaemonAlive(info.Addr)` -> `IsDaemonAlive(info.Endpoint())` +- `KillDaemon()`: Use `info.Endpoint()` for both HTTP shutdown and alive checks +- `CleanupZombieDaemons()`: `IsDaemonAlive(info.Addr)` -> `IsDaemonAlive(info.Endpoint())`. For Unix socket endpoints, also add PID-based liveness check: if the PID from RuntimeInfo is dead, skip the HTTP probe and go straight to cleanup (removes socket file + runtime file). This avoids slow timeouts on stale Unix sockets where the process has crashed. 
+ +```go +// In CleanupZombieDaemons, before the IsDaemonAlive call: +ep := info.Endpoint() +if ep.IsUnix() && info.PID > 0 && !isProcessAlive(info.PID) { + // Process is dead -- remove stale socket and runtime file directly + os.Remove(ep.Address) + if info.SourcePath != "" { + os.Remove(info.SourcePath) + } else { + RemoveRuntimeForPID(info.PID) + } + cleaned++ + continue +} +if IsDaemonAlive(ep) { + continue +} +``` + +In `server.go`: +- `Start()` line 141: `IsDaemonAlive(info.Addr)` -> `IsDaemonAlive(info.Endpoint())` + +- [ ] **Step 5: Update KillDaemon to use endpoint for HTTP shutdown** + +```go +func KillDaemon(info *RuntimeInfo) bool { + if info == nil { + return true + } + + removeRuntimeFile := func() { + if info.SourcePath != "" { + os.Remove(info.SourcePath) + } else if info.PID > 0 { + RemoveRuntimeForPID(info.PID) + } + // Clean up Unix socket file if applicable + ep := info.Endpoint() + if ep.IsUnix() { + os.Remove(ep.Address) + } + } + + ep := info.Endpoint() + + // Try graceful HTTP shutdown + if ep.Address != "" { + client := ep.HTTPClient(2 * time.Second) + resp, err := client.Post(ep.BaseURL()+"/api/shutdown", "application/json", nil) + if err == nil { + resp.Body.Close() + for range 10 { + time.Sleep(200 * time.Millisecond) + if !IsDaemonAlive(ep) { + removeRuntimeFile() + return true + } + } + } + } + + // ... 
rest unchanged (OS kill, PID check) +} +``` + +- [ ] **Step 6: Update waitForServerReady signature** + +```go +// Before: +func waitForServerReady(ctx context.Context, addr string, timeout time.Duration, serveErrCh <-chan error) (bool, bool, error) + +// After: +func waitForServerReady(ctx context.Context, ep DaemonEndpoint, timeout time.Duration, serveErrCh <-chan error) (bool, bool, error) +``` + +Update the `ProbeDaemon` call inside: +```go +// Before: +if _, err := ProbeDaemon(addr, 200*time.Millisecond); err == nil { +// After: +if _, err := ProbeDaemon(ep, 200*time.Millisecond); err == nil { +``` + +Update caller in `server.go` `Start()`: +```go +// Before: +ready, serveExited, err := waitForServerReady(ctx, addr, 2*time.Second, serveErrCh) +// After: +ready, serveExited, err := waitForServerReady(ctx, DaemonEndpoint{Network: "tcp", Address: addr}, 2*time.Second, serveErrCh) +``` + +(This temporary construction will be replaced in Task 4 when `Start()` gets the full endpoint flow.) + +- [ ] **Step 7: Build and run all daemon tests** + +Run: `nix develop -c go build ./... 
&& nix develop -c go test ./internal/daemon/ -v -count=1` +Expected: PASS + +- [ ] **Step 8: Commit** + +```bash +git add internal/daemon/runtime.go internal/daemon/runtime_test.go internal/daemon/server.go internal/daemon/server_test.go +git commit -m "Update probe, kill, and alive-check functions to use DaemonEndpoint" +``` + +--- + +### Task 4: Server Start() with full endpoint flow + +**Files:** +- Modify: `internal/daemon/server.go` +- Modify: `internal/daemon/server_test.go` + +- [ ] **Step 1: Rewrite Start() to use ParseEndpoint** + +Replace the validation + port discovery + listener block in `Start()`: + +```go +func (s *Server) Start(ctx context.Context) error { + cfg := s.configWatcher.Config() + + ep, err := ParseEndpoint(cfg.ServerAddr) + if err != nil { + return fmt.Errorf("invalid server address: %w", err) + } + + // Clean up any zombie daemons first + if cleaned := CleanupZombieDaemons(); cleaned > 0 { + log.Printf("Cleaned up %d zombie daemon(s)", cleaned) + // ... activity log unchanged + } + + // Check if a responsive daemon is still running after cleanup + if info, err := GetAnyRunningDaemon(); err == nil && IsDaemonAlive(info.Endpoint()) { + return fmt.Errorf("daemon already running (pid %d on %s)", info.PID, info.Addr) + } + + // Reset stale jobs + if err := s.db.ResetStaleJobs(); err != nil { + log.Printf("Warning: failed to reset stale jobs: %v", err) + } + + // Start config watcher + if err := s.configWatcher.Start(ctx); err != nil { + log.Printf("Warning: failed to start config watcher: %v", err) + } + + // Prepare listener + if ep.IsUnix() { + // Create parent dir with owner-only permissions + dir := filepath.Dir(ep.Address) + if err := os.MkdirAll(dir, 0700); err != nil { + s.configWatcher.Stop() + return fmt.Errorf("create socket directory %s: %w", dir, err) + } + // Remove stale socket + os.Remove(ep.Address) + } else { + // TCP: find available port + // Use the parsed endpoint address (not cfg.ServerAddr which may have http:// prefix) 
+ addr, _, err := FindAvailablePort(ep.Address) + if err != nil { + s.configWatcher.Stop() + return fmt.Errorf("find available port: %w", err) + } + ep = DaemonEndpoint{Network: "tcp", Address: addr} + } + + listener, err := ep.Listener() + if err != nil { + s.configWatcher.Stop() + return fmt.Errorf("listen on %s: %w", ep, err) + } + + // Update endpoint with actual bound address (port may differ for TCP) + if !ep.IsUnix() { + actualAddr := listener.Addr().String() + ep = DaemonEndpoint{Network: "tcp", Address: actualAddr} + } + + // Set socket permissions + if ep.IsUnix() { + if err := os.Chmod(ep.Address, 0600); err != nil { + listener.Close() + s.configWatcher.Stop() + return fmt.Errorf("chmod socket %s: %w", ep.Address, err) + } + } + + s.httpServer.Addr = ep.Address + s.endpoint = ep // Store for cleanup + + serveErrCh := make(chan error, 1) + log.Printf("Starting HTTP server on %s", ep) + go func() { + serveErrCh <- s.httpServer.Serve(listener) + }() + + s.workerPool.Start() + + ready, serveExited, err := waitForServerReady(ctx, ep, 2*time.Second, serveErrCh) + if err != nil { + _ = listener.Close() + s.configWatcher.Stop() + s.workerPool.Stop() + return err + } + if !ready { + if err := awaitServeExitOnUnreadyStartup(serveExited, serveErrCh); err != nil { + s.configWatcher.Stop() + s.workerPool.Stop() + return err + } + return nil + } + + if err := WriteRuntime(ep, version.Version); err != nil { + log.Printf("Warning: failed to write runtime info: %v", err) + } + + // ... 
rest unchanged (activity log, hook check, serveErrCh wait) +} +``` + +- [ ] **Step 2: Add `endpoint` field to Server struct** + +Find the Server struct definition and add: + +```go +endpoint DaemonEndpoint // Stored for cleanup on stop +``` + +- [ ] **Step 3: Add socket cleanup to server shutdown** + +Find the shutdown/stop method and add Unix socket removal: + +```go +// In the shutdown path: +if s.endpoint.IsUnix() { + os.Remove(s.endpoint.Address) +} +``` + +- [ ] **Step 4: Remove validateDaemonBindAddr calls** + +Delete or leave as unused (will be removed once all callers are gone). The validation is now inside `ParseEndpoint`. + +- [ ] **Step 5: Build and run tests** + +Run: `nix develop -c go build ./... && nix develop -c go test ./internal/daemon/ -v -count=1` +Expected: PASS + +- [ ] **Step 6: Run go vet and fmt** + +Run: `nix develop -c go fmt ./... && nix develop -c go vet ./...` + +- [ ] **Step 7: Commit** + +```bash +git add internal/daemon/server.go internal/daemon/runtime.go +git commit -m "Wire DaemonEndpoint through Server.Start(), add Unix socket setup and cleanup" +``` + +--- + +### Task 5: Update daemon client + +**Files:** +- Modify: `internal/daemon/client.go` + +- [ ] **Step 1: Change HTTPClient struct and NewHTTPClient** + +```go +type HTTPClient struct { + baseURL string + httpClient *http.Client + pollInterval time.Duration +} + +func NewHTTPClient(ep DaemonEndpoint) *HTTPClient { + return &HTTPClient{ + baseURL: ep.BaseURL(), + httpClient: ep.HTTPClient(10 * time.Second), + pollInterval: DefaultPollInterval, + } +} +``` + +- [ ] **Step 2: Rename c.addr to c.baseURL throughout client.go** + +Find-and-replace `c.addr` -> `c.baseURL` in all method bodies. This is a pure rename -- no logic changes. 
+ +- [ ] **Step 3: Update NewHTTPClientFromRuntime** + +```go +func NewHTTPClientFromRuntime() (*HTTPClient, error) { + var lastErr error + for range 5 { + info, err := GetAnyRunningDaemon() + if err == nil { + return NewHTTPClient(info.Endpoint()), nil + } + lastErr = err + time.Sleep(100 * time.Millisecond) + } + return nil, fmt.Errorf("daemon not running: %w", lastErr) +} +``` + +- [ ] **Step 4: Build and test** + +Run: `nix develop -c go build ./... && nix develop -c go test ./internal/daemon/ -v -count=1` +Expected: PASS (or fix compile errors from callers in cmd/) + +- [ ] **Step 5: Commit** + +```bash +git add internal/daemon/client.go +git commit -m "Update daemon HTTPClient to use DaemonEndpoint, rename addr to baseURL" +``` + +--- + +### Task 6: Migrate all CLI command files to DaemonEndpoint + +All CLI changes are done in a single task to keep the build green. Removing `getDaemonAddr()` and adding `getDaemonEndpoint()` must happen atomically across all callers. + +**Files:** +- Modify: `cmd/roborev/main.go` +- Modify: `cmd/roborev/daemon_lifecycle.go` +- Modify: `cmd/roborev/daemon_client.go` +- Modify: `cmd/roborev/analyze.go`, `compact.go`, `review.go`, `run.go`, `fix.go`, `list.go`, `summary.go`, `status.go`, `show.go`, `wait.go`, `comment.go`, `sync.go`, `remap.go`, `postcommit.go`, `refine.go`, `job_helpers.go`, `stream.go` + +- [ ] **Step 1: Run grep to inventory all callsites** + +Run: `nix develop -c grep -rn 'getDaemonAddr\|serverAddr.*api\|http\.Client{Timeout\|http\.Post(' cmd/roborev/*.go | grep -v _test.go` + +This identifies every callsite that needs updating. + +- [ ] **Step 2: Update main.go global variable and flag** + +```go +var ( + serverAddr string // Raw flag value, parsed via ParseEndpoint + verbose bool +) + +// In main(): +rootCmd.PersistentFlags().StringVar(&serverAddr, "server", "", "daemon server address (e.g. 
127.0.0.1:7373 or unix://)") +``` + +- [ ] **Step 3: Replace getDaemonAddr with getDaemonEndpoint and getDaemonHTTPClient** + +In `daemon_lifecycle.go`: + +```go +func getDaemonEndpoint() daemon.DaemonEndpoint { + if info, err := daemon.GetAnyRunningDaemon(); err == nil { + return info.Endpoint() + } + ep, err := daemon.ParseEndpoint(serverAddr) + if err != nil { + return daemon.DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:7373"} + } + return ep +} + +func getDaemonHTTPClient(timeout time.Duration) *http.Client { + return getDaemonEndpoint().HTTPClient(timeout) +} +``` + +- [ ] **Step 4: Update ensureDaemon** + +Replace `serverAddr = fmt.Sprintf("http://%s", info.Addr)` references. Replace `probeDaemonServerURL` call with direct `ProbeDaemon(ep, timeout)`. + +```go +func ensureDaemon() error { + skipVersionCheck := os.Getenv("ROBOREV_SKIP_VERSION_CHECK") == "1" + + if info, err := getAnyRunningDaemon(); err == nil { + if !skipVersionCheck { + probe, err := daemon.ProbeDaemon(info.Endpoint(), 2*time.Second) + if err != nil { + if verbose { + fmt.Printf("Daemon probe failed, restarting...\n") + } + return restartDaemonForEnsure() + } + // ... version check unchanged + } + return nil + } + + // Try the configured address + ep := getDaemonEndpoint() + if probe, err := daemon.ProbeDaemon(ep, 2*time.Second); err == nil { + if !skipVersionCheck { + // ... version check unchanged + } + return nil + } + + return startDaemon() +} +``` + +- [ ] **Step 5: Update startDaemon** + +Remove `serverAddr = fmt.Sprintf("http://%s", info.Addr)`: + +```go +func startDaemon() error { + // ... exec unchanged ... + for range 30 { + time.Sleep(100 * time.Millisecond) + if _, err := daemon.GetAnyRunningDaemon(); err == nil { + return nil + } + } + return fmt.Errorf("daemon failed to start") +} +``` + +- [ ] **Step 6: Remove probeDaemonServerURL and getDaemonAddr** + +Delete both functions entirely. 
+ +- [ ] **Step 7: Update daemon_lifecycle.go registerRepo** + +```go +func registerRepo(repoPath string) error { + body, err := json.Marshal(map[string]string{"repo_path": repoPath}) + if err != nil { + return err + } + client := getDaemonHTTPClient(5 * time.Second) + resp, err := client.Post(getDaemonEndpoint().BaseURL()+"/api/repos/register", "application/json", bytes.NewReader(body)) + // ... rest unchanged +} +``` + +- [ ] **Step 8: Update daemon_client.go** + +`waitForJob` and `showReview` take `serverAddr string` as a parameter. Change callers to pass `getDaemonEndpoint().BaseURL()`. Inside these functions, replace `&http.Client{Timeout: N}` with `getDaemonHTTPClient(N)`. All 5 locally-constructed clients and the bare `http.Post` call are replaced. + +- [ ] **Step 9: Apply mechanical replacements across all remaining CLI files** + +For each file (`analyze.go`, `compact.go`, `review.go`, `run.go`, `fix.go`, `list.go`, `summary.go`, `status.go`, `show.go`, `wait.go`, `comment.go`, `sync.go`, `remap.go`, `postcommit.go`, `refine.go`, `job_helpers.go`, `stream.go`): + +1. Replace `getDaemonAddr()` with `getDaemonEndpoint().BaseURL()` +2. Replace `&http.Client{Timeout: N}` with `getDaemonHTTPClient(N)` +3. Replace bare `http.Post(serverAddr+...)` with `getDaemonHTTPClient(N).Post(getDaemonEndpoint().BaseURL()+...)` (affects `review.go`, `run.go`, `postcommit.go`) +4. For `postcommit.go`: replace `var hookHTTPClient = &http.Client{Timeout: 3 * time.Second}` with `func getHookHTTPClient() *http.Client { return getDaemonHTTPClient(3 * time.Second) }` and update callers. Also replace the bare `serverAddr+"/api/enqueue"` on line 81 with `getDaemonEndpoint().BaseURL()+"/api/enqueue"`. +5. For `stream.go`: use `getDaemonEndpoint()` for both the URL and the HTTP client transport (streaming client needs `Timeout: 0`). 
+ +- [ ] **Step 10: Build to verify all compile errors are resolved** + +Run: `nix develop -c go build ./...` +Expected: PASS + +- [ ] **Step 11: Run go fmt and go vet** + +Run: `nix develop -c go fmt ./... && nix develop -c go vet ./...` + +- [ ] **Step 12: Commit** + +```bash +git add cmd/roborev/*.go +git commit -m "Migrate all CLI command files to use DaemonEndpoint" +``` + +--- + +### Task 7: TUI changes + +**Files:** +- Modify: `cmd/roborev/tui_cmd.go` +- Modify: `cmd/roborev/tui/tui.go` +- Modify: `cmd/roborev/tui/api.go` +- Modify: `cmd/roborev/tui/fetch.go` +- Modify: `cmd/roborev/tui/handlers_msg.go` + +- [ ] **Step 1: Update tui/tui.go model struct and newModel** + +```go +// In model struct: +endpoint daemon.DaemonEndpoint // replaces serverAddr string + +// newModel signature: +func newModel(ep daemon.DaemonEndpoint, opts ...option) model { + // ... + m := model{ + endpoint: ep, + client: ep.HTTPClient(10 * time.Second), + // ... + } +} +``` + +- [ ] **Step 2: Update tui/api.go** + +```go +func (m model) getJSON(path string, out any) error { + url := m.endpoint.BaseURL() + path + // ... rest unchanged +} + +func (m model) postJSON(path string, in any, out any) error { + // ... + resp, err := m.client.Post(m.endpoint.BaseURL()+path, "application/json", bytes.NewReader(body)) + // ... 
rest unchanged +} +``` + +- [ ] **Step 3: Update tui/fetch.go tryReconnect** + +Find `tryReconnect` and update the reconnect message to carry a `DaemonEndpoint`: + +```go +// In reconnectMsg struct: +type reconnectMsg struct { + endpoint daemon.DaemonEndpoint +} + +// In tryReconnect: +func tryReconnect() tea.Msg { + info, err := daemon.GetAnyRunningDaemon() + if err != nil { + return reconnectMsg{} + } + return reconnectMsg{endpoint: info.Endpoint()} +} +``` + +- [ ] **Step 4: Update tui/handlers_msg.go handleReconnectMsg** + +```go +// Update m.endpoint and m.client on successful reconnect: +m.endpoint = msg.endpoint +m.client = msg.endpoint.HTTPClient(10 * time.Second) +``` + +- [ ] **Step 5: Update tui/fetch.go for all m.serverAddr references** + +Replace all `m.serverAddr` with `m.endpoint.BaseURL()` in URL constructions. **Important:** Several functions (`fetchRepoNames`, `fetchRepos`, `backfillBranches`, `fetchBranchesForRepo`) capture `serverAddr` into a local variable before a closure (e.g., `serverAddr := m.serverAddr`). These closures must be updated to capture `m.endpoint.BaseURL()` into the local variable instead. + +- [ ] **Step 6: Update tui_cmd.go and tui.Config** + +The `tui.Config` struct has a `ServerAddr string` field passed to `tui.Run()` which calls `newModel`. Update the flow: + +In `tui_cmd.go`: Parse `--addr` via `ParseEndpoint` before calling `tui.Run()`. Change `Config.ServerAddr string` to `Config.Endpoint daemon.DaemonEndpoint`. Remove the old `http://` prefix logic. + +```go +// In tuiCmd RunE: +ep, err := daemon.ParseEndpoint(addr) +if err != nil { + return fmt.Errorf("invalid address: %w", err) +} +cfg := tui.Config{ + Endpoint: ep, + // ... +} +return tui.Run(cfg) +``` + +In `tui/tui.go` `Run()`: pass `cfg.Endpoint` to `newModel(cfg.Endpoint, ...)`. + +- [ ] **Step 7: Build and test** + +Run: `nix develop -c go build ./... 
&& nix develop -c go test ./cmd/roborev/tui/ -v -count=1` +Expected: PASS + +- [ ] **Step 8: Commit** + +```bash +git add cmd/roborev/tui_cmd.go cmd/roborev/tui/*.go +git commit -m "Update TUI to use DaemonEndpoint for transport-agnostic daemon communication" +``` + +--- + +### Task 8: Update tests + +**Files:** +- Modify: `internal/daemon/runtime_test.go` +- Modify: `internal/daemon/server_test.go` +- Modify: `cmd/roborev/helpers_test.go` +- Modify: `cmd/roborev/main_test_helpers_test.go` +- Modify: Various `cmd/roborev/*_test.go` + +- [ ] **Step 1: Update patchServerAddr in helpers_test.go** + +The test helper that patches `serverAddr` needs to work with the new system. Since the CLI files now call `getDaemonEndpoint()` which falls back to `ParseEndpoint(serverAddr)`, patching `serverAddr` with a test server URL (`http://127.0.0.1:PORT`) should still work -- `ParseEndpoint` strips the `http://` prefix. + +Verify `patchServerAddr` still works or update it: + +```go +func patchServerAddr(t *testing.T, newURL string) { + old := serverAddr + serverAddr = newURL + t.Cleanup(func() { serverAddr = old }) +} +``` + +This should work as-is since `ParseEndpoint("http://127.0.0.1:PORT")` strips the prefix. + +- [ ] **Step 2: Update runtime_test.go for Network field** + +Add `Network` to any `RuntimeInfo` or `runtimeData` literals in tests. Verify existing test assertions about JSON format include the new field. + +- [ ] **Step 3: Update server_test.go for waitForServerReady signature** + +Find any direct calls to `waitForServerReady` in tests and pass a `DaemonEndpoint` instead of a string. + +- [ ] **Step 4: Update main_test_helpers_test.go** + +Add `Network` field to `daemon.RuntimeInfo{Addr: mockAddr, ...}` literals. + +- [ ] **Step 5: Build and run full test suite** + +Run: `nix develop -c go build ./... && nix develop -c go test ./... -count=1` +Expected: PASS + +- [ ] **Step 6: Run go fmt and go vet** + +Run: `nix develop -c go fmt ./... 
&& nix develop -c go vet ./...` + +- [ ] **Step 7: Commit** + +```bash +git add -A +git commit -m "Update all tests for DaemonEndpoint migration" +``` + +--- + +### Task 9: Clean up dead code + +**Files:** +- Modify: `internal/daemon/runtime.go` +- Modify: `internal/daemon/server.go` + +- [ ] **Step 1: Remove validateDaemonBindAddr and parseDaemonBindAddr if unused** + +Check if anything still calls them: + +Run: `nix develop -c grep -rn 'validateDaemonBindAddr\|parseDaemonBindAddr' internal/ cmd/` + +If no callers remain, delete both functions. + +- [ ] **Step 2: Check for unused getDaemonAddr** + +Run: `nix develop -c grep -rn 'getDaemonAddr' cmd/` + +If no callers remain, delete. + +- [ ] **Step 3: Check for unused probeDaemonServerURL** + +Should already be deleted in Task 6, verify. + +- [ ] **Step 4: Build, vet, fmt** + +Run: `nix develop -c go build ./... && nix develop -c go vet ./... && nix develop -c go fmt ./...` +Expected: PASS + +- [ ] **Step 5: Run full test suite** + +Run: `nix develop -c go test ./... -count=1` +Expected: PASS + +- [ ] **Step 6: Commit** + +```bash +git add -A +git commit -m "Remove dead code: validateDaemonBindAddr, parseDaemonBindAddr, getDaemonAddr" +``` + +Note: `isLoopbackAddr` is still used by `ProbeDaemon` for TCP defense-in-depth and by `ParseEndpoint` via `parseTCPEndpoint`. Keep it. 
diff --git a/internal/daemon/client.go b/internal/daemon/client.go index 57f04c456..bff070190 100644 --- a/internal/daemon/client.go +++ b/internal/daemon/client.go @@ -54,16 +54,16 @@ var DefaultPollInterval = 2 * time.Second // HTTPClient is the default HTTP-based implementation of Client type HTTPClient struct { - addr string + baseURL string httpClient *http.Client pollInterval time.Duration } // NewHTTPClient creates a new HTTP daemon client -func NewHTTPClient(addr string) *HTTPClient { +func NewHTTPClient(ep DaemonEndpoint) *HTTPClient { return &HTTPClient{ - addr: addr, - httpClient: &http.Client{Timeout: 10 * time.Second}, + baseURL: ep.BaseURL(), + httpClient: ep.HTTPClient(10 * time.Second), pollInterval: DefaultPollInterval, } } @@ -74,7 +74,7 @@ func NewHTTPClientFromRuntime() (*HTTPClient, error) { for range 5 { info, err := GetAnyRunningDaemon() if err == nil { - return NewHTTPClient(fmt.Sprintf("http://%s", info.Addr)), nil + return NewHTTPClient(info.Endpoint()), nil } lastErr = err time.Sleep(100 * time.Millisecond) @@ -88,7 +88,7 @@ func (c *HTTPClient) SetPollInterval(interval time.Duration) { } func (c *HTTPClient) GetReviewBySHA(sha string) (*storage.Review, error) { - resp, err := c.httpClient.Get(fmt.Sprintf("%s/api/review?sha=%s", c.addr, sha)) + resp, err := c.httpClient.Get(fmt.Sprintf("%s/api/review?sha=%s", c.baseURL, sha)) if err != nil { return nil, err } @@ -111,7 +111,7 @@ func (c *HTTPClient) GetReviewBySHA(sha string) (*storage.Review, error) { } func (c *HTTPClient) GetReviewByJobID(jobID int64) (*storage.Review, error) { - resp, err := c.httpClient.Get(fmt.Sprintf("%s/api/review?job_id=%d", c.addr, jobID)) + resp, err := c.httpClient.Get(fmt.Sprintf("%s/api/review?job_id=%d", c.baseURL, jobID)) if err != nil { return nil, err } @@ -139,7 +139,7 @@ func (c *HTTPClient) MarkReviewClosed(jobID int64) error { "closed": true, }) - resp, err := c.httpClient.Post(c.addr+"/api/review/close", "application/json", bytes.NewReader(reqBody)) 
+ resp, err := c.httpClient.Post(c.baseURL+"/api/review/close", "application/json", bytes.NewReader(reqBody)) if err != nil { return err } @@ -160,7 +160,7 @@ func (c *HTTPClient) AddComment(jobID int64, commenter, comment string) error { "comment": comment, }) - resp, err := c.httpClient.Post(c.addr+"/api/comment", "application/json", bytes.NewReader(reqBody)) + resp, err := c.httpClient.Post(c.baseURL+"/api/comment", "application/json", bytes.NewReader(reqBody)) if err != nil { return err } @@ -181,7 +181,7 @@ func (c *HTTPClient) EnqueueReview(repoPath, gitRef, agentName string) (int64, e Agent: agentName, }) - resp, err := c.httpClient.Post(c.addr+"/api/enqueue", "application/json", bytes.NewReader(reqBody)) + resp, err := c.httpClient.Post(c.baseURL+"/api/enqueue", "application/json", bytes.NewReader(reqBody)) if err != nil { return 0, err } @@ -203,7 +203,7 @@ func (c *HTTPClient) EnqueueReview(repoPath, gitRef, agentName string) (int64, e func (c *HTTPClient) WaitForReview(jobID int64) (*storage.Review, error) { missingReviewAttempts := 0 for { - resp, err := c.httpClient.Get(fmt.Sprintf("%s/api/jobs?id=%d", c.addr, jobID)) + resp, err := c.httpClient.Get(fmt.Sprintf("%s/api/jobs?id=%d", c.baseURL, jobID)) if err != nil { return nil, fmt.Errorf("polling job %d: %w", jobID, err) } @@ -267,7 +267,7 @@ func (c *HTTPClient) FindJobForCommit(repoPath, sha string) (*storage.ReviewJob, // Query by git_ref and repo to avoid matching jobs from different repos queryURL := fmt.Sprintf("%s/api/jobs?git_ref=%s&repo=%s&limit=1", - c.addr, url.QueryEscape(sha), url.QueryEscape(normalizedRepo)) + c.baseURL, url.QueryEscape(sha), url.QueryEscape(normalizedRepo)) resp, err := c.httpClient.Get(queryURL) if err != nil { @@ -293,7 +293,7 @@ func (c *HTTPClient) FindJobForCommit(repoPath, sha string) (*storage.ReviewJob, // Fallback: if repo filter yielded no results, try git_ref only. 
// This handles worktrees where daemon stores the main repo root path // but the caller uses the worktree path. - fallbackURL := fmt.Sprintf("%s/api/jobs?git_ref=%s&limit=100", c.addr, url.QueryEscape(sha)) + fallbackURL := fmt.Sprintf("%s/api/jobs?git_ref=%s&limit=100", c.baseURL, url.QueryEscape(sha)) fallbackResp, err := c.httpClient.Get(fallbackURL) if err != nil { return nil, fmt.Errorf("fallback query for %s: %w", sha, err) @@ -347,7 +347,7 @@ func (c *HTTPClient) FindPendingJobForRef(repoPath, gitRef string) (*storage.Rev // Query for queued first, then running - this avoids pagination issues. for _, status := range []string{"queued", "running"} { queryURL := fmt.Sprintf("%s/api/jobs?git_ref=%s&repo=%s&status=%s&limit=1", - c.addr, url.QueryEscape(gitRef), url.QueryEscape(normalizedRepo), status) + c.baseURL, url.QueryEscape(gitRef), url.QueryEscape(normalizedRepo), status) resp, err := c.httpClient.Get(queryURL) if err != nil { @@ -377,7 +377,7 @@ func (c *HTTPClient) FindPendingJobForRef(repoPath, gitRef string) (*storage.Rev } func (c *HTTPClient) GetCommentsForJob(jobID int64) ([]storage.Response, error) { - resp, err := c.httpClient.Get(fmt.Sprintf("%s/api/comments?job_id=%d", c.addr, jobID)) + resp, err := c.httpClient.Get(fmt.Sprintf("%s/api/comments?job_id=%d", c.baseURL, jobID)) if err != nil { return nil, err } @@ -412,7 +412,7 @@ func (c *HTTPClient) Remap(req RemapRequest) (*RemapResult, error) { } resp, err := c.httpClient.Post( - c.addr+"/api/remap", "application/json", + c.baseURL+"/api/remap", "application/json", bytes.NewReader(reqBody), ) if err != nil { diff --git a/internal/daemon/client_test.go b/internal/daemon/client_test.go index c29564e18..d069fb8c1 100644 --- a/internal/daemon/client_test.go +++ b/internal/daemon/client_test.go @@ -47,7 +47,9 @@ func mockAPI(t *testing.T, handler http.HandlerFunc) *HTTPClient { t.Helper() s := httptest.NewServer(handler) t.Cleanup(s.Close) - return NewHTTPClient(s.URL) + ep, err := 
ParseEndpoint(s.URL) + require.NoError(t, err, "ParseEndpoint") + return NewHTTPClient(ep) } func assertRequest(t *testing.T, r *http.Request, method, path string) { diff --git a/internal/daemon/endpoint.go b/internal/daemon/endpoint.go new file mode 100644 index 000000000..76253cf6e --- /dev/null +++ b/internal/daemon/endpoint.go @@ -0,0 +1,139 @@ +package daemon + +import ( + "context" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +// MaxUnixPathLen is the platform socket path length limit. +// macOS/BSD: 104, Linux: 108. +var MaxUnixPathLen = func() int { + if runtime.GOOS == "darwin" { + return 104 + } + return 108 +}() + +// DaemonEndpoint encapsulates the transport type and address for the daemon. +type DaemonEndpoint struct { + Network string // "tcp" or "unix" + Address string // "127.0.0.1:7373" or "/tmp/roborev-1000/daemon.sock" +} + +// ParseEndpoint parses a server_addr config value into a DaemonEndpoint. +func ParseEndpoint(serverAddr string) (DaemonEndpoint, error) { + if serverAddr == "" { + return DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:7373"}, nil + } + + if after, ok := strings.CutPrefix(serverAddr, "http://"); ok { + return parseTCPEndpoint(after) + } + + if strings.HasPrefix(serverAddr, "unix://") { + return parseUnixEndpoint(serverAddr) + } + + return parseTCPEndpoint(serverAddr) +} + +func parseTCPEndpoint(addr string) (DaemonEndpoint, error) { + if !isLoopbackAddr(addr) { + return DaemonEndpoint{}, fmt.Errorf( + "daemon address %q must use a loopback host (127.0.0.1, localhost, or [::1])", addr) + } + return DaemonEndpoint{Network: "tcp", Address: addr}, nil +} + +func parseUnixEndpoint(raw string) (DaemonEndpoint, error) { + path := strings.TrimPrefix(raw, "unix://") + + if path == "" { + path = DefaultSocketPath() + // Fall through to validate the auto-generated path too + } + + if !filepath.IsAbs(path) { + return DaemonEndpoint{}, fmt.Errorf( + "unix socket path %q must be absolute", 
path) + } + + if strings.ContainsRune(path, 0) { + return DaemonEndpoint{}, fmt.Errorf( + "unix socket path contains null byte") + } + + if len(path) >= MaxUnixPathLen { + return DaemonEndpoint{}, fmt.Errorf( + "unix socket path %q (%d bytes) exceeds platform limit of %d bytes", + path, len(path), MaxUnixPathLen) + } + + return DaemonEndpoint{Network: "unix", Address: path}, nil +} + +// DefaultSocketPath returns the auto-generated socket path under os.TempDir(). +func DefaultSocketPath() string { + return filepath.Join(os.TempDir(), fmt.Sprintf("roborev-%d", os.Getuid()), "daemon.sock") +} + +// IsUnix returns true if this endpoint uses a Unix domain socket. +func (e DaemonEndpoint) IsUnix() bool { + return e.Network == "unix" +} + +// BaseURL returns the HTTP base URL for constructing API requests. +func (e DaemonEndpoint) BaseURL() string { + if e.IsUnix() { + return "http://localhost" + } + return "http://" + e.Address +} + +// HTTPClient returns an http.Client configured for this endpoint's transport. +func (e DaemonEndpoint) HTTPClient(timeout time.Duration) *http.Client { + if e.IsUnix() { + return &http.Client{ + Timeout: timeout, + Transport: &http.Transport{ + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "unix", e.Address) + }, + DisableKeepAlives: true, + Proxy: nil, // Unix sockets are local; never proxy + }, + } + } + return &http.Client{Timeout: timeout} +} + +// Listener creates a net.Listener bound to this endpoint. +func (e DaemonEndpoint) Listener() (net.Listener, error) { + return net.Listen(e.Network, e.Address) +} + +// String returns a human-readable representation for logging. +func (e DaemonEndpoint) String() string { + return e.Network + ":" + e.Address +} + +// Port returns the TCP port, or 0 for Unix sockets. 
+func (e DaemonEndpoint) Port() int { + if e.IsUnix() { + return 0 + } + _, portStr, err := net.SplitHostPort(e.Address) + if err != nil { + return 0 + } + port := 0 + _, _ = fmt.Sscanf(portStr, "%d", &port) + return port +} diff --git a/internal/daemon/endpoint_test.go b/internal/daemon/endpoint_test.go new file mode 100644 index 000000000..82c2b3d12 --- /dev/null +++ b/internal/daemon/endpoint_test.go @@ -0,0 +1,180 @@ +package daemon + +import ( + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseEndpoint(t *testing.T) { + tests := []struct { + name string + input string + network string + wantErr string + }{ + {"empty defaults to TCP", "", "tcp", ""}, + {"bare host:port", "127.0.0.1:7373", "tcp", ""}, + {"ipv6 loopback", "[::1]:7373", "tcp", ""}, + {"localhost", "localhost:7373", "tcp", ""}, + {"http prefix stripped", "http://127.0.0.1:7373", "tcp", ""}, + {"unix auto", "unix://", "unix", ""}, // skipped on Windows (no os.Getuid) + {"unix explicit path", "unix:///tmp/test.sock", "unix", ""}, // skipped on Windows (not absolute) + {"non-loopback rejected", "192.168.1.1:7373", "", "loopback"}, + {"relative unix rejected", "unix://relative.sock", "", "absolute"}, + {"http non-loopback rejected", "http://192.168.1.1:7373", "", "loopback"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if runtime.GOOS == "windows" && strings.HasPrefix(tt.input, "unix://") { + t.Skip("Unix sockets not supported on Windows") + } + ep, err := ParseEndpoint(tt.input) + if tt.wantErr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.wantErr) + return + } + require.NoError(t, err) + assert.Equal(t, tt.network, ep.Network) + if tt.network == "tcp" { + assert.NotEmpty(t, ep.Address) + } + }) + } +} + +func TestParseEndpoint_UnixPathTooLong(t *testing.T) { + if runtime.GOOS == "windows" { + 
t.Skip("Unix sockets not supported on Windows") + } + long := "unix:///" + strings.Repeat("a", MaxUnixPathLen) + _, err := ParseEndpoint(long) + require.Error(t, err) + assert.Contains(t, err.Error(), "exceeds") +} + +func TestParseEndpoint_UnixNullByte(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Unix sockets not supported on Windows") + } + _, err := ParseEndpoint("unix:///tmp/bad\x00.sock") + require.Error(t, err) +} + +func TestDefaultSocketPath(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Unix sockets not supported on Windows") + } + path := DefaultSocketPath() + assert.Less(t, len(path), MaxUnixPathLen, + "default socket path %q (%d bytes) exceeds limit %d", path, len(path), MaxUnixPathLen) + assert.Contains(t, path, "roborev-") + assert.True(t, strings.HasSuffix(path, "daemon.sock")) +} + +func TestDaemonEndpoint_BaseURL(t *testing.T) { + assert := assert.New(t) + + tcp := DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:7373"} + assert.Equal("http://127.0.0.1:7373", tcp.BaseURL()) + + unix := DaemonEndpoint{Network: "unix", Address: "/tmp/test.sock"} + assert.Equal("http://localhost", unix.BaseURL()) +} + +func TestDaemonEndpoint_String(t *testing.T) { + assert := assert.New(t) + + tcp := DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:7373"} + assert.Equal("tcp:127.0.0.1:7373", tcp.String()) + + unix := DaemonEndpoint{Network: "unix", Address: "/tmp/test.sock"} + assert.Equal("unix:/tmp/test.sock", unix.String()) +} + +func TestDaemonEndpoint_Listener_TCP(t *testing.T) { + ep := DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:0"} + ln, err := ep.Listener() + require.NoError(t, err) + defer ln.Close() + assert.Contains(t, ln.Addr().String(), "127.0.0.1:") +} + +func TestDaemonEndpoint_Listener_Unix(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Unix sockets not supported on Windows") + } + sockPath := filepath.Join("/tmp", fmt.Sprintf("roborev-test-%d.sock", os.Getpid())) + t.Cleanup(func() { 
os.Remove(sockPath) }) + + ep := DaemonEndpoint{Network: "unix", Address: sockPath} + ln, err := ep.Listener() + require.NoError(t, err) + defer ln.Close() + assert.Equal(t, sockPath, ln.Addr().String()) +} + +func TestDaemonEndpoint_HTTPClient_UnixRoundTrip(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Unix sockets not supported on Windows") + } + sockPath := filepath.Join("/tmp", fmt.Sprintf("roborev-test-%d.sock", os.Getpid())) + t.Cleanup(func() { os.Remove(sockPath) }) + + ln, err := net.Listen("unix", sockPath) + require.NoError(t, err) + defer ln.Close() + + srv := &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("ok")) + })} + go srv.Serve(ln) + defer srv.Close() + + ep := DaemonEndpoint{Network: "unix", Address: sockPath} + client := ep.HTTPClient(2 * time.Second) + resp, err := client.Get(ep.BaseURL() + "/test") + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, http.StatusOK, resp.StatusCode) +} + +func TestDaemonEndpoint_HTTPClient_UnixIgnoresProxy(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Unix sockets not supported on Windows") + } + sockPath := filepath.Join("/tmp", fmt.Sprintf("roborev-test-%d.sock", os.Getpid())) + t.Cleanup(func() { os.Remove(sockPath) }) + + ln, err := net.Listen("unix", sockPath) + require.NoError(t, err) + defer ln.Close() + + srv := &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("ok")) + })} + go srv.Serve(ln) + defer srv.Close() + + // Set proxy env vars that would break the request if honored + t.Setenv("HTTP_PROXY", "http://nonexistent-proxy.invalid:9999") + t.Setenv("HTTPS_PROXY", "http://nonexistent-proxy.invalid:9999") + + ep := DaemonEndpoint{Network: "unix", Address: sockPath} + client := ep.HTTPClient(2 * time.Second) + resp, err := client.Get(ep.BaseURL() + "/test") + require.NoError(t, err, "Unix socket client should bypass HTTP_PROXY") + defer 
resp.Body.Close() + assert.Equal(t, http.StatusOK, resp.StatusCode) +} diff --git a/internal/daemon/kill_unix.go b/internal/daemon/kill_unix.go index d44978dbd..6ad4c07ab 100644 --- a/internal/daemon/kill_unix.go +++ b/internal/daemon/kill_unix.go @@ -122,6 +122,14 @@ func looksLikeFlagValue(token string) bool { return false } +// isProcessAlive checks whether a process with the given PID exists. +// It uses signal 0 which doesn't actually send a signal but checks for existence. +func isProcessAlive(pid int) bool { + process, _ := os.FindProcess(pid) + err := process.Signal(syscall.Signal(0)) + return err == nil || errors.Is(err, syscall.EPERM) +} + // killProcess kills a process by PID on Unix systems. // Returns true only if the process is confirmed dead. // Verifies the process is a roborev daemon before killing to prevent diff --git a/internal/daemon/kill_windows.go b/internal/daemon/kill_windows.go index a0293cacc..184d290dd 100644 --- a/internal/daemon/kill_windows.go +++ b/internal/daemon/kill_windows.go @@ -171,6 +171,11 @@ func looksLikeFlagValue(token string) bool { return false } +// isProcessAlive checks whether a process with the given PID exists. +func isProcessAlive(pid int) bool { + return processExists(pid) +} + // killProcess kills a process by PID on Windows. // Returns true only if the process is confirmed dead. // Verifies the process is a roborev daemon before killing to prevent diff --git a/internal/daemon/runtime.go b/internal/daemon/runtime.go index 7356b3f1e..c814548ed 100644 --- a/internal/daemon/runtime.go +++ b/internal/daemon/runtime.go @@ -22,10 +22,21 @@ type RuntimeInfo struct { PID int `json:"pid"` Addr string `json:"addr"` Port int `json:"port"` + Network string `json:"network"` Version string `json:"version"` SourcePath string `json:"-"` // Path to the runtime file (not serialized, set by ListAllRuntimes) } +// Endpoint returns a DaemonEndpoint for this runtime. 
An empty Network defaults to "tcp" +// for backwards compatibility with old runtime files that predate the Network field. +func (r RuntimeInfo) Endpoint() DaemonEndpoint { + network := r.Network + if network == "" { + network = "tcp" + } + return DaemonEndpoint{Network: network, Address: r.Addr} +} + // PingInfo is the minimal daemon identity payload used for liveness probes. type PingInfo struct { Service string `json:"service"` @@ -50,11 +61,12 @@ func LegacyRuntimePath() string { // WriteRuntime saves the daemon runtime info atomically. // Uses write-to-temp-then-rename to prevent readers from seeing partial writes. -func WriteRuntime(addr string, port int, version string) error { +func WriteRuntime(ep DaemonEndpoint, version string) error { info := RuntimeInfo{ PID: os.Getpid(), - Addr: addr, - Port: port, + Addr: ep.Address, + Port: ep.Port(), + Network: ep.Network, Version: version, } @@ -209,7 +221,7 @@ func GetAnyRunningDaemon() (*RuntimeInfo, error) { // Only return a daemon that's actually responding for _, info := range runtimes { - if IsDaemonAlive(info.Addr) { + if IsDaemonAlive(info.Endpoint()) { return info, nil } } @@ -217,32 +229,30 @@ func GetAnyRunningDaemon() (*RuntimeInfo, error) { return nil, os.ErrNotExist } -// ProbeDaemon validates that a loopback address is serving the roborev daemon. +// ProbeDaemon validates that a daemon endpoint is serving the roborev daemon. // It prefers the lightweight /api/ping endpoint and falls back to /api/status // for older daemon versions that do not implement /api/ping yet. 
-func ProbeDaemon(addr string, timeout time.Duration) (*PingInfo, error) { - if addr == "" { +func ProbeDaemon(ep DaemonEndpoint, timeout time.Duration) (*PingInfo, error) { + if ep.Address == "" { return nil, fmt.Errorf("empty daemon address") } - - if !isLoopbackAddr(addr) { - return nil, fmt.Errorf("non-loopback daemon address: %s", addr) + if !ep.IsUnix() && !isLoopbackAddr(ep.Address) { + return nil, fmt.Errorf("non-loopback daemon address: %s", ep.Address) } - - client := &http.Client{Timeout: timeout} - if info, shouldFallback, err := probeDaemonPing(client, addr); !shouldFallback { + client := ep.HTTPClient(timeout) + baseURL := ep.BaseURL() + if info, shouldFallback, err := probeDaemonPing(client, baseURL); !shouldFallback { return info, err } - - return probeLegacyDaemonStatus(client, addr) + return probeLegacyDaemonStatus(client, baseURL) } -// IsDaemonAlive checks if a daemon at the given address is actually responding. +// IsDaemonAlive checks if a daemon at the given endpoint is actually responding. // This is more reliable than checking PID and works cross-platform. -// Only allows loopback addresses to prevent SSRF via malicious runtime files. +// Only allows loopback addresses (for TCP) to prevent SSRF via malicious runtime files. // Uses retry logic to avoid misclassifying a slow or transiently failing daemon. 
-func IsDaemonAlive(addr string) bool { - if addr == "" { +func IsDaemonAlive(ep DaemonEndpoint) bool { + if ep.Address == "" { return false } @@ -251,15 +261,15 @@ func IsDaemonAlive(addr string) bool { if attempt > 0 { time.Sleep(200 * time.Millisecond) } - if _, err := ProbeDaemon(addr, 1*time.Second); err == nil { + if _, err := ProbeDaemon(ep, 1*time.Second); err == nil { return true } } return false } -func probeDaemonPing(client *http.Client, addr string) (*PingInfo, bool, error) { - resp, err := client.Get(fmt.Sprintf("http://%s/api/ping", addr)) +func probeDaemonPing(client *http.Client, baseURL string) (*PingInfo, bool, error) { + resp, err := client.Get(baseURL + "/api/ping") if err != nil { return nil, false, err } @@ -282,8 +292,8 @@ func probeDaemonPing(client *http.Client, addr string) (*PingInfo, bool, error) } } -func probeLegacyDaemonStatus(client *http.Client, addr string) (*PingInfo, error) { - resp, err := client.Get(fmt.Sprintf("http://%s/api/status", addr)) +func probeLegacyDaemonStatus(client *http.Client, baseURL string) (*PingInfo, error) { + resp, err := client.Get(baseURL + "/api/status") if err != nil { return nil, err } @@ -315,31 +325,6 @@ func probeLegacyDaemonStatus(client *http.Client, addr string) (*PingInfo, error }, nil } -func validateDaemonBindAddr(addr string) error { - if addr == "" { - return nil - } - - host, _, err := parseDaemonBindAddr(addr) - if err != nil { - return err - } - if host == "" { - return fmt.Errorf( - "daemon server address %q must use an explicit loopback host (127.0.0.1, localhost, or [::1])", - addr, - ) - } - if !isLoopbackAddr(addr) { - return fmt.Errorf( - "daemon server address %q must use a loopback host (127.0.0.1, localhost, or [::1])", - addr, - ) - } - - return nil -} - func parseDaemonBindAddr(addr string) (string, int, error) { if addr == "" { return "127.0.0.1", 7373, nil @@ -396,8 +381,14 @@ func KillDaemon(info *RuntimeInfo) bool { return true } - // Helper to remove the runtime file 
using SourcePath if available, otherwise by PID + ep := info.Endpoint() + + // Helper to remove the runtime file using SourcePath if available, otherwise by PID. + // Also cleans up Unix domain sockets. removeRuntimeFile := func() { + if ep.IsUnix() { + os.Remove(ep.Address) + } if info.SourcePath != "" { os.Remove(info.SourcePath) } else if info.PID > 0 { @@ -405,16 +396,16 @@ func KillDaemon(info *RuntimeInfo) bool { } } - // First try graceful HTTP shutdown (only for loopback addresses) - if info.Addr != "" && isLoopbackAddr(info.Addr) { - client := &http.Client{Timeout: 2 * time.Second} - resp, err := client.Post(fmt.Sprintf("http://%s/api/shutdown", info.Addr), "application/json", nil) + // First try graceful HTTP shutdown + if ep.Address != "" { + client := ep.HTTPClient(2 * time.Second) + resp, err := client.Post(ep.BaseURL()+"/api/shutdown", "application/json", nil) if err == nil { resp.Body.Close() // Wait for graceful shutdown for range 10 { time.Sleep(200 * time.Millisecond) - if !IsDaemonAlive(info.Addr) { + if !IsDaemonAlive(ep) { removeRuntimeFile() return true } @@ -434,7 +425,7 @@ func KillDaemon(info *RuntimeInfo) bool { } // No valid PID, just check if it's still alive - if info.Addr != "" && !IsDaemonAlive(info.Addr) { + if ep.Address != "" && !IsDaemonAlive(ep) { removeRuntimeFile() return true } @@ -452,8 +443,23 @@ func CleanupZombieDaemons() int { cleaned := 0 for _, info := range runtimes { + ep := info.Endpoint() + + // For Unix sockets, check PID liveness first to avoid slow HTTP probes + // against sockets whose owner process is already dead. 
+ if ep.IsUnix() && info.PID > 0 && !isProcessAlive(info.PID) { + os.Remove(ep.Address) + if info.SourcePath != "" { + os.Remove(info.SourcePath) + } else { + RemoveRuntimeForPID(info.PID) + } + cleaned++ + continue + } + // Skip responsive daemons - if IsDaemonAlive(info.Addr) { + if IsDaemonAlive(ep) { continue } @@ -471,7 +477,7 @@ func CleanupZombieDaemons() int { if data, err := os.ReadFile(legacyPath); err == nil { var info RuntimeInfo if json.Unmarshal(data, &info) == nil { - if !IsDaemonAlive(info.Addr) { + if !IsDaemonAlive(info.Endpoint()) { // Legacy file points to dead daemon, remove it os.Remove(legacyPath) } diff --git a/internal/daemon/runtime_test.go b/internal/daemon/runtime_test.go index 579fb448a..0e52ba605 100644 --- a/internal/daemon/runtime_test.go +++ b/internal/daemon/runtime_test.go @@ -162,7 +162,7 @@ func TestRuntimeInfoReadWrite(t *testing.T) { t.Run("WriteAndRead", func(t *testing.T) { // Write runtime info - err := WriteRuntime(defaultTestAddr, defaultTestPort, "test-version") + err := WriteRuntime(DaemonEndpoint{Network: "tcp", Address: defaultTestAddr}, "test-version") if err != nil { require.Condition(t, func() bool { return false @@ -401,7 +401,7 @@ func TestProbeDaemonPrefersPing(t *testing.T) { }) }) - info, err := ProbeDaemon(addr, time.Second) + info, err := ProbeDaemon(DaemonEndpoint{Network: "tcp", Address: addr}, time.Second) if err != nil { require.Condition(t, func() bool { return false @@ -428,7 +428,7 @@ func TestProbeDaemonFallsBackToLegacyStatus(t *testing.T) { _ = json.NewEncoder(w).Encode(map[string]string{"version": "v-legacy"}) }) - info, err := ProbeDaemon(addr, time.Second) + info, err := ProbeDaemon(DaemonEndpoint{Network: "tcp", Address: addr}, time.Second) if err != nil { require.Condition(t, func() bool { return false @@ -482,7 +482,7 @@ func TestIsDaemonAliveLegacyStatusCodes(t *testing.T) { _ = json.NewEncoder(w).Encode(map[string]string{"version": "v-legacy"}) } }) - got := IsDaemonAlive(addr) + got := 
IsDaemonAlive(DaemonEndpoint{Network: "tcp", Address: addr}) if got != tt.wantAlive { assert.Condition(t, func() bool { return false @@ -492,6 +492,38 @@ func TestIsDaemonAliveLegacyStatusCodes(t *testing.T) { } } +func TestRuntimeInfo_Endpoint(t *testing.T) { + assert := assert.New(t) + + // TCP with explicit network + info := RuntimeInfo{PID: 1, Addr: "127.0.0.1:7373", Port: 7373, Network: "tcp"} + ep := info.Endpoint() + assert.Equal("tcp", ep.Network) + assert.Equal("127.0.0.1:7373", ep.Address) + + // Unix + info = RuntimeInfo{PID: 1, Addr: "/tmp/test.sock", Port: 0, Network: "unix"} + ep = info.Endpoint() + assert.Equal("unix", ep.Network) + assert.Equal("/tmp/test.sock", ep.Address) + + // Empty network defaults to TCP (backwards compat) + info = RuntimeInfo{PID: 1, Addr: "127.0.0.1:7373", Port: 7373, Network: ""} + ep = info.Endpoint() + assert.Equal("tcp", ep.Network) +} + +func TestRuntimeInfo_BackwardsCompat_NoNetworkField(t *testing.T) { + // Simulate old JSON without "network" field + data := []byte(`{"pid": 1234, "addr": "127.0.0.1:7373", "port": 7373, "version": "0.47.0"}`) + var info RuntimeInfo + require.NoError(t, json.Unmarshal(data, &info)) + assert.Empty(t, info.Network) + ep := info.Endpoint() + assert.Equal(t, "tcp", ep.Network) + assert.Equal(t, "127.0.0.1:7373", ep.Address) +} + func TestListAllRuntimesWithGlobMetacharacters(t *testing.T) { // Create a temp directory with glob metacharacters in the name tmpDir := t.TempDir() diff --git a/internal/daemon/server.go b/internal/daemon/server.go index 4571900fa..4560fd552 100644 --- a/internal/daemon/server.go +++ b/internal/daemon/server.go @@ -38,6 +38,8 @@ type Server struct { errorLog *ErrorLog activityLog *ActivityLog startTime time.Time + endpointMu sync.Mutex // protects endpoint (written by Start, read by Stop) + endpoint DaemonEndpoint // Cached machine ID to avoid INSERT on every status request machineIDMu sync.Mutex @@ -121,7 +123,9 @@ func NewServer(db *storage.DB, cfg 
*config.Config, configPath string) *Server { // Start begins the server and worker pool func (s *Server) Start(ctx context.Context) error { cfg := s.configWatcher.Config() - if err := validateDaemonBindAddr(cfg.ServerAddr); err != nil { + + ep, err := ParseEndpoint(cfg.ServerAddr) + if err != nil { return err } @@ -138,7 +142,7 @@ func (s *Server) Start(ctx context.Context) error { } // Check if a responsive daemon is still running after cleanup - if info, err := GetAnyRunningDaemon(); err == nil && IsDaemonAlive(info.Addr) { + if info, err := GetAnyRunningDaemon(); err == nil && IsDaemonAlive(info.Endpoint()) { return fmt.Errorf("daemon already running (pid %d on %s)", info.PID, info.Addr) } @@ -153,29 +157,61 @@ func (s *Server) Start(ctx context.Context) error { // Continue without hot-reloading - not a fatal error } - // Find available port - addr, port, err := FindAvailablePort(cfg.ServerAddr) - if err != nil { - s.configWatcher.Stop() - return fmt.Errorf("find available port: %w", err) - } - s.httpServer.Addr = addr - // Bind the listener before publishing runtime metadata so concurrent CLI // invocations cannot race a half-started daemon and kill it as a zombie. 
- listener, err := net.Listen("tcp", addr) - if err != nil { - s.configWatcher.Stop() - return fmt.Errorf("listen on %s: %w", addr, err) - } - addr = listener.Addr().String() - if tcpAddr, ok := listener.Addr().(*net.TCPAddr); ok { - port = tcpAddr.Port + var listener net.Listener + if ep.IsUnix() { + socketPath := ep.Address + socketDir := filepath.Dir(socketPath) + if err := os.MkdirAll(socketDir, 0700); err != nil { + s.configWatcher.Stop() + return fmt.Errorf("create socket directory: %w", err) + } + // Verify the parent directory has safe permissions (owner-only) + if fi, err := os.Stat(socketDir); err == nil { + if perm := fi.Mode().Perm(); perm&0077 != 0 { + s.configWatcher.Stop() + return fmt.Errorf("socket directory %s has unsafe permissions %o (must not be group/world accessible)", socketDir, perm) + } + } + // Remove stale socket from a previous run + os.Remove(socketPath) + listener, err = ep.Listener() + if err != nil { + s.configWatcher.Stop() + return fmt.Errorf("listen on %s: %w", ep, err) + } + if err := os.Chmod(socketPath, 0600); err != nil { + _ = listener.Close() + s.configWatcher.Stop() + return fmt.Errorf("chmod socket: %w", err) + } + } else { + // TCP: find an available port first + addr, _, err := FindAvailablePort(ep.Address) + if err != nil { + s.configWatcher.Stop() + return fmt.Errorf("find available port: %w", err) + } + ep = DaemonEndpoint{Network: "tcp", Address: addr} + s.httpServer.Addr = addr + + listener, err = ep.Listener() + if err != nil { + s.configWatcher.Stop() + return fmt.Errorf("listen on %s: %w", ep, err) + } + // Update ep with actual bound address + ep = DaemonEndpoint{Network: "tcp", Address: listener.Addr().String()} + s.httpServer.Addr = ep.Address } - s.httpServer.Addr = addr + + s.endpointMu.Lock() + s.endpoint = ep + s.endpointMu.Unlock() serveErrCh := make(chan error, 1) - log.Printf("Starting HTTP server on %s", addr) + log.Printf("Starting HTTP server on %s", ep) go func() { serveErrCh <- 
s.httpServer.Serve(listener) }() @@ -183,7 +219,7 @@ func (s *Server) Start(ctx context.Context) error { // Start worker pool before advertising availability. s.workerPool.Start() - ready, serveExited, err := waitForServerReady(ctx, addr, 2*time.Second, serveErrCh) + ready, serveExited, err := waitForServerReady(ctx, ep, 2*time.Second, serveErrCh) if err != nil { _ = listener.Close() s.configWatcher.Stop() @@ -200,7 +236,7 @@ func (s *Server) Start(ctx context.Context) error { } // Write runtime info only after the HTTP server is accepting requests. - if err := WriteRuntime(addr, port, version.Version); err != nil { + if err := WriteRuntime(ep, version.Version); err != nil { log.Printf("Warning: failed to write runtime info: %v", err) } @@ -209,11 +245,11 @@ func (s *Server) Start(ctx context.Context) error { binary, _ := os.Executable() s.activityLog.Log( "daemon.started", "server", - fmt.Sprintf("daemon started on %s", addr), + fmt.Sprintf("daemon started on %s", ep), map[string]string{ "version": version.Version, "binary": binary, - "addr": addr, + "addr": ep.Address, "pid": strconv.Itoa(os.Getpid()), "workers": strconv.Itoa(cfg.MaxWorkers), }, @@ -236,7 +272,7 @@ func (s *Server) Start(ctx context.Context) error { return nil } -func waitForServerReady(ctx context.Context, addr string, timeout time.Duration, serveErrCh <-chan error) (bool, bool, error) { +func waitForServerReady(ctx context.Context, ep DaemonEndpoint, timeout time.Duration, serveErrCh <-chan error) (bool, bool, error) { deadline := time.Now().Add(timeout) var lastErr error @@ -255,7 +291,7 @@ func waitForServerReady(ctx context.Context, addr string, timeout time.Duration, return false, true, err default: } - if _, err := ProbeDaemon(addr, 200*time.Millisecond); err == nil { + if _, err := ProbeDaemon(ep, 200*time.Millisecond); err == nil { return true, false, nil } else { lastErr = err @@ -280,7 +316,7 @@ func waitForServerReady(ctx context.Context, addr string, timeout time.Duration, if lastErr 
== nil { lastErr = fmt.Errorf("server did not respond before timeout") } - return false, false, fmt.Errorf("daemon failed to become ready on %s within %s: %w", addr, timeout, lastErr) + return false, false, fmt.Errorf("daemon failed to become ready on %s within %s: %w", ep, timeout, lastErr) } func awaitServeExitOnUnreadyStartup(serveExited bool, serveErrCh <-chan error) error { @@ -332,6 +368,14 @@ func (s *Server) Stop() error { log.Printf("HTTP server shutdown error: %v", err) } + // Clean up Unix domain socket + s.endpointMu.Lock() + ep := s.endpoint + s.endpointMu.Unlock() + if ep.IsUnix() { + os.Remove(ep.Address) + } + // Stop CI poller if s.ciPoller != nil { s.ciPoller.Stop() diff --git a/internal/daemon/server_test.go b/internal/daemon/server_test.go index d7fca0d7e..3b87d5818 100644 --- a/internal/daemon/server_test.go +++ b/internal/daemon/server_test.go @@ -205,7 +205,7 @@ func TestWaitForServerReadySurfacesServeError(t *testing.T) { wantErr := errors.New("serve failed") serveErrCh <- wantErr - ready, serveExited, err := waitForServerReady(context.Background(), "127.0.0.1:1", 50*time.Millisecond, serveErrCh) + ready, serveExited, err := waitForServerReady(context.Background(), DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:1"}, 50*time.Millisecond, serveErrCh) if ready { require.Condition(t, func() bool { return false @@ -230,7 +230,7 @@ func TestWaitForServerReadyLeavesServeExitUnreadWhenContextAlreadyCanceled(t *te serveErrCh := make(chan error, 1) serveErrCh <- http.ErrServerClosed - ready, serveExited, err := waitForServerReady(ctx, "127.0.0.1:1", 50*time.Millisecond, serveErrCh) + ready, serveExited, err := waitForServerReady(ctx, DaemonEndpoint{Network: "tcp", Address: "127.0.0.1:1"}, 50*time.Millisecond, serveErrCh) if ready { require.Condition(t, func() bool { return false