diff --git a/client/README.md b/client/README.md new file mode 100644 index 0000000..7d1c3a1 --- /dev/null +++ b/client/README.md @@ -0,0 +1,676 @@ +# Cloud Team Agent — Client SDK + +Cross-machine multi-agent collaboration SDK. One machine acts as the **Leader** (plans work, handles approvals), any number of machines act as **Teammates** (execute tasks, answer explore queries). All communication flows through the cloud control plane. + +``` +Leader ──REST/WS──▶ Cloud Control Plane ──WS──▶ Teammate A + ──WS──▶ Teammate B +``` + +Both a **Go** package and a **TypeScript** package are provided — they expose identical plugin interfaces so you can swap host environments without rewriting business logic. + +--- + +## Table of Contents + +1. [Architecture](#architecture) +2. [Go SDK](#go-sdk) + - [Installation](#go-installation) + - [Quick Start — Leader](#go-leader-quick-start) + - [Quick Start — Teammate](#go-teammate-quick-start) +3. [TypeScript SDK](#typescript-sdk) + - [Installation](#ts-installation) + - [Quick Start — Leader](#ts-leader-quick-start) + - [Quick Start — Teammate](#ts-teammate-quick-start) +4. [Plugin Reference](#plugin-reference) +5. [Config Reference](#config-reference) +6. [Task DAG & Dependencies](#task-dag--dependencies) +7. [Repo Affinity](#repo-affinity) +8. [REST API Reference](#rest-api-reference) +9. [WebSocket Event Reference](#websocket-event-reference) +10. [Examples](#examples) + +--- + +## Architecture + +### Roles + +| Role | Responsibilities | +|------|-----------------| +| **Leader** | Creates / joins a session, decomposes goals into a task DAG, distributes tasks to Teammates, handles approvals | +| **Teammate** | Joins a session, executes assigned tasks, answers remote explore requests, requests approvals for risky operations | + +### Session lifecycle + +``` +1. Leader → POST /api/team/sessions (create session) +2. Leader → POST /sessions/:id/leader/elect (acquire leader lock) +3. 
Teammate → POST /sessions/:id/members (register machine) +4. Both → WS /ws/sessions/:id (open event channel) +5. Leader → POST /sessions/:id/tasks (submit task plan) +6. Server → WS task.assigned ──▶ Teammate (dispatch tasks) +7. Teammate → WS task.progress/complete/fail (report execution) +8. Teammate → WS approval.request ──▶ Leader (ask permission) +9. Leader → PATCH /approvals/:id (respond to approval) +``` + +### Plugin architecture + +The SDK exposes four extension points. Register only the ones your role needs: + +``` +┌─────────────┐ PlanTasks() ┌──────────────────────┐ +│ LeaderPlugin│◀──────────────────── │ Client (leader role) │ +└─────────────┘ └──────────────────────┘ + │ +┌──────────────┐ HandleApproval() │ approval.push +│ApprovalPlugin│◀──────────────────────────────┤ +└──────────────┘ │ + │ +┌───────────────┐ ExecuteTask() ┌──────────────────────┐ +│TeammatePlugin │◀───────────────── │ Client (teammate role)│ +└───────────────┘ └──────────────────────┘ + │ +┌───────────────┐ Explore() │ explore.request +│ ExplorePlugin │◀──────────────────────────────┘ +└───────────────┘ +``` + +--- + +## Go SDK + +### Go Installation + +The Go SDK lives in `client/go/` in the same `go.work` workspace as the server. + +If you're working inside the monorepo: + +```bash +# The go.work at the repo root already includes client/go. 
+# Just import the package in your code: +import "github.com/costrict/costrict-web/client/go/team" +``` + +If you're using it as a standalone dependency: + +```bash +go get github.com/costrict/costrict-web/client/go@latest +``` + +### Go Leader Quick Start + +```go +package main + +import ( + "context" + "log" + "os" + "os/signal" + + "github.com/costrict/costrict-web/client/go/team" +) + +func main() { + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) + defer stop() + + c := team.New(team.Config{ + ServerURL: "https://api.example.com", + Token: os.Getenv("TEAM_TOKEN"), + SessionID: os.Getenv("TEAM_SESSION_ID"), + MachineID: "leader-machine-1", + MachineName: "My Leader", + Role: team.MemberRoleLeader, + }). + WithLeaderPlugin(&MyPlanner{}). + WithApprovalPlugin(&CLIApprover{}) + + // SubmitPlan can be called after Start in a separate goroutine. + go func() { + if err := c.SubmitPlan(ctx, "refactor the auth module"); err != nil { + log.Println("plan error:", err) + } + }() + + if err := c.Start(ctx); err != nil { + log.Println("client stopped:", err) + } +} +``` + +### Go Teammate Quick Start + +```go +c := team.New(team.Config{ + ServerURL: "https://api.example.com", + Token: os.Getenv("TEAM_TOKEN"), + SessionID: os.Getenv("TEAM_SESSION_ID"), + MachineID: "teammate-machine-a", + MachineName: "Machine A", + Role: team.MemberRoleTeammate, +}). + WithTeammatePlugin(&ShellExecutor{}). + WithExplorePlugin(&LocalExplorer{}). 
+ WithApprovalPlugin(&CLIApprover{}) + +if err := c.Start(ctx); err != nil { + log.Println("client stopped:", err) +} +``` + +--- + +## TypeScript SDK + +### TS Installation + +```bash +# Inside the monorepo (portal or other package): +npm install ../client/ts + +# Or from npm (once published): +npm install @costrict/team-client +# Peer dependency for Node.js environments: +npm install ws +``` + +### TS Leader Quick Start + +```ts +import { TeamClient, MemberRoleLeader } from '@costrict/team-client'; + +const ac = new AbortController(); +process.on('SIGINT', () => ac.abort()); + +const client = new TeamClient({ + serverUrl: 'https://api.example.com', + token: process.env.TEAM_TOKEN!, + sessionId: process.env.TEAM_SESSION_ID!, + machineId: 'leader-machine-1', + machineName: 'My Leader', + role: MemberRoleLeader, +}) + .withLeaderPlugin(new MyPlanner()) + .withApprovalPlugin(new CLIApprover()); + +// Submit a plan once the client is connected. +client.start(ac.signal).catch(console.error); + +setTimeout(async () => { + await client.submitPlan('refactor the auth module'); +}, 2000); +``` + +### TS Teammate Quick Start + +```ts +import { TeamClient, MemberRoleTeammate } from '@costrict/team-client'; + +const client = new TeamClient({ + serverUrl: 'https://api.example.com', + token: process.env.TEAM_TOKEN!, + sessionId: process.env.TEAM_SESSION_ID!, + machineId: 'teammate-machine-a', + machineName: 'Machine A', + role: MemberRoleTeammate, +}) + .withTeammatePlugin(new ShellExecutor()) + .withExplorePlugin(new LocalExplorer()) + .withApprovalPlugin(new CLIApprover()); + +await client.start(ac.signal); +``` + +--- + +## Plugin Reference + +### LeaderPlugin + +Called when `client.SubmitPlan(goal)` is invoked. Receives session context (including the list of online Teammates) and must return an ordered list of `TaskSpec`s. 
+
+**Go:**
+```go
+type LeaderPlugin interface {
+	PlanTasks(ctx context.Context, req team.PlanTasksInput) ([]team.TaskSpec, error)
+}
+
+// PlanTasksInput provides context for planning decisions:
+type PlanTasksInput struct {
+	Goal      string
+	SessionID string
+	Members   []Member // online session participants
+}
+```
+
+**TypeScript:**
+```ts
+interface LeaderPlugin {
+	planTasks(signal: AbortSignal, req: PlanTasksInput): Promise<TaskSpec[]>;
+}
+```
+
+**Implementation tips:**
+- Call your LLM with `req.Goal` and the member list to produce an intelligent plan
+- Pre-set `TaskSpec.ID` (UUID) on each spec to wire up `Dependencies` across the batch
+- Set `TaskSpec.AssignedMemberID` using a member's `id` from `req.Members` to target a specific machine
+- Use `TaskSpec.RepoAffinity` with repo remote URLs to let the server schedule to the right machine
+- Return an empty slice to cancel the plan
+
+**Example (Go):**
+```go
+type LLMPlanner struct{ llmClient *openai.Client }
+
+func (p *LLMPlanner) PlanTasks(ctx context.Context, req team.PlanTasksInput) ([]team.TaskSpec, error) {
+	prompt := fmt.Sprintf("Break this goal into coding tasks: %s\nTeammates: %v", req.Goal, req.Members)
+	// ... call LLM, parse result into TaskSpecs ...
+	idA := uuid.New().String()
+	idB := uuid.New().String()
+	return []team.TaskSpec{
+		{ID: idA, Description: "Write tests for auth module", Priority: 9,
+			RepoAffinity: []string{"https://github.com/org/repo"}},
+		{ID: idB, Description: "Refactor auth module", Dependencies: []string{idA},
+			RepoAffinity: []string{"https://github.com/org/repo"}},
+	}, nil
+}
+```
+
+---
+
+### TeammatePlugin
+
+Called for each `task.assigned` event. Runs in its own goroutine — report progress via `reporter` and return a `TaskResult` on completion.
+
+**Go:**
+```go
+type TeammatePlugin interface {
+	ExecuteTask(ctx context.Context, task team.Task, reporter team.ProgressReporter) (team.TaskResult, error)
+}
+
+type ProgressReporter interface {
+	Report(pct int, message string)
+}
+```
+
+**TypeScript:**
+```ts
+interface TeammatePlugin {
+	executeTask(signal: AbortSignal, task: Task, reporter: ProgressReporter): Promise<TaskResult>;
+}
+```
+
+**Implementation tips:**
+- Call `reporter.Report(pct, msg)` periodically to update the leader's dashboard
+- Return an error / rejected Promise to mark the task `failed`; the server will broadcast the failure and the leader can reassign
+- Use `task.FileHints` to scope your operations to specific files
+- Use `ctx` / `signal` cancellation to stop long-running work if the session ends
+
+**Example (Go):**
+```go
+type AIExecutor struct{ agent *MyAIAgent }
+
+func (e *AIExecutor) ExecuteTask(ctx context.Context, t team.Task, r team.ProgressReporter) (team.TaskResult, error) {
+	r.Report(5, "analysing task")
+	plan, err := e.agent.Plan(ctx, t.Description, t.FileHints)
+	if err != nil {
+		return team.TaskResult{}, fmt.Errorf("planning failed: %w", err)
+	}
+	r.Report(20, "executing")
+	output, err := e.agent.Execute(ctx, plan)
+	if err != nil {
+		return team.TaskResult{}, err
+	}
+	r.Report(100, "done")
+	return team.TaskResult{Output: output}, nil
+}
+```
+
+---
+
+### ApprovalPlugin
+
+Called on both roles:
+- **Leader** receives `approval.push` when a Teammate requests permission → call `PATCH /approvals/:id`
+- **Teammate** can surface the `approval.response` result if needed
+
+**Go:**
+```go
+type ApprovalPlugin interface {
+	HandleApproval(ctx context.Context, req team.ApprovalRequest) (approved bool, note string, err error)
+}
+```
+
+**TypeScript:**
+```ts
+interface ApprovalPlugin {
+	handleApproval(signal: AbortSignal, req: ApprovalRequest): Promise<{ approved: boolean; note?: string }>;
+}
+```
+
+**Approval payload fields:**
+
+| Field | Description |
+|-------|-------------| +| `ToolName` | The tool requesting permission (e.g. `"bash"`, `"file_write"`) | +| `Description` | Human-readable description of what the tool will do | +| `RiskLevel` | `"low"` / `"medium"` / `"high"` | +| `ToolInput` | The exact parameters the tool will be called with | + +**Example (Go — CLI prompt):** +```go +type CLIApprover struct{} + +func (a *CLIApprover) HandleApproval(_ context.Context, req team.ApprovalRequest) (bool, string, error) { + fmt.Printf("\n[APPROVAL REQUEST] %s — risk: %s\n", req.ToolName, req.RiskLevel) + fmt.Printf(" %s\n", req.Description) + fmt.Printf(" input: %v\n", req.ToolInput) + fmt.Print(" Approve? [y/N]: ") + var answer string + fmt.Scan(&answer) + return strings.EqualFold(answer, "y"), "", nil +} +``` + +--- + +### ExplorePlugin + +Called when the Leader sends a remote explore request targeting this Teammate's machine. + +**Go:** +```go +type ExplorePlugin interface { + Explore(ctx context.Context, req team.ExploreRequest) (team.ExploreResult, error) +} +``` + +**TypeScript:** +```ts +interface ExplorePlugin { + explore(signal: AbortSignal, req: ExploreRequest): Promise; +} +``` + +**Query types:** + +| Type | Params | Description | +|------|--------|-------------| +| `file_tree` | `path: string` | List files under a directory | +| `symbol_search` | `symbol: string, dir?: string` | Find symbol definitions / usages | +| `content_search` | `pattern: string, dir?: string, fileGlob?: string` | Full-text search | +| `git_log` | `dir: string, n?: int` | Recent commits | +| `dependency_graph` | `entry: string` | Import / module dependency graph | + +**Example (Go — sandboxed shell queries):** +```go +type LocalExplorer struct{} + +func (e *LocalExplorer) Explore(_ context.Context, req team.ExploreRequest) (team.ExploreResult, error) { + results := make([]team.ExploreQueryResult, 0, len(req.Queries)) + for _, q := range req.Queries { + r := team.ExploreQueryResult{Type: q.Type} + switch q.Type { + case 
"file_tree": + path, _ := q.Params["path"].(string) + out, _ := exec.Command("find", orDot(path), "-type", "f", + "-not", "-path", "*/.*").Output() + r.Output = truncate(string(out), 8192) + case "content_search": + pattern, _ := q.Params["pattern"].(string) + dir, _ := q.Params["dir"].(string) + out, _ := exec.Command("rg", "--no-heading", "-n", "-m", "20", + pattern, orDot(dir)).Output() + r.Output = string(out) + case "git_log": + dir, _ := q.Params["dir"].(string) + out, _ := exec.Command("git", "-C", orDot(dir), "log", + "--oneline", "-20").Output() + r.Output = string(out) + } + results = append(results, r) + } + return team.ExploreResult{RequestID: req.RequestID, QueryResults: results}, nil +} + +func orDot(s string) string { + if s == "" { return "." } + return s +} +``` + +--- + +## Config Reference + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `ServerURL` / `serverUrl` | string | ✓ | Base HTTP/HTTPS URL of the server, e.g. `"https://api.example.com"` | +| `Token` / `token` | string | ✓ | JWT bearer token obtained from Casdoor login | +| `SessionID` / `sessionId` | string | ✓ | UUID of the team session to join | +| `MachineID` / `machineId` | string | ✓ | Stable, unique ID for this machine — **must be consistent across reconnects** | +| `MachineName` / `machineName` | string | | Human-readable display name shown in dashboards | +| `Role` / `role` | string | ✓ | `"leader"` or `"teammate"` | + +**Creating a session (before constructing the client):** + +```bash +# REST — create session first, then use the returned id as SessionID +curl -X POST https://api.example.com/api/team/sessions \ + -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"name": "Sprint 42 refactor"}' +``` + +--- + +## Task DAG & Dependencies + +Tasks within a single plan submission can depend on each other. 
The server unlocks a dependent task (moves it to `pending`) only after **all** its declared dependencies have `completed`. + +```go +// Pre-assign UUIDs so dependency references survive serialisation. +idLint := uuid.New().String() +idTest := uuid.New().String() +idBuild := uuid.New().String() + +specs := []team.TaskSpec{ + {ID: idLint, Description: "Run linter", Priority: 9}, + {ID: idTest, Description: "Run unit tests", Priority: 8, Dependencies: []string{idLint}}, + {ID: idBuild, Description: "Build artifacts", Priority: 7, Dependencies: []string{idTest}}, +} +``` + +``` +lint ──▶ test ──▶ build +``` + +When `lint` completes the server automatically unlocks `test` and pushes a `task.assigned` event to the assigned Teammate. + +> **Note:** The SDK pre-assigns UUIDs to all `TaskSpec`s that lack one before POSTing to the server. The server respects provided IDs, so dependency references are always stable. + +--- + +## Repo Affinity + +Set `TaskSpec.RepoAffinity` to a list of git remote URLs. The scheduler will prefer assigning the task to a Teammate that has already registered those repos. + +Register a repo on the Teammate side: + +**Go:** +```go +// Call after Start(), before or right after the client connects. +err := c.RegisterRepo( + "https://github.com/org/repo", // remote URL (used as the affinity key) + "/home/user/projects/repo", // local path + "feat/auth-refactor", // current branch + true, // has uncommitted changes +) +``` + +**TypeScript:** +```ts +// Available on TeammateAgent via the client internals: +await client.registerRepo( + 'https://github.com/org/repo', + '/home/user/projects/repo', + 'feat/auth-refactor', + true, +); +``` + +> `RegisterRepo` is also available on the Go Client as a convenience wrapper. +> Internally it calls `POST /api/team/sessions/:id/repos`. + +--- + +## REST API Reference + +All REST endpoints require `Authorization: Bearer ` unless noted. 
+ +### Sessions + +| Method | Path | Body / Query | Description | +|--------|------|--------------|-------------| +| `POST` | `/api/team/sessions` | `{name}` | Create a session | +| `GET` | `/api/team/sessions/:id` | — | Get session details | +| `PATCH` | `/api/team/sessions/:id` | `{status?, name?}` | Update session | +| `DELETE` | `/api/team/sessions/:id` | — | Delete session | + +### Members + +| Method | Path | Body / Query | Description | +|--------|------|--------------|-------------| +| `POST` | `/api/team/sessions/:id/members` | `{machineId, machineName?}` | Join session | +| `GET` | `/api/team/sessions/:id/members` | — | List members | +| `DELETE` | `/api/team/sessions/:id/members/:mid` | — | Leave session | + +### Tasks + +| Method | Path | Body | Description | +|--------|------|------|-------------| +| `POST` | `/api/team/sessions/:id/tasks` | `{tasks[], fencingToken}` | Submit task plan | +| `GET` | `/api/team/sessions/:id/tasks` | — | List all tasks | +| `GET` | `/api/team/tasks/:taskId` | — | Get single task | +| `PATCH` | `/api/team/tasks/:taskId` | `{status, result?, errorMessage?}` | Update task status | + +### Approvals + +| Method | Path | Body | Description | +|--------|------|------|-------------| +| `GET` | `/api/team/sessions/:id/approvals` | — | List pending approvals | +| `PATCH` | `/api/team/approvals/:approvalId` | `{status, feedback?}` | Respond to approval | + +### Leader Election + +| Method | Path | Body | Description | +|--------|------|------|-------------| +| `POST` | `/api/team/sessions/:id/leader/elect` | `{machineId}` | Attempt election | +| `POST` | `/api/team/sessions/:id/leader/heartbeat` | `{machineId}` | Renew leader lock (every 10 s) | +| `GET` | `/api/team/sessions/:id/leader` | — | Get current leader | + +### Repos & Progress + +| Method | Path | Body / Query | Description | +|--------|------|--------------|-------------| +| `POST` | `/api/team/sessions/:id/repos` | `{memberId, repoRemoteUrl, repoLocalPath, 
currentBranch, hasUncommittedChanges, lastSyncedAt}` | Register local repo | +| `GET` | `/api/team/sessions/:id/repos` | `?remoteUrl=` or `?memberId=` | Query repo affinity | +| `GET` | `/api/team/sessions/:id/progress` | — | Get session progress snapshot | + +### Remote Explore + +| Method | Path | Body | Description | +|--------|------|------|-------------| +| `POST` | `/api/team/sessions/:id/explore` | `{targetMachineId, queries[]}` | Synchronous explore (30 s timeout) | + +--- + +## WebSocket Event Reference + +**Connect:** `wss:///ws/sessions/?machineId=&token=` + +All messages use the `CloudEvent` envelope: + +```json +{ + "eventId": "uuid", + "type": "event.type", + "sessionId": "uuid", + "timestamp": 1713100000000, + "payload": { ... } +} +``` + +### Client → Server Events + +| Type | Payload | Description | +|------|---------|-------------| +| `task.claim` | `{taskId}` | Claim a pending task (called automatically by the SDK) | +| `task.progress` | `{taskId, percent, message}` | Report execution progress | +| `task.complete` | `{taskId, result}` | Mark task completed | +| `task.fail` | `{taskId, errorMessage}` | Mark task failed | +| `approval.request` | `{toolName, description, riskLevel, toolInput}` | Request leader approval | +| `approval.respond` | `{approvalId, status, feedback?}` | Leader responds to approval | +| `explore.result` | `{requestId, queryResults[], fromMachineId, error?}` | Return explore results | +| `repo.register` | `{repoRemoteUrl, repoLocalPath, currentBranch, hasUncommittedChanges}` | Register local repo | +| `leader.elect` | — | Trigger leader election | +| `leader.heartbeat` | — | Renew leader lock (sent automatically every 10 s) | +| `message.send` | `{to, body}` | Send a message (`to` = machineId or `"broadcast"`) | + +### Server → Client Events + +| Type | Payload | Description | +|------|---------|-------------| +| `task.assigned` | `{task: Task}` | A new task has been assigned to this machine | +| `approval.push` | 
`{approval: ApprovalRequest}` | Teammate is requesting approval (leader only) | +| `approval.response` | `{approvalId, status, feedback}` | Leader responded to approval request | +| `explore.request` | `{requestId, fromMachineId, queries[]}` | Leader is requesting code exploration | +| `teammate.status` | `{machineId, status}` | A member came online / went offline | +| `leader.elected` | `{leaderId, fencingToken}` | A new leader was elected | +| `leader.expired` | `{expiredLeaderId}` | Leader lock expired | +| `session.updated` | `{taskId, status}` | Task status changed | +| `message.receive` | `{from, body}` | Message from another machine | +| `error` | `{message}` | Server-side error notification | + +--- + +## Examples + +Runnable examples are in: + +- **Go:** [`client/go/example/main.go`](go/example/main.go) — CLI tool, supports both leader and teammate modes +- **TypeScript Leader:** [`client/ts/examples/leader.ts`](ts/examples/leader.ts) +- **TypeScript Teammate:** [`client/ts/examples/teammate.ts`](ts/examples/teammate.ts) + +### Running the Go example + +```bash +# Teammate +go run ./client/go/example/main.go \ + --server https://api.example.com \ + --token "$TOKEN" \ + --session "$SESSION_ID" \ + --machine my-mac-$(hostname) \ + --role teammate + +# Leader (submits a plan immediately after connecting) +go run ./client/go/example/main.go \ + --server https://api.example.com \ + --token "$TOKEN" \ + --session "$SESSION_ID" \ + --machine leader-$(hostname) \ + --role leader \ + --goal "refactor the authentication module" +``` + +### Running the TypeScript example + +```bash +cd client/ts +npm install +npx ts-node examples/leader.ts +# or +npx ts-node examples/teammate.ts +``` diff --git a/client/go/example/main.go b/client/go/example/main.go new file mode 100644 index 0000000..c5ab467 --- /dev/null +++ b/client/go/example/main.go @@ -0,0 +1,338 @@ +// example/main.go — Runnable Cloud Team Agent client example. 
+// +// Usage: +// +// # Teammate — listens for assigned tasks and executes them as shell commands +// go run ./example/main.go \ +// --server https://api.example.com \ +// --token "$TOKEN" \ +// --session "$SESSION_ID" \ +// --machine my-mac-$(hostname) \ +// --role teammate +// +// # Leader — submits a plan, then routes approvals via stdin prompt +// go run ./example/main.go \ +// --server https://api.example.com \ +// --token "$TOKEN" \ +// --session "$SESSION_ID" \ +// --machine leader-$(hostname) \ +// --role leader \ +// --goal "refactor the authentication module" +package main + +import ( + "bufio" + "context" + "flag" + "fmt" + "log" + "os" + "os/exec" + "os/signal" + "strings" + "syscall" + "time" + + "github.com/google/uuid" + + "github.com/costrict/costrict-web/client/go/team" +) + +func main() { + serverURL := flag.String("server", "", "Server base URL (required)") + token := flag.String("token", "", "JWT bearer token (required)") + sessionID := flag.String("session", "", "Team session UUID (required)") + machineID := flag.String("machine", "", "Stable machine identifier (required)") + machineName := flag.String("name", "", "Human-readable machine name (optional)") + role := flag.String("role", "teammate", "Role: leader or teammate") + goal := flag.String("goal", "", "Goal string for the leader's initial plan (leader only)") + flag.Parse() + + if *serverURL == "" || *token == "" || *sessionID == "" || *machineID == "" { + flag.Usage() + os.Exit(1) + } + if *machineName == "" { + hostname, _ := os.Hostname() + *machineName = hostname + } + + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer stop() + + cfg := team.Config{ + ServerURL: *serverURL, + Token: *token, + SessionID: *sessionID, + MachineID: *machineID, + MachineName: *machineName, + Role: *role, + } + + var c *team.Client + + switch *role { + case team.MemberRoleLeader: + c = buildLeader(cfg, *goal, ctx) + case team.MemberRoleTeammate: + c = 
buildTeammate(cfg) + default: + log.Fatalf("unknown role %q — must be leader or teammate", *role) + } + + log.Printf("[%s] connecting to %s (session=%s)", *machineID, *serverURL, *sessionID) + if err := c.Start(ctx); err != nil && err != context.Canceled { + log.Printf("[%s] stopped: %v", *machineID, err) + } +} + +// ─── Leader setup ───────────────────────────────────────────────────────── + +func buildLeader(cfg team.Config, goal string, ctx context.Context) *team.Client { + c := team.New(cfg). + WithLeaderPlugin(&SimplePlanner{}). + WithApprovalPlugin(&StdinApprover{prefix: "[LEADER]"}) + + if goal != "" { + go func() { + // Give the WS connection a moment to establish before submitting the plan. + select { + case <-time.After(2 * time.Second): + case <-ctx.Done(): + return + } + log.Printf("[leader] submitting plan: %q", goal) + if err := c.SubmitPlan(ctx, goal); err != nil { + log.Printf("[leader] plan submission failed: %v", err) + } else { + log.Printf("[leader] plan submitted successfully") + } + }() + } + + return c +} + +// SimplePlanner creates a 3-task dependency chain from the goal string. +// In production, replace this with an LLM call or your own planning logic. +type SimplePlanner struct{} + +func (p *SimplePlanner) PlanTasks(ctx context.Context, req team.PlanTasksInput) ([]team.TaskSpec, error) { + log.Printf("[leader] planning tasks for goal: %q (%d members online)", req.Goal, len(req.Members)) + + // Pre-assign UUIDs so we can wire up inter-task dependencies. + idA := uuid.New().String() + idB := uuid.New().String() + idC := uuid.New().String() + + // Pick the first available teammate for assignment (if any). 
+ var teammateID string + for _, m := range req.Members { + if m.Role == team.MemberRoleTeammate && m.Status == team.MemberStatusOnline { + teammateID = m.ID + break + } + } + + makeSpec := func(id, desc string, deps []string, assignee string) team.TaskSpec { + return team.TaskSpec{ + ID: id, + Description: desc, + Dependencies: deps, + AssignedMemberID: assignee, + Priority: 5, + } + } + + return []team.TaskSpec{ + makeSpec(idA, fmt.Sprintf("Analyse codebase — %s", req.Goal), nil, teammateID), + makeSpec(idB, fmt.Sprintf("Implement changes — %s", req.Goal), []string{idA}, teammateID), + makeSpec(idC, fmt.Sprintf("Run tests and verify — %s", req.Goal), []string{idB}, teammateID), + }, nil +} + +// ─── Teammate setup ──────────────────────────────────────────────────────── + +func buildTeammate(cfg team.Config) *team.Client { + return team.New(cfg). + WithTeammatePlugin(&ShellExecutor{}). + WithExplorePlugin(&LocalExplorer{}). + WithApprovalPlugin(&StdinApprover{prefix: "[TEAMMATE]"}) +} + +// ShellExecutor interprets the task description as a shell command and runs it. +// In production, replace this with your AI agent or task runner. +type ShellExecutor struct{} + +func (e *ShellExecutor) ExecuteTask(ctx context.Context, t team.Task, r team.ProgressReporter) (team.TaskResult, error) { + log.Printf("[teammate] executing task %s: %q", t.ID[:8], t.Description) + r.Report(10, "preparing") + + // Treat the description as a shell command for this example. + // A real implementation would parse the description and invoke appropriate tools. + cmd := exec.CommandContext(ctx, "sh", "-c", t.Description) //nolint:gosec + cmd.Dir = "." + + r.Report(30, "running") + + output, err := cmd.CombinedOutput() + if err != nil { + // Non-zero exit: report as task failure with the captured output. 
+ return team.TaskResult{}, fmt.Errorf("command failed: %w\noutput: %s", err, output) + } + + r.Report(100, "done") + log.Printf("[teammate] task %s completed (%d bytes output)", t.ID[:8], len(output)) + return team.TaskResult{ + Output: string(output), + ExtraData: map[string]any{"exitCode": 0}, + }, nil +} + +// ─── LocalExplorer ───────────────────────────────────────────────────────── + +// LocalExplorer handles remote explore requests using common Unix tools. +// Only read-only, sandboxed operations are allowed. +type LocalExplorer struct{} + +func (e *LocalExplorer) Explore(_ context.Context, req team.ExploreRequest) (team.ExploreResult, error) { + log.Printf("[teammate] handling explore request %s (%d queries)", req.RequestID[:8], len(req.Queries)) + + results := make([]team.ExploreQueryResult, 0, len(req.Queries)) + for _, q := range req.Queries { + r := team.ExploreQueryResult{Type: q.Type} + + switch q.Type { + case "file_tree": + path := stringParam(q.Params, "path", ".") + out, err := exec.Command( //nolint:gosec + "find", path, "-type", "f", + "-not", "-path", "*/.*", // exclude hidden files + "-not", "-path", "*/vendor/*", + "-not", "-path", "*/node_modules/*", + ).Output() + if err != nil { + r.Output = fmt.Sprintf("error: %v", err) + } else { + r.Output = limitOutput(string(out), 8192) + r.Truncated = len(out) > 8192 + } + + case "content_search": + pattern := stringParam(q.Params, "pattern", "") + dir := stringParam(q.Params, "dir", ".") + if pattern == "" { + r.Output = "error: pattern is required" + break + } + // Use ripgrep if available, fall back to grep. 
+ cmd := exec.Command("rg", "--no-heading", "-n", "-m", "50", pattern, dir) //nolint:gosec + if _, err := exec.LookPath("rg"); err != nil { + cmd = exec.Command("grep", "-rn", "--include=*", pattern, dir) //nolint:gosec + } + out, _ := cmd.Output() + r.Output = limitOutput(string(out), 8192) + r.Truncated = len(out) > 8192 + + case "git_log": + dir := stringParam(q.Params, "dir", ".") + n := intParam(q.Params, "n", 20) + out, err := exec.Command( //nolint:gosec + "git", "-C", dir, "log", + fmt.Sprintf("-n%d", n), + "--oneline", + ).Output() + if err != nil { + r.Output = fmt.Sprintf("error: %v (is %s a git repo?)", err, dir) + } else { + r.Output = string(out) + } + + case "symbol_search": + symbol := stringParam(q.Params, "symbol", "") + dir := stringParam(q.Params, "dir", ".") + if symbol == "" { + r.Output = "error: symbol is required" + break + } + out, _ := exec.Command("rg", "--no-heading", "-n", "-w", symbol, dir).Output() //nolint:gosec + r.Output = limitOutput(string(out), 8192) + + case "dependency_graph": + entry := stringParam(q.Params, "entry", ".") + out, err := exec.Command("go", "list", "-deps", entry).Output() //nolint:gosec + if err != nil { + r.Output = fmt.Sprintf("error: %v", err) + } else { + r.Output = limitOutput(string(out), 8192) + } + + default: + r.Output = fmt.Sprintf("unsupported query type %q", q.Type) + } + + results = append(results, r) + } + + return team.ExploreResult{ + RequestID: req.RequestID, + QueryResults: results, + }, nil +} + +// ─── StdinApprover ───────────────────────────────────────────────────────── + +// StdinApprover presents approval requests on stdout and reads y/n from stdin. 
+type StdinApprover struct { + prefix string +} + +func (a *StdinApprover) HandleApproval(_ context.Context, req team.ApprovalRequest) (bool, string, error) { + fmt.Printf("\n%s ─────────────────── APPROVAL REQUEST ───────────────────\n", a.prefix) + fmt.Printf(" Tool: %s\n", req.ToolName) + fmt.Printf(" Risk level: %s\n", req.RiskLevel) + fmt.Printf(" Description: %s\n", req.Description) + if len(req.ToolInput) > 0 { + fmt.Printf(" Input: %v\n", req.ToolInput) + } + fmt.Printf("%s ──────────────────────────────────────────────────────────\n", a.prefix) + fmt.Printf("%s Approve? [y/N]: ", a.prefix) + + scanner := bufio.NewScanner(os.Stdin) + scanner.Scan() + answer := strings.TrimSpace(scanner.Text()) + approved := strings.EqualFold(answer, "y") + + if approved { + fmt.Printf("%s Approved.\n", a.prefix) + } else { + fmt.Printf("%s Rejected.\n", a.prefix) + } + return approved, "", nil +} + +// ─── Helpers ─────────────────────────────────────────────────────────────── + +func stringParam(params map[string]any, key, def string) string { + if v, ok := params[key].(string); ok && v != "" { + return v + } + return def +} + +func intParam(params map[string]any, key string, def int) int { + switch v := params[key].(type) { + case float64: + return int(v) + case int: + return v + } + return def +} + +func limitOutput(s string, max int) string { + if len(s) <= max { + return s + } + return s[:max] +} diff --git a/client/go/go.mod b/client/go/go.mod new file mode 100644 index 0000000..fdf9207 --- /dev/null +++ b/client/go/go.mod @@ -0,0 +1,8 @@ +module github.com/costrict/costrict-web/client/go + +go 1.25.0 + +require ( + github.com/google/uuid v1.6.0 + github.com/gorilla/websocket v1.5.3 +) diff --git a/client/go/team/client.go b/client/go/team/client.go new file mode 100644 index 0000000..e5f76e8 --- /dev/null +++ b/client/go/team/client.go @@ -0,0 +1,247 @@ +package team + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" 
+ + "github.com/google/uuid" +) + +// Config holds all the settings needed to connect to a team session. +type Config struct { + // ServerURL is the base HTTP/HTTPS URL of the costrict server, + // e.g. "https://api.example.com". The client derives the WebSocket + // URL automatically (https → wss, http → ws). + ServerURL string + + // Token is the JWT bearer token for authentication. + Token string + + // SessionID is the UUID of the existing team session to join. + SessionID string + + // MachineID is a stable, unique identifier for this machine. + // Must be consistent across reconnects so the server can route + // offline messages back to this machine. + MachineID string + + // MachineName is a human-readable label for this machine (optional). + MachineName string + + // Role is either "leader" or "teammate". + Role string +} + +// Client is the top-level entry point for the Cloud Team Agent SDK. +// Create one with New, register plugins with the With* methods, then call Start. +// +// Usage (leader): +// +// c := team.New(cfg). +// WithLeaderPlugin(myPlanner). +// WithApprovalPlugin(myApprover) +// if err := c.Start(ctx); err != nil { ... } +// +// Usage (teammate): +// +// c := team.New(cfg). +// WithTeammatePlugin(myExecutor). +// WithExplorePlugin(myExplorer) +// if err := c.Start(ctx); err != nil { ... } +type Client struct { + cfg Config + httpClient *http.Client + + ws *wsConn + leader *leaderAgent + teammate *teammateAgent + + // Plugin slots — set via With* methods before calling Start. + leaderPlugin LeaderPlugin + teammatePlugin TeammatePlugin + approvalPlugin ApprovalPlugin + explorePlugin ExplorePlugin + + cancelFn context.CancelFunc +} + +// New creates a Client from cfg. No network activity happens until Start is called. 
+func New(cfg Config) *Client { + return &Client{ + cfg: cfg, + httpClient: &http.Client{Timeout: 30 * time.Second}, + ws: newWSConn(cfg), + } +} + +// ─── Fluent plugin registration ─────────────────────────────────────────── + +// WithLeaderPlugin registers the planning plugin (leader role only). +func (c *Client) WithLeaderPlugin(p LeaderPlugin) *Client { + c.leaderPlugin = p + return c +} + +// WithTeammatePlugin registers the task-execution plugin (teammate role only). +func (c *Client) WithTeammatePlugin(p TeammatePlugin) *Client { + c.teammatePlugin = p + return c +} + +// WithApprovalPlugin registers the approval-display plugin (both roles). +func (c *Client) WithApprovalPlugin(p ApprovalPlugin) *Client { + c.approvalPlugin = p + return c +} + +// WithExplorePlugin registers the local code-query plugin (teammate role only). +func (c *Client) WithExplorePlugin(p ExplorePlugin) *Client { + c.explorePlugin = p + return c +} + +// ─── Lifecycle ──────────────────────────────────────────────────────────── + +// Start connects to the server and begins processing events. +// It blocks until ctx is cancelled (or a fatal error occurs). +// +// For the leader role, call SubmitPlan after Start returns (or from a separate +// goroutine while Start is running). +func (c *Client) Start(ctx context.Context) error { + ctx, cancel := context.WithCancel(ctx) + c.cancelFn = cancel + defer cancel() + + // Start the WebSocket connection loop in the background. + go c.ws.run(ctx) + + // Role-specific initialisation via REST. 
+ switch c.cfg.Role { + case MemberRoleLeader: + c.leader = newLeaderAgent(c) + if err := c.leader.init(ctx); err != nil { + return fmt.Errorf("leader init: %w", err) + } + case MemberRoleTeammate: + c.teammate = newTeammateAgent(c) + if err := c.teammate.init(ctx); err != nil { + return fmt.Errorf("teammate init: %w", err) + } + default: + return fmt.Errorf("unknown role %q (must be %q or %q)", + c.cfg.Role, MemberRoleLeader, MemberRoleTeammate) + } + + // Event dispatch loop. + for { + select { + case evt := <-c.ws.inbound: + c.dispatch(evt) + case <-ctx.Done(): + c.stop() + return ctx.Err() + } + } +} + +// SubmitPlan calls LeaderPlugin.PlanTasks and submits the resulting task plan +// to the server. Must only be called from the leader role after Start. +func (c *Client) SubmitPlan(ctx context.Context, goal string) error { + if c.leader == nil { + return fmt.Errorf("SubmitPlan requires Role = %q", MemberRoleLeader) + } + return c.leader.submitPlan(ctx, goal) +} + +// Stop signals the client to shut down. It is safe to call from any goroutine. +func (c *Client) Stop() { + if c.cancelFn != nil { + c.cancelFn() + } +} + +// ─── Internal helpers ───────────────────────────────────────────────────── + +func (c *Client) stop() { + c.ws.close() + if c.leader != nil { + c.leader.stop() + } +} + +func (c *Client) dispatch(evt CloudEvent) { + if c.leader != nil { + c.leader.handle(evt) + } + if c.teammate != nil { + c.teammate.handle(evt) + } +} + +// doJSON executes a REST request against the server and decodes the JSON response. +// body and out may both be nil. 
+func (c *Client) doJSON(method, path string, body, out any) error { + var bodyReader io.Reader + if body != nil { + data, err := json.Marshal(body) + if err != nil { + return fmt.Errorf("marshal request: %w", err) + } + bodyReader = bytes.NewReader(data) + } + + apiURL := strings.TrimRight(c.cfg.ServerURL, "/") + path + req, err := http.NewRequest(method, apiURL, bodyReader) + if err != nil { + return fmt.Errorf("build request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + if c.cfg.Token != "" { + req.Header.Set("Authorization", "Bearer "+c.cfg.Token) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("HTTP %s %s: %w", method, path, err) + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + var e struct { + Error string `json:"error"` + } + json.NewDecoder(resp.Body).Decode(&e) //nolint:errcheck + return fmt.Errorf("HTTP %d %s: %s", resp.StatusCode, path, e.Error) + } + + if out != nil { + return json.NewDecoder(resp.Body).Decode(out) + } + return nil +} + +// fetchMembers calls GET /api/team/sessions/:id/members and returns the list. +func (c *Client) fetchMembers() ([]Member, error) { + var resp struct { + Members []Member `json:"members"` + } + err := c.doJSON("GET", "/api/team/sessions/"+c.cfg.SessionID+"/members", nil, &resp) + return resp.Members, err +} + +// newEvent builds a CloudEvent with a fresh UUID and the current timestamp. 
+func newEvent(eventType, sessionID string, payload map[string]any) CloudEvent { + return CloudEvent{ + EventID: uuid.New().String(), + Type: eventType, + SessionID: sessionID, + Timestamp: time.Now().UnixMilli(), + Payload: payload, + } +} diff --git a/client/go/team/leader.go b/client/go/team/leader.go new file mode 100644 index 0000000..c3d8d98 --- /dev/null +++ b/client/go/team/leader.go @@ -0,0 +1,212 @@ +package team + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/google/uuid" +) + +// leaderAgent handles all leader-role behaviour: +// - leader election and lock renewal (heartbeat) +// - task plan submission +// - routing incoming approval.push events to the ApprovalPlugin +// - re-election when the leader lock expires +type leaderAgent struct { + c *Client + fencingToken int64 + cancelHB context.CancelFunc +} + +func newLeaderAgent(c *Client) *leaderAgent { + return &leaderAgent{c: c} +} + +// init performs the REST calls needed before the event loop starts: +// 1. Attempts leader election. +// 2. Starts the heartbeat goroutine to renew the lock every 10 s. +func (l *leaderAgent) init(ctx context.Context) error { + var resp struct { + Elected bool `json:"elected"` + FencingToken int64 `json:"fencingToken"` + LeaderID string `json:"leaderId"` + } + if err := l.c.doJSON("POST", + "/api/team/sessions/"+l.c.cfg.SessionID+"/leader/elect", + map[string]any{"machineId": l.c.cfg.MachineID}, + &resp, + ); err != nil { + return fmt.Errorf("leader election: %w", err) + } + l.fencingToken = resp.FencingToken + + hbCtx, cancel := context.WithCancel(ctx) + l.cancelHB = cancel + go l.heartbeatLoop(hbCtx) + + return nil +} + +// heartbeatLoop renews the Redis leader lock every leaderHeartbeatSec seconds. 
+func (l *leaderAgent) heartbeatLoop(ctx context.Context) { + ticker := time.NewTicker(leaderHeartbeatSec * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + var resp struct { + Renewed bool `json:"renewed"` + } + // Best-effort — ignore errors here, the server will broadcast + // leader.expired if the lock expires. + l.c.doJSON("POST", //nolint:errcheck + "/api/team/sessions/"+l.c.cfg.SessionID+"/leader/heartbeat", + map[string]any{"machineId": l.c.cfg.MachineID}, + &resp) + case <-ctx.Done(): + return + } + } +} + +// submitPlan calls LeaderPlugin.PlanTasks then POSTs the resulting tasks to +// the server. It pre-assigns UUIDs to each task so that dependency IDs are +// stable at creation time (the server respects provided IDs). +func (l *leaderAgent) submitPlan(ctx context.Context, goal string) error { + if l.c.leaderPlugin == nil { + return fmt.Errorf("no LeaderPlugin registered") + } + + members, err := l.c.fetchMembers() + if err != nil { + return fmt.Errorf("fetch members: %w", err) + } + + specs, err := l.c.leaderPlugin.PlanTasks(ctx, PlanTasksInput{ + Goal: goal, + SessionID: l.c.cfg.SessionID, + Members: members, + }) + if err != nil { + return fmt.Errorf("plan tasks: %w", err) + } + if len(specs) == 0 { + return fmt.Errorf("LeaderPlugin returned an empty plan") + } + + // Pre-assign stable UUIDs so dependency references within the batch + // are preserved when the server creates the tasks. + for i := range specs { + if specs[i].ID == "" { + specs[i].ID = uuid.New().String() + } + } + + var result struct { + Tasks []Task `json:"tasks"` + } + return l.c.doJSON("POST", + "/api/team/sessions/"+l.c.cfg.SessionID+"/tasks", + map[string]any{ + "tasks": specs, + "fencingToken": l.fencingToken, + }, + &result, + ) +} + +// handle dispatches incoming server events relevant to the leader role. 
func (l *leaderAgent) handle(evt CloudEvent) {
	switch evt.Type {

	case EventApprovalPush:
		// Extract the nested approval object and forward to ApprovalPlugin.
		if l.c.approvalPlugin == nil {
			return
		}
		// Round-trip through JSON to convert the generic payload map into a
		// typed ApprovalRequest; malformed payloads are dropped silently.
		approvalRaw, _ := json.Marshal(evt.Payload["approval"])
		var req ApprovalRequest
		if json.Unmarshal(approvalRaw, &req) != nil {
			return
		}
		// Run the (potentially interactive, slow) plugin off the event loop.
		go func() {
			ctx := context.Background()
			approved, note, err := l.c.approvalPlugin.HandleApproval(ctx, req)
			if err != nil {
				// NOTE(review): a plugin error means no PATCH is ever sent, so
				// the requester never receives a verdict — confirm the server
				// times these out, or consider responding "rejected" here.
				return
			}
			status := "approved"
			if !approved {
				status = "rejected"
			}
			l.c.doJSON("PATCH", "/api/team/approvals/"+req.ID, //nolint:errcheck
				map[string]any{"status": status, "feedback": note}, nil)
		}()

	case EventLeaderExpired:
		// Our lock expired — try to re-acquire.
		go func() {
			var resp struct {
				Elected      bool  `json:"elected"`
				FencingToken int64 `json:"fencingToken"`
			}
			if err := l.c.doJSON("POST",
				"/api/team/sessions/"+l.c.cfg.SessionID+"/leader/elect",
				map[string]any{"machineId": l.c.cfg.MachineID},
				&resp,
			); err == nil && resp.Elected {
				// NOTE(review): this write races with submitPlan reading
				// l.fencingToken from another goroutine — consider sync/atomic
				// or a mutex on the field.
				l.fencingToken = resp.FencingToken
			}
		}()

	case EventTeammateStatus:
		// Teammate came online or went offline — useful for dashboards.
		// The host application can embed a custom handler via a callback if needed.
	}
}

// stop cancels the heartbeat goroutine.
func (l *leaderAgent) stop() {
	if l.cancelHB != nil {
		l.cancelHB()
	}
}

// RequestApproval sends an approval.request event via WebSocket.
// Leaders can use this to request user confirmation for risky operations.
// Delivery is best-effort: ws.send drops the event if the outbound queue is full.
func (l *leaderAgent) RequestApproval(toolName, description, riskLevel string, toolInput map[string]any) error {
	return l.c.ws.send(newEvent(EventApprovalRequest, l.c.cfg.SessionID, map[string]any{
		"toolName":    toolName,
		"description": description,
		"riskLevel":   riskLevel,
		"toolInput":   toolInput,
	}))
}

// RegisterRepo registers a local repository with the session's affinity registry.
+func (l *leaderAgent) RegisterRepo(remoteURL, localPath, branch string, dirty bool) error { + return l.c.doJSON("POST", + "/api/team/sessions/"+l.c.cfg.SessionID+"/repos", + map[string]any{ + "memberId": l.findMemberID(), + "repoRemoteUrl": remoteURL, + "repoLocalPath": localPath, + "currentBranch": branch, + "hasUncommittedChanges": dirty, + "lastSyncedAt": time.Now().Format(time.RFC3339), + }, + nil, + ) +} + +func (l *leaderAgent) findMemberID() string { + members, _ := l.c.fetchMembers() + for _, m := range members { + if m.MachineID == l.c.cfg.MachineID { + return m.ID + } + } + return "" +} diff --git a/client/go/team/plugin.go b/client/go/team/plugin.go new file mode 100644 index 0000000..a3b4749 --- /dev/null +++ b/client/go/team/plugin.go @@ -0,0 +1,40 @@ +package team + +import "context" + +// LeaderPlugin decomposes a natural-language goal into an ordered task DAG. +// Inject your LLM / planning logic here. +// The SDK calls PlanTasks when the host application calls Client.SubmitPlan. +type LeaderPlugin interface { + PlanTasks(ctx context.Context, req PlanTasksInput) ([]TaskSpec, error) +} + +// TeammatePlugin executes an assigned task and streams progress updates. +// Inject your shell runner / code executor here. +// The SDK calls ExecuteTask for each incoming task.assigned event. +type TeammatePlugin interface { + ExecuteTask(ctx context.Context, task Task, reporter ProgressReporter) (TaskResult, error) +} + +// ApprovalPlugin displays an incoming approval request to the user and collects +// a decision. Inject a CLI prompt, GUI dialog, or any other UI here. +// The SDK calls HandleApproval for each incoming approval.push event (leader role) +// or approval.response event (teammate role). +type ApprovalPlugin interface { + HandleApproval(ctx context.Context, req ApprovalRequest) (approved bool, note string, err error) +} + +// ExplorePlugin executes local file / code queries on behalf of a remote +// explore.request. 
The SDK calls Explore when the server routes an +// explore.request to this machine. +// Allowed operations: file tree listing, symbol search, content search, +// git log, dependency graph — read-only, sandboxed to the local repo. +type ExplorePlugin interface { + Explore(ctx context.Context, req ExploreRequest) (ExploreResult, error) +} + +// ProgressReporter lets TeammatePlugin stream incremental progress updates +// back to the session without blocking the execution goroutine. +type ProgressReporter interface { + Report(pct int, message string) +} diff --git a/client/go/team/teammate.go b/client/go/team/teammate.go new file mode 100644 index 0000000..37e0de9 --- /dev/null +++ b/client/go/team/teammate.go @@ -0,0 +1,193 @@ +package team + +import ( + "context" + "encoding/json" + "fmt" + "time" +) + +// teammateAgent handles all teammate-role behaviour: +// - joining the session (REST) +// - executing assigned tasks via TeammatePlugin +// - handling explore.request events via ExplorePlugin +// - sending approval.request events to the leader +type teammateAgent struct { + c *Client + memberID string // populated after init +} + +func newTeammateAgent(c *Client) *teammateAgent { + return &teammateAgent{c: c} +} + +// init registers this machine as a session member via REST. +func (t *teammateAgent) init(_ context.Context) error { + var resp struct { + ID string `json:"id"` + } + err := t.c.doJSON("POST", + "/api/team/sessions/"+t.c.cfg.SessionID+"/members", + map[string]any{ + "machineId": t.c.cfg.MachineID, + "machineName": t.c.cfg.MachineName, + }, + &resp, + ) + if err != nil { + return fmt.Errorf("join session: %w", err) + } + t.memberID = resp.ID + return nil +} + +// handle dispatches incoming server events relevant to the teammate role. 
func (t *teammateAgent) handle(evt CloudEvent) {
	switch evt.Type {

	case EventTaskAssigned:
		// Round-trip through JSON to convert the generic payload into a typed
		// Task; malformed or ID-less tasks are dropped silently.
		taskRaw, _ := json.Marshal(evt.Payload["task"])
		var task Task
		if json.Unmarshal(taskRaw, &task) != nil || task.ID == "" {
			return
		}
		if t.c.teammatePlugin == nil {
			return
		}
		// Claim the task immediately so the leader knows it's being worked on.
		t.c.ws.send(newEvent(EventTaskClaim, t.c.cfg.SessionID, map[string]any{ //nolint:errcheck
			"taskId": task.ID,
		}))
		// Execute off the event loop so long-running tasks don't block dispatch.
		go t.executeTask(task)

	case EventExploreRequest:
		if t.c.explorePlugin == nil {
			return
		}
		requestID, _ := evt.Payload["requestId"].(string)
		if requestID == "" {
			return
		}
		go t.handleExplore(evt)

	case EventApprovalResponse:
		// The leader responded to our approval request. Optionally notify the plugin.
		if t.c.approvalPlugin == nil {
			return
		}
		// We receive only the response here, not a full ApprovalRequest, so we
		// build a minimal stub for the plugin in case it wants to log the outcome.
		approvalID, _ := evt.Payload["approvalId"].(string)
		status, _ := evt.Payload["status"].(string)
		feedback, _ := evt.Payload["feedback"].(string)
		if approvalID == "" {
			return
		}
		// NOTE(review): this goroutine is a deliberate no-op placeholder — the
		// extracted fields are discarded. If the outcome should reach the
		// plugin, a callback hook needs to be added.
		go func() {
			_ = approvalID
			_ = status
			_ = feedback
			// Host can extend via ApprovalPlugin if needed; no further action here.
		}()
	}
}

// executeTask runs the task through TeammatePlugin and reports progress/result.
func (t *teammateAgent) executeTask(task Task) {
	ctx := context.Background()
	reporter := &wsProgressReporter{c: t.c, taskID: task.ID}

	// Signal that we've started.
	t.c.ws.send(newEvent(EventTaskProgress, t.c.cfg.SessionID, map[string]any{ //nolint:errcheck
		"taskId":  task.ID,
		"percent": 0,
		"message": "started",
	}))

	result, err := t.c.teammatePlugin.ExecuteTask(ctx, task, reporter)
	if err != nil {
		// Plugin failure → task.fail with the error text; the server decides
		// whether to retry (see Task.MaxRetries).
		t.c.ws.send(newEvent(EventTaskFail, t.c.cfg.SessionID, map[string]any{ //nolint:errcheck
			"taskId":       task.ID,
			"errorMessage": err.Error(),
		}))
		return
	}

	t.c.ws.send(newEvent(EventTaskComplete, t.c.cfg.SessionID, map[string]any{ //nolint:errcheck
		"taskId": task.ID,
		"result": result,
	}))
}

// handleExplore runs the explore request through ExplorePlugin and sends the result.
func (t *teammateAgent) handleExplore(evt CloudEvent) {
	requestID, _ := evt.Payload["requestId"].(string)
	fromMachineID, _ := evt.Payload["fromMachineId"].(string)

	// Re-marshal the payload into an ExploreRequest.
	// Assumes the payload's keys mirror ExploreRequest's json tags; on
	// unmarshal failure we fall back to an empty request carrying only the ID.
	raw, _ := json.Marshal(evt.Payload)
	var req ExploreRequest
	if err := json.Unmarshal(raw, &req); err != nil {
		req.RequestID = requestID
	}

	result, err := t.c.explorePlugin.Explore(context.Background(), req)
	if err != nil {
		result = ExploreResult{
			RequestID: requestID,
			Error:     err.Error(),
		}
	}
	// Always stamp the request ID so the server can correlate the reply,
	// even if the plugin forgot to set it.
	result.RequestID = requestID

	t.c.ws.send(newEvent(EventExploreResult, t.c.cfg.SessionID, map[string]any{ //nolint:errcheck
		"requestId":     requestID,
		"queryResults":  result.QueryResults,
		"fromMachineId": fromMachineID,
		"error":         result.Error,
	}))
}

// RequestApproval sends an approval.request to the leader via WebSocket.
// riskLevel should be "low", "medium", or "high".
// Delivery is best-effort: ws.send drops the event if the outbound queue is full.
func (t *teammateAgent) RequestApproval(toolName, description, riskLevel string, toolInput map[string]any) error {
	return t.c.ws.send(newEvent(EventApprovalRequest, t.c.cfg.SessionID, map[string]any{
		"toolName":    toolName,
		"description": description,
		"riskLevel":   riskLevel,
		"toolInput":   toolInput,
	}))
}

// RegisterRepo registers a local repository with the session's affinity registry.
+func (t *teammateAgent) RegisterRepo(remoteURL, localPath, branch string, dirty bool) error { + return t.c.doJSON("POST", + "/api/team/sessions/"+t.c.cfg.SessionID+"/repos", + map[string]any{ + "memberId": t.memberID, + "repoRemoteUrl": remoteURL, + "repoLocalPath": localPath, + "currentBranch": branch, + "hasUncommittedChanges": dirty, + "lastSyncedAt": time.Now().Format(time.RFC3339), + }, + nil, + ) +} + +// ─── wsProgressReporter ─────────────────────────────────────────────────── + +// wsProgressReporter implements ProgressReporter by sending task.progress +// events over the WebSocket connection. +type wsProgressReporter struct { + c *Client + taskID string +} + +func (r *wsProgressReporter) Report(pct int, message string) { + r.c.ws.send(newEvent(EventTaskProgress, r.c.cfg.SessionID, map[string]any{ //nolint:errcheck + "taskId": r.taskID, + "percent": pct, + "message": message, + })) +} diff --git a/client/go/team/types.go b/client/go/team/types.go new file mode 100644 index 0000000..33e7ce1 --- /dev/null +++ b/client/go/team/types.go @@ -0,0 +1,182 @@ +package team + +import "time" + +// CloudEvent is the unified event envelope — identical to the server definition. +type CloudEvent struct { + EventID string `json:"eventId"` + Type string `json:"type"` + SessionID string `json:"sessionId"` + Timestamp int64 `json:"timestamp"` + Payload map[string]any `json:"payload,omitempty"` +} + +// Task mirrors the server's TeamTask fields that the client cares about. 
type Task struct {
	ID           string   `json:"id"`
	SessionID    string   `json:"sessionId"`
	Description  string   `json:"description"`
	RepoAffinity []string `json:"repoAffinity,omitempty"`
	FileHints    []string `json:"fileHints,omitempty"`
	Dependencies []string `json:"dependencies,omitempty"`
	// AssignedMemberID is nil while the task is unassigned.
	AssignedMemberID *string `json:"assignedMemberId,omitempty"`
	Status           string  `json:"status"` // one of the TaskStatus* constants
	// Priority ordering is decided server-side — presumably higher runs first;
	// confirm against the server scheduler.
	Priority     int        `json:"priority"`
	RetryCount   int        `json:"retryCount"`
	MaxRetries   int        `json:"maxRetries"`
	ErrorMessage string     `json:"errorMessage,omitempty"`
	CreatedAt    time.Time  `json:"createdAt"`
	ClaimedAt    *time.Time `json:"claimedAt,omitempty"`
	StartedAt    *time.Time `json:"startedAt,omitempty"`
	CompletedAt  *time.Time `json:"completedAt,omitempty"`
}

// TaskSpec is what the LeaderPlugin returns — the plan for a single task.
// Set ID to a pre-generated UUID if you want dependency DAGs to work;
// the server will use the provided ID instead of generating a new one.
type TaskSpec struct {
	ID               string   `json:"id,omitempty"`
	Description      string   `json:"description"`
	RepoAffinity     []string `json:"repoAffinity,omitempty"`
	FileHints        []string `json:"fileHints,omitempty"`
	Dependencies     []string `json:"dependencies,omitempty"` // references IDs in the same batch
	AssignedMemberID string   `json:"assignedMemberId,omitempty"`
	Priority         int      `json:"priority,omitempty"`
	MaxRetries       int      `json:"maxRetries,omitempty"`
}

// TaskResult is returned by TeammatePlugin.ExecuteTask on success.
// It is forwarded verbatim as the "result" payload of task.complete.
type TaskResult struct {
	Output    string         `json:"output,omitempty"`
	Files     []string       `json:"files,omitempty"`
	ExtraData map[string]any `json:"extraData,omitempty"`
}

// ApprovalRequest is pushed to the leader when a teammate needs permission.
type ApprovalRequest struct {
	ID          string         `json:"id"`
	SessionID   string         `json:"sessionId"`
	RequesterID string         `json:"requesterId"`
	ToolName    string         `json:"toolName"`
	ToolInput   map[string]any `json:"toolInput"`
	Description string         `json:"description,omitempty"`
	RiskLevel   string         `json:"riskLevel"` // "low" | "medium" | "high"
	Status      string         `json:"status"`
	CreatedAt   time.Time      `json:"createdAt"`
}

// ExploreQuery represents a single code-intelligence query.
type ExploreQuery struct {
	Type   string         `json:"type"` // file_tree | symbol_search | content_search | git_log | dependency_graph
	Params map[string]any `json:"params"`
}

// ExploreRequest is sent to the teammate that owns the target repo.
type ExploreRequest struct {
	RequestID     string         `json:"requestId"`
	SessionID     string         `json:"sessionId"`
	FromMachineID string         `json:"fromMachineId"`
	Queries       []ExploreQuery `json:"queries"`
}

// ExploreQueryResult holds the output for one query in an ExploreRequest.
type ExploreQueryResult struct {
	Type      string `json:"type"`
	Output    string `json:"output"`
	Truncated bool   `json:"truncated"` // true if Output was cut to a size limit
}

// ExploreResult is returned by ExplorePlugin and sent back to the leader.
// Error is set (and QueryResults may be empty) when the plugin failed.
type ExploreResult struct {
	RequestID    string               `json:"requestId"`
	QueryResults []ExploreQueryResult `json:"queryResults"`
	Error        string               `json:"error,omitempty"`
}

// Member represents a session participant, used when planning tasks.
type Member struct {
	ID          string `json:"id"` // server-assigned member ID (distinct from MachineID)
	SessionID   string `json:"sessionId"`
	MachineID   string `json:"machineId"`
	MachineName string `json:"machineName,omitempty"`
	Role        string `json:"role"`   // MemberRoleLeader or MemberRoleTeammate
	Status      string `json:"status"` // one of the MemberStatus* constants
}

// PlanTasksInput is passed to LeaderPlugin.PlanTasks with full session context.
+type PlanTasksInput struct { + Goal string + SessionID string + Members []Member // current session participants (for assignment decisions) +} + +// ─── Event type constants (Client → Cloud) ──────────────────────────────── + +const ( + EventSessionCreate = "session.create" + EventSessionJoin = "session.join" + EventTaskPlanSubmit = "task.plan.submit" + EventTaskClaim = "task.claim" + EventTaskProgress = "task.progress" + EventTaskComplete = "task.complete" + EventTaskFail = "task.fail" + EventApprovalRequest = "approval.request" + EventApprovalRespond = "approval.respond" + EventMessageSend = "message.send" + EventRepoRegister = "repo.register" + EventExploreRequest = "explore.request" + EventExploreResult = "explore.result" + EventLeaderElect = "leader.elect" + EventLeaderHeartbeat = "leader.heartbeat" +) + +// ─── Event type constants (Cloud → Client) ──────────────────────────────── + +const ( + EventTaskAssigned = "task.assigned" + EventApprovalPush = "approval.push" + EventApprovalResponse = "approval.response" + EventMessageReceive = "message.receive" + EventSessionUpdated = "session.updated" + EventTeammateStatus = "teammate.status" + EventLeaderElected = "leader.elected" + EventLeaderExpired = "leader.expired" + EventError = "error" +) + +// ─── Status constants ───────────────────────────────────────────────────── + +const ( + SessionStatusActive = "active" + SessionStatusPaused = "paused" + SessionStatusCompleted = "completed" + SessionStatusFailed = "failed" +) + +const ( + MemberStatusOnline = "online" + MemberStatusOffline = "offline" + MemberStatusBusy = "busy" +) + +const ( + MemberRoleLeader = "leader" + MemberRoleTeammate = "teammate" +) + +const ( + TaskStatusPending = "pending" + TaskStatusAssigned = "assigned" + TaskStatusClaimed = "claimed" + TaskStatusRunning = "running" + TaskStatusCompleted = "completed" + TaskStatusFailed = "failed" +) + +// ─── Internal timing constants ──────────────────────────────────────────── + +const ( + 
leaderHeartbeatSec = 10 + wsReconnectInitial = 1 // seconds + wsReconnectMax = 30 // seconds +) diff --git a/client/go/team/ws.go b/client/go/team/ws.go new file mode 100644 index 0000000..e470c91 --- /dev/null +++ b/client/go/team/ws.go @@ -0,0 +1,188 @@ +package team + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "strings" + "sync" + "time" + + "github.com/gorilla/websocket" +) + +const ( + wsPingInterval = 30 * time.Second + wsWriteWait = 10 * time.Second + wsPongWait = 60 * time.Second + wsSendCap = 256 +) + +// wsConn manages a single WebSocket connection with automatic reconnect. +// It exposes two channels: +// - inbound chan CloudEvent – events received from the server +// - outbound chan []byte – serialised events to be sent to the server +type wsConn struct { + cfg Config + + inbound chan CloudEvent + outbound chan []byte + + mu sync.Mutex + rawConn *websocket.Conn + + closed chan struct{} + once sync.Once +} + +func newWSConn(cfg Config) *wsConn { + return &wsConn{ + cfg: cfg, + inbound: make(chan CloudEvent, wsSendCap), + outbound: make(chan []byte, wsSendCap), + closed: make(chan struct{}), + } +} + +// run connects (and reconnects on error) until ctx is cancelled. +// Must be called in a goroutine. 
+func (w *wsConn) run(ctx context.Context) { + backoff := time.Duration(wsReconnectInitial) * time.Second + maxBackoff := time.Duration(wsReconnectMax) * time.Second + + for { + select { + case <-ctx.Done(): + return + case <-w.closed: + return + default: + } + + conn, _, err := websocket.DefaultDialer.DialContext(ctx, w.buildURL(), nil) + if err != nil { + select { + case <-ctx.Done(): + return + case <-w.closed: + return + case <-time.After(backoff): + if backoff < maxBackoff { + backoff *= 2 + if backoff > maxBackoff { + backoff = maxBackoff + } + } + continue + } + } + backoff = time.Duration(wsReconnectInitial) * time.Second + + w.mu.Lock() + w.rawConn = conn + w.mu.Unlock() + + writeDone := make(chan struct{}) + go w.writeLoop(ctx, conn, writeDone) + w.readLoop(ctx, conn) + close(writeDone) + + w.mu.Lock() + w.rawConn = nil + w.mu.Unlock() + } +} + +func (w *wsConn) readLoop(ctx context.Context, conn *websocket.Conn) { + conn.SetReadDeadline(time.Now().Add(wsPongWait)) //nolint:errcheck + conn.SetPongHandler(func(string) error { + conn.SetReadDeadline(time.Now().Add(wsPongWait)) //nolint:errcheck + return nil + }) + + for { + _, data, err := conn.ReadMessage() + if err != nil { + return + } + var evt CloudEvent + if json.Unmarshal(data, &evt) != nil { + continue + } + select { + case w.inbound <- evt: + case <-ctx.Done(): + return + } + } +} + +func (w *wsConn) writeLoop(ctx context.Context, conn *websocket.Conn, done <-chan struct{}) { + ticker := time.NewTicker(wsPingInterval) + defer ticker.Stop() + + for { + select { + case data := <-w.outbound: + conn.SetWriteDeadline(time.Now().Add(wsWriteWait)) //nolint:errcheck + if err := conn.WriteMessage(websocket.TextMessage, data); err != nil { + return + } + + case <-ticker.C: + conn.SetWriteDeadline(time.Now().Add(wsWriteWait)) //nolint:errcheck + if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil { + return + } + + case <-done: + return + case <-ctx.Done(): + return + } + } +} + +// send 
enqueues an event for sending. Non-blocking; drops if channel is full. +func (w *wsConn) send(evt CloudEvent) error { + data, err := json.Marshal(evt) + if err != nil { + return fmt.Errorf("marshal event: %w", err) + } + select { + case w.outbound <- data: + return nil + default: + return fmt.Errorf("send channel full, event dropped (type=%s)", evt.Type) + } +} + +// close shuts down the connection cleanly. +func (w *wsConn) close() { + w.once.Do(func() { + close(w.closed) + w.mu.Lock() + if w.rawConn != nil { + w.rawConn.Close() + } + w.mu.Unlock() + }) +} + +// buildURL constructs the WebSocket URL from the Config: +// +// wss:///ws/sessions/?machineId=&token= +func (w *wsConn) buildURL() string { + base := w.cfg.ServerURL + base = strings.Replace(base, "https://", "wss://", 1) + base = strings.Replace(base, "http://", "ws://", 1) + base = strings.TrimRight(base, "/") + + q := url.Values{} + q.Set("machineId", w.cfg.MachineID) + if w.cfg.Token != "" { + q.Set("token", w.cfg.Token) + } + return fmt.Sprintf("%s/ws/sessions/%s?%s", base, w.cfg.SessionID, q.Encode()) +} diff --git a/client/ts/.gitignore b/client/ts/.gitignore new file mode 100644 index 0000000..5e9eee0 --- /dev/null +++ b/client/ts/.gitignore @@ -0,0 +1,3 @@ +node_modules/ +dist/ +*.js.map diff --git a/client/ts/e2e/README.md b/client/ts/e2e/README.md new file mode 100644 index 0000000..128dfad --- /dev/null +++ b/client/ts/e2e/README.md @@ -0,0 +1,285 @@ +# Cloud Team Agent E2E Tests + +端到端测试套件,用于验证 Cloud Team Agent SDK 的核心功能。 + +## 测试场景覆盖 + +| 测试场景 | 描述 | 文件 | +|---------|------|------| +| Basic Task Execution | 基础任务下发和执行 | `team-e2e.test.ts` | +| Task Dependency Chain | 任务依赖链执行顺序 | `team-e2e.test.ts` | +| Parallel Task Execution | 并行任务执行 | `team-e2e.test.ts` | +| Diamond Dependency Graph | 菱形依赖图 | `team-e2e.test.ts` | +| Approval Workflow | 审批流程测试 | `team-e2e.test.ts` | +| Multiple Teammates | 多 Teammate 协作 | `team-e2e.test.ts` | +| Task Retry | 任务失败重试 | `team-e2e.test.ts` | + +## 快速开始 + +### 1. 
安装依赖 + +```bash +cd costrict-web/client/ts +npm install +``` + +### 2. 配置环境变量 + +```bash +export E2E_SERVER_URL="http://localhost:8080" +export E2E_TOKEN="your-jwt-token" +export E2E_SESSION_ID="optional-session-id" +``` + +### 3. 运行测试 + +```bash +# 运行所有 E2E 测试 +npm run test:e2e:ts + +# 或直接使用 ts-node +npx ts-node --project tsconfig.e2e.json e2e/team-e2e.test.ts +``` + +## 环境变量 + +| 变量名 | 必填 | 默认值 | 说明 | +|--------|------|--------|------| +| `E2E_SERVER_URL` | 否 | `http://localhost:8080` | Team 服务端点 | +| `E2E_TOKEN` | 是 | - | JWT 认证令牌 | +| `E2E_SESSION_ID` | 否 | 自动生成 | 测试会话 ID | +| `E2E_TIMEOUT` | 否 | `30000` | 测试超时时间(ms) | + +## 测试架构 + +``` +e2e/ +├── team-e2e.test.ts # 主测试文件(测试场景) +├── helpers.ts # 测试工具函数 +├── mocks.ts # Mock 插件实现 +└── README.md # 本文档 +``` + +### 核心组件 + +#### TaskExecutionCollector + +跟踪任务执行状态的工具类: + +```typescript +const collector = new TaskExecutionCollector(); + +// 获取执行统计 +console.log(collector.getCompletedCount()); // 完成的任务数 +console.log(collector.getFailedCount()); // 失败的任务数 +console.log(collector.getExecutionOrder()); // 执行顺序 + +// 打印报告 +collector.printReport(); +``` + +#### Mock Plugins + +**MockLeaderPlugin**: 预定义任务计划的 Leader 插件 + +```typescript +const leaderPlugin = new MockLeaderPlugin([ + { id: 'task-1', description: 'Task 1', priority: 9 }, + { id: 'task-2', description: 'Task 2', priority: 8, dependencies: ['task-1'] }, +]); +``` + +**MockTeammatePlugin**: 模拟任务执行的 Teammate 插件 + +```typescript +const collector = new TaskExecutionCollector(); +const teammatePlugin = new MockTeammatePlugin( + collector, + async (task, reporter) => { + reporter.report(50, 'executing'); + return { output: 'done' }; + }, + 100 // 执行延迟(ms) +); +``` + +**FailingTeammatePlugin**: 模拟任务失败(用于测试重试) + +```typescript +const failingPlugin = new FailingTeammatePlugin( + ['task-1', 'task-2'], // 会失败的任务ID + 2 // 失败2次后才成功 +); +``` + +#### TaskPlanBuilder + +流式 API 构建任务计划: + +```typescript +// 线性依赖链 +const linear = TaskPlanBuilder.linear('A', 'B', 'C'); +// A → B → C + 
+// 并行任务 +const parallel = TaskPlanBuilder.parallel(3, 'Task'); +// Task 1, Task 2, Task 3 (无依赖) + +// 自定义构建 +const custom = TaskPlanBuilder.create() + .addTask('First') + .addDependentTask('Second', 'First') + .build(); +``` + +## 编写新测试 + +### 基本模板 + +```typescript +async function testMyScenario(): Promise { + console.log('\n=== Test: My Scenario ==='); + + const sessionId = generateSessionId(); + const collector = new TaskExecutionCollector(); + + // 创建 Teammate + const teammateClient = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId('teammate'), + role: MemberRoleTeammate, + }).withTeammatePlugin(new MockTeammatePlugin(collector)); + + // 创建 Leader + const taskSpecs = TaskPlanBuilder.linear('Step 1', 'Step 2', 'Step 3'); + + const leaderClient = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId('leader'), + role: MemberRoleLeader, + }).withLeaderPlugin(new MockLeaderPlugin(taskSpecs)); + + try { + // 启动客户端 + await teammateClient.start(); + await delay(500); + await leaderClient.start(); + await delay(1000); + + // 提交计划 + await leaderClient.submitPlan('Test scenario'); + + // 等待完成 + await waitFor(() => collector.getCompletedCount() >= 3, 10000); + + // 验证结果 + console.log(' ✓ Test passed\n'); + } finally { + // 清理 + leaderClient.stop(); + teammateClient.stop(); + } +} +``` + +### 测试工具函数 + +| 函数 | 用途 | +|------|------| +| `generateSessionId()` | 生成唯一会话ID | +| `generateMachineId(prefix)` | 生成机器ID | +| `delay(ms)` | 异步延迟 | +| `waitFor(condition, timeout, interval)` | 等待条件满足 | +| `createTaskChain(descriptions)` | 创建依赖链 | +| `createParallelTasks(count)` | 创建并行任务 | +| `createDiamondGraph(...)` | 创建菱形依赖图 | + +## 调试技巧 + +### 1. 增加日志输出 + +```typescript +// 在测试中添加详细日志 +console.log(' Current executions:', collector.getExecutions()); +console.log(' Execution order:', collector.getExecutionOrder()); +``` + +### 2. 
延长超时时间 + +```typescript +// 对于复杂的测试,增加超时 +await waitFor(() => collector.getCompletedCount() >= 10, 60000); +``` + +### 3. 单测试运行 + +注释掉其他测试,只运行特定测试: + +```typescript +const tests = [ + // { name: 'Other Test', fn: testOther }, + { name: 'My Test', fn: testMyScenario }, +]; +``` + +## CI/CD 集成 + +```yaml +# .github/workflows/e2e.yml +name: E2E Tests + +on: [push, pull_request] + +jobs: + e2e: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Setup Node.js + uses: actions/setup-node@v3 + with: + node-version: '20' + + - name: Install dependencies + run: cd costrict-web/client/ts && npm ci + + - name: Start test server + run: docker-compose up -d + + - name: Wait for server + run: sleep 10 + + - name: Run E2E tests + run: cd costrict-web/client/ts && npm run test:e2e:ts + env: + E2E_SERVER_URL: http://localhost:8080 + E2E_TOKEN: ${{ secrets.TEST_TOKEN }} +``` + +## 常见问题 + +### Q: 测试连接失败 +A: 检查服务端是否运行,以及 `E2E_TOKEN` 是否有效。 + +### Q: 任务未执行 +A: 确保 Teammate 在 Leader 提交计划前已启动并注册。 + +### Q: 依赖任务顺序错误 +A: 检查是否正确设置了 `dependencies` 数组,并确保任务 ID 正确。 + +### Q: 并行任务未并行执行 +A: 这是预期行为,实际并行度取决于服务端调度和 Teammate 数量。 + +## 扩展测试 + +如需添加更多测试场景: + +1. 在 `team-e2e.test.ts` 中添加新的测试函数 +2. 将新测试添加到 `tests` 数组 +3. 运行测试验证 +4. 
更新本文档 diff --git a/client/ts/e2e/helpers.ts b/client/ts/e2e/helpers.ts new file mode 100644 index 0000000..252e49e --- /dev/null +++ b/client/ts/e2e/helpers.ts @@ -0,0 +1,304 @@ +/** + * e2e/helpers.ts — Test utilities for Cloud Team Agent E2E tests + */ + +import { v4 as uuidv4 } from 'uuid'; +import type { Task, TaskSpec, TaskResult, Member, CloudEvent } from '../src/types.js'; +import type { ProgressReporter } from '../src/plugin.js'; + +/** + * Generate a unique session ID for testing + */ +export function generateSessionId(): string { + return `test-session-${uuidv4().slice(0, 8)}`; +} + +/** + * Generate a unique machine ID for testing + */ +export function generateMachineId(prefix: string): string { + return `${prefix}-${uuidv4().slice(0, 8)}`; +} + +/** + * Create a simple task spec for testing + */ +export function createTaskSpec( + description: string, + overrides: Partial = {} +): TaskSpec { + return { + id: uuidv4(), + description, + priority: 5, + ...overrides, + }; +} + +/** + * Create a task dependency chain + */ +export function createTaskChain(descriptions: string[]): TaskSpec[] { + const tasks: TaskSpec[] = []; + let prevId: string | undefined; + + for (const description of descriptions) { + const task: TaskSpec = { + id: uuidv4(), + description, + priority: 10 - tasks.length, + dependencies: prevId ? 
[prevId] : undefined, + }; + tasks.push(task); + prevId = task.id; + } + + return tasks; +} + +/** + * Create parallel tasks (no dependencies) + */ +export function createParallelTasks(count: number, prefix = 'Task'): TaskSpec[] { + return Array.from({ length: count }, (_, i) => ({ + id: uuidv4(), + description: `${prefix} ${i + 1}`, + priority: 5, + })); +} + +/** + * Create a diamond-shaped dependency graph + * A + * / \ + * B C + * \ / + * D + */ +export function createDiamondGraph( + taskA: string, + taskB: string, + taskC: string, + taskD: string +): TaskSpec[] { + const idA = uuidv4(); + const idB = uuidv4(); + const idC = uuidv4(); + const idD = uuidv4(); + + return [ + { id: idA, description: taskA, priority: 10 }, + { id: idB, description: taskB, priority: 9, dependencies: [idA] }, + { id: idC, description: taskC, priority: 9, dependencies: [idA] }, + { id: idD, description: taskD, priority: 8, dependencies: [idB, idC] }, + ]; +} + +/** + * Mock member factory for testing + */ +export function createMockMember(overrides: Partial = {}): Member { + return { + id: uuidv4(), + sessionId: 'test-session', + machineId: generateMachineId('machine'), + machineName: 'Test Machine', + role: 'teammate', + status: 'online', + ...overrides, + }; +} + +/** + * Create multiple mock members + */ +export function createMockMembers(count: number, role: string = 'teammate'): Member[] { + return Array.from({ length: count }, (_, i) => + createMockMember({ + role, + machineName: `Test Machine ${i + 1}`, + }) + ); +} + +/** + * Delay function for async testing + */ +export function delay(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +/** + * Wait for a condition with timeout + */ +export async function waitFor( + condition: () => boolean | Promise, + timeoutMs: number = 5000, + intervalMs: number = 100 +): Promise { + const startTime = Date.now(); + while (Date.now() - startTime < timeoutMs) { + if (await condition()) { + return; + } + 
await delay(intervalMs);
+  }
+  throw new Error(`Timeout waiting for condition after ${timeoutMs}ms`);
+}
+
+/**
+ * Collect all values from an async iterator
+ */
+export async function collectAsync<T>(
+  iterator: AsyncIterable<T>,
+  maxItems: number = 100
+): Promise<T[]> {
+  const results: T[] = [];
+  for await (const item of iterator) {
+    results.push(item);
+    if (results.length >= maxItems) {
+      break;
+    }
+  }
+  return results;
+}
+
+/**
+ * Test result collector for tracking task execution
+ */
+export class TaskExecutionCollector {
+  private executions: Array<{
+    taskId: string;
+    description: string;
+    status: 'started' | 'completed' | 'failed';
+    startTime: number;
+    endTime?: number;
+    result?: TaskResult;
+    error?: string;
+  }> = [];
+
+  recordStart(taskId: string, description: string): void {
+    this.executions.push({
+      taskId,
+      description,
+      status: 'started',
+      startTime: Date.now(),
+    });
+  }
+
+  // NOTE(review): find() updates the FIRST entry for a taskId. When a task is
+  // retried, recordStart pushes a second entry but the first one keeps being
+  // updated and the later attempts stay 'started' — confirm this is the
+  // intended bookkeeping for the retry test.
+  recordComplete(taskId: string, result: TaskResult): void {
+    const exec = this.executions.find((e) => e.taskId === taskId);
+    if (exec) {
+      exec.status = 'completed';
+      exec.endTime = Date.now();
+      exec.result = result;
+    }
+  }
+
+  recordFailure(taskId: string, error: string): void {
+    const exec = this.executions.find((e) => e.taskId === taskId);
+    if (exec) {
+      exec.status = 'failed';
+      exec.endTime = Date.now();
+      exec.error = error;
+    }
+  }
+
+  getExecutions() {
+    return [...this.executions];
+  }
+
+  getCompletedCount(): number {
+    return this.executions.filter((e) => e.status === 'completed').length;
+  }
+
+  getFailedCount(): number {
+    return this.executions.filter((e) => e.status === 'failed').length;
+  }
+
+  getExecutionOrder(): string[] {
+    return this.executions
+      .filter((e) => e.status === 'completed')
+      .sort((a, b) => (a.endTime ?? 0) - (b.endTime ??
0)) + .map((e) => e.description); + } + + printReport(): void { + console.log('\n=== Task Execution Report ==='); + console.log(`Total: ${this.executions.length}`); + console.log(`Completed: ${this.getCompletedCount()}`); + console.log(`Failed: ${this.getFailedCount()}`); + console.log('\nExecution Order:'); + this.getExecutionOrder().forEach((desc, i) => { + console.log(` ${i + 1}. ${desc}`); + }); + console.log('============================\n'); + } +} + +/** + * Environment variable helper + */ +export function getEnv(name: string, defaultValue?: string): string { + const value = process.env[name] ?? defaultValue; + if (value === undefined) { + throw new Error(`Environment variable ${name} is required`); + } + return value; +} + +/** + * Test configuration + */ +export interface TestConfig { + serverUrl: string; + token: string; + sessionId: string; + timeoutMs: number; +} + +export function loadTestConfig(): TestConfig { + return { + serverUrl: getEnv('E2E_SERVER_URL', 'http://localhost:8080'), + token: getEnv('E2E_TOKEN', ''), + sessionId: getEnv('E2E_SESSION_ID', generateSessionId()), + timeoutMs: parseInt(getEnv('E2E_TIMEOUT', '30000'), 10), + }; +} + +/** + * Create a new team session on the server and return its ID. + * Must be called before starting any TeamClient that uses the session. 
 */
+export async function createSession(
+  serverUrl: string,
+  token: string,
+  name?: string
+): Promise<string> {
+  const { TeamClient } = await import('../src/index.js');
+  return TeamClient.createSession(serverUrl, token, name);
+}
+
+/**
+ * Retry a function with exponential backoff
+ */
+export async function retry<T>(
+  fn: () => Promise<T>,
+  maxRetries: number = 3,
+  delayMs: number = 1000
+): Promise<T> {
+  let lastError: Error;
+
+  for (let i = 0; i < maxRetries; i++) {
+    try {
+      return await fn();
+    } catch (error) {
+      lastError = error as Error;
+      if (i < maxRetries - 1) {
+        await delay(delayMs * Math.pow(2, i));
+      }
+    }
+  }
+
+  // NOTE(review): if maxRetries <= 0 the loop never runs and this throws
+  // undefined — consider guarding or throwing a dedicated Error.
+  throw lastError!;
+}
diff --git a/client/ts/e2e/mocks.ts b/client/ts/e2e/mocks.ts
new file mode 100644
index 0000000..05920b5
--- /dev/null
+++ b/client/ts/e2e/mocks.ts
@@ -0,0 +1,417 @@
+/**
+ * e2e/mocks.ts — Mock plugin implementations for E2E testing
+ */
+
+import { v4 as uuidv4 } from 'uuid';
+import type {
+  ApprovalRequest,
+  ExploreQueryResult,
+  ExploreRequest,
+  ExploreResult,
+  Member,
+  PlanTasksInput,
+  Task,
+  TaskResult,
+  TaskSpec,
+} from '../src/types.js';
+import type {
+  ApprovalPlugin,
+  ExplorePlugin,
+  LeaderPlugin,
+  ProgressReporter,
+  TeammatePlugin,
+} from '../src/plugin.js';
+import { TaskExecutionCollector } from './helpers.js';
+
+/**
+ * Mock Leader Plugin — Creates predefined task plans
+ */
+export class MockLeaderPlugin implements LeaderPlugin {
+  private taskSpecs: TaskSpec[];
+  private onPlanCalled?: (input: PlanTasksInput) => void;
+
+  constructor(
+    taskSpecs: TaskSpec[],
+    onPlanCalled?: (input: PlanTasksInput) => void
+  ) {
+    this.taskSpecs = taskSpecs;
+    this.onPlanCalled = onPlanCalled;
+  }
+
+  async planTasks(
+    _signal: AbortSignal,
+    input: PlanTasksInput
+  ): Promise<TaskSpec[]> {
+    this.onPlanCalled?.(input);
+
+    // Pick online teammates to assign tasks to (round-robin)
+    const teammates = input.members.filter((m) => m.role === 'teammate');
+
+    return this.taskSpecs.map((spec, i) => ({
+      ...spec,
+      id:
spec.id ?? uuidv4(), + assignedMemberId: teammates.length > 0 + ? teammates[i % teammates.length].id + : spec.assignedMemberId, + })); + } +} + +/** + * Dynamic Leader Plugin — Creates tasks based on goal + */ +export class DynamicLeaderPlugin implements LeaderPlugin { + private taskGenerator: (goal: string, members: Member[]) => TaskSpec[]; + private onPlanCalled?: (input: PlanTasksInput) => void; + + constructor( + taskGenerator: (goal: string, members: Member[]) => TaskSpec[], + onPlanCalled?: (input: PlanTasksInput) => void + ) { + this.taskGenerator = taskGenerator; + this.onPlanCalled = onPlanCalled; + } + + async planTasks( + _signal: AbortSignal, + input: PlanTasksInput + ): Promise { + this.onPlanCalled?.(input); + const tasks = this.taskGenerator(input.goal, input.members); + return tasks.map((spec) => ({ + ...spec, + id: spec.id ?? uuidv4(), + })); + } +} + +/** + * Mock Teammate Plugin — Executes tasks with configurable behavior + */ +export class MockTeammatePlugin implements TeammatePlugin { + private collector: TaskExecutionCollector; + private executeFn: ( + task: Task, + reporter: ProgressReporter + ) => Promise; + private delayMs: number; + + constructor( + collector: TaskExecutionCollector, + executeFn?: (task: Task, reporter: ProgressReporter) => Promise, + delayMs: number = 100 + ) { + this.collector = collector; + this.delayMs = delayMs; + this.executeFn = + executeFn ?? 
+ (async (task, reporter) => { + reporter.report(10, 'preparing'); + await this.sleep(this.delayMs); + reporter.report(50, 'executing'); + await this.sleep(this.delayMs); + reporter.report(100, 'done'); + return { + output: `Executed: ${task.description}`, + files: [], + }; + }); + } + + async executeTask( + signal: AbortSignal, + task: Task, + reporter: ProgressReporter + ): Promise { + this.collector.recordStart(task.id, task.description); + + try { + const result = await this.executeFn(task, reporter); + this.collector.recordComplete(task.id, result); + return result; + } catch (error) { + const errorMsg = error instanceof Error ? error.message : String(error); + this.collector.recordFailure(task.id, errorMsg); + throw error; + } + } + + private sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); + } +} + +/** + * Failing Teammate Plugin — Simulates task failures for testing retry logic + */ +export class FailingTeammatePlugin implements TeammatePlugin { + private failTaskIds: Set; + private failCount: Map; + private maxFailCount: number; + + constructor( + failTaskIds: string[] = [], + maxFailCount: number = 1 // Fail N times before succeeding + ) { + this.failTaskIds = new Set(failTaskIds); + this.failCount = new Map(); + this.maxFailCount = maxFailCount; + } + + async executeTask( + _signal: AbortSignal, + task: Task, + reporter: ProgressReporter + ): Promise { + reporter.report(10, 'preparing'); + + if (this.failTaskIds.has(task.id) || this.failTaskIds.has('all')) { + const currentFails = this.failCount.get(task.id) ?? 
0; + + if (currentFails < this.maxFailCount) { + this.failCount.set(task.id, currentFails + 1); + reporter.report(50, 'failing'); + throw new Error(`Simulated failure for task ${task.id}`); + } + } + + reporter.report(100, 'done'); + return { + output: `Successfully executed: ${task.description}`, + files: [], + }; + } +} + +/** + * Mock Approval Plugin — Auto-approves or rejects based on configuration + */ +export class MockApprovalPlugin implements ApprovalPlugin { + private autoApprove: boolean; + private shouldApproveFn?: (req: ApprovalRequest) => boolean; + private delayMs: number; + + constructor( + autoApprove: boolean = true, + shouldApproveFn?: (req: ApprovalRequest) => boolean, + delayMs: number = 100 + ) { + this.autoApprove = autoApprove; + this.shouldApproveFn = shouldApproveFn; + this.delayMs = delayMs; + } + + async handleApproval( + _signal: AbortSignal, + req: ApprovalRequest + ): Promise<{ approved: boolean; note?: string }> { + await this.sleep(this.delayMs); + + const approved = this.shouldApproveFn + ? this.shouldApproveFn(req) + : this.autoApprove; + + return { + approved, + note: approved ? 'Auto-approved by test' : 'Auto-rejected by test', + }; + } + + private sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); + } +} + +/** + * Mock Explore Plugin — Returns predefined or dynamic explore results + */ +export class MockExplorePlugin implements ExplorePlugin { + private results: Map; + private handler?: (query: ExploreQuery) => ExploreQueryResult; + + constructor( + results?: Map, + handler?: (query: ExploreQuery) => ExploreQueryResult + ) { + this.results = results ?? 
new Map(); + this.handler = handler; + } + + async explore( + _signal: AbortSignal, + req: ExploreRequest + ): Promise { + const queryResults: ExploreQueryResult[] = []; + + for (const query of req.queries) { + let result: ExploreQueryResult; + + if (this.handler) { + result = this.handler(query); + } else { + result = + this.results.get(query.type) ?? + this.createDefaultResult(query.type); + } + + queryResults.push(result); + } + + return { + requestId: req.requestId, + queryResults, + }; + } + + private createDefaultResult(type: string): ExploreQueryResult { + return { + type, + output: `Mock ${type} result`, + truncated: false, + }; + } +} + +/** + * Simple Explore Plugin — Returns file tree and search results from local FS + */ +export class SimpleExplorePlugin implements ExplorePlugin { + private basePath: string; + + constructor(basePath: string = process.cwd()) { + this.basePath = basePath; + } + + async explore( + _signal: AbortSignal, + req: ExploreRequest + ): Promise { + const queryResults: ExploreQueryResult[] = []; + + for (const query of req.queries) { + const result = await this.handleQuery(query); + queryResults.push(result); + } + + return { + requestId: req.requestId, + queryResults, + }; + } + + private async handleQuery(query: ExploreQuery): Promise { + switch (query.type) { + case 'file_tree': + return { + type: 'file_tree', + output: JSON.stringify({ path: this.basePath, files: ['mock'] }), + truncated: false, + }; + + case 'content_search': + return { + type: 'content_search', + output: `Search results for: ${query.params['pattern']}`, + truncated: false, + }; + + case 'symbol_search': + return { + type: 'symbol_search', + output: `Symbol: ${query.params['symbol']}`, + truncated: false, + }; + + case 'git_log': + return { + type: 'git_log', + output: 'abc123 Mock commit message', + truncated: false, + }; + + default: + return { + type: query.type, + output: 'Unknown query type', + truncated: false, + }; + } + } +} + +// Type for ExploreQuery 
since it's not exported directly +interface ExploreQuery { + type: string; + params: Record; +} + +/** + * Task Plan Builder — Fluent API for creating test task plans + */ +export class TaskPlanBuilder { + private tasks: TaskSpec[] = []; + + addTask(description: string, overrides: Partial = {}): this { + this.tasks.push({ + id: uuidv4(), + description, + priority: 5, + ...overrides, + }); + return this; + } + + addDependentTask( + description: string, + dependsOn: string | string[], + overrides: Partial = {} + ): this { + const dependencies = Array.isArray(dependsOn) ? dependsOn : [dependsOn]; + this.tasks.push({ + id: uuidv4(), + description, + priority: 5, + dependencies, + ...overrides, + }); + return this; + } + + build(): TaskSpec[] { + return [...this.tasks]; + } + + static create(): TaskPlanBuilder { + return new TaskPlanBuilder(); + } + + static linear(...descriptions: string[]): TaskSpec[] { + const builder = new TaskPlanBuilder(); + let prevId: string | undefined; + + for (const desc of descriptions) { + const task: TaskSpec = { + id: uuidv4(), + description: desc, + priority: 10 - builder.tasks.length, + }; + + if (prevId) { + task.dependencies = [prevId]; + } + + builder.tasks.push(task); + prevId = task.id; + } + + return builder.build(); + } + + static parallel(count: number, prefix = 'Task'): TaskSpec[] { + return Array.from({ length: count }, (_, i) => ({ + id: uuidv4(), + description: `${prefix} ${i + 1}`, + priority: 5, + })); + } +} diff --git a/client/ts/e2e/team-e2e.test.ts b/client/ts/e2e/team-e2e.test.ts new file mode 100644 index 0000000..7fe40ac --- /dev/null +++ b/client/ts/e2e/team-e2e.test.ts @@ -0,0 +1,564 @@ +/** + * e2e/team-e2e.test.ts — End-to-end tests for Cloud Team Agent SDK + * + * Run with: npx ts-node --project tsconfig.e2e.json e2e/team-e2e.test.ts + * Or: npm run test:e2e:ts + * + * Required environment variables: + * E2E_SERVER_URL — Team server URL (default: http://localhost:8080) + * E2E_TOKEN — JWT authentication token + 
* E2E_SESSION_ID — Optional session ID (auto-generated if not set) + */ + +import { v4 as uuidv4 } from 'uuid'; +import { TeamClient, MemberRoleLeader, MemberRoleTeammate } from '../src/index.js'; +import type { TaskSpec, TaskResult, Member } from '../src/types.js'; +import { + createSession, + generateMachineId, + loadTestConfig, + delay, + waitFor, + TaskExecutionCollector, + createTaskChain, + createParallelTasks, + createDiamondGraph, +} from './helpers.js'; +import { + MockLeaderPlugin, + MockTeammatePlugin, + MockApprovalPlugin, + FailingTeammatePlugin, + TaskPlanBuilder, +} from './mocks.js'; + +// Test configuration +const config = loadTestConfig(); +const TEST_TIMEOUT = 60000; // 60 seconds + +/** + * Test Scenario 1: Basic Task Execution + * Leader creates a simple task plan, Teammate executes it + */ +async function testBasicTaskExecution(): Promise { + console.log('\n=== Test: Basic Task Execution ==='); + + const sessionId = await createSession(config.serverUrl, config.token); + const collector = new TaskExecutionCollector(); + + // Create Teammate + const teammateClient = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId('teammate'), + machineName: 'Test Teammate', + role: MemberRoleTeammate, + }).withTeammatePlugin(new MockTeammatePlugin(collector)); + + // Create Leader with simple task plan + const taskSpecs: TaskSpec[] = [ + { id: uuidv4(), description: 'Echo Hello', priority: 9 }, + { id: uuidv4(), description: 'Echo World', priority: 8 }, + ]; + + const leaderClient = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId('leader'), + machineName: 'Test Leader', + role: MemberRoleLeader, + }).withLeaderPlugin(new MockLeaderPlugin(taskSpecs)); + + try { + // Start Teammate first + await teammateClient.start(); + await delay(500); // Give teammate time to register + + // Start Leader and submit plan + await 
leaderClient.start(); + await delay(1000); + + await leaderClient.submitPlan('Basic test plan'); + + // Wait for tasks to complete + await waitFor(() => collector.getCompletedCount() >= 2, 10000, 500); + + // Verify results + const executions = collector.getExecutions(); + console.log(` ✓ Completed ${executions.length} tasks`); + + if (executions.length !== 2) { + throw new Error(`Expected 2 tasks, got ${executions.length}`); + } + + console.log(' ✓ Basic task execution passed\n'); + } finally { + leaderClient.stop(); + teammateClient.stop(); + } +} + +/** + * Test Scenario 2: Task Dependency Chain + * Tasks execute in order due to dependencies + */ +async function testTaskDependencyChain(): Promise { + console.log('\n=== Test: Task Dependency Chain ==='); + + const sessionId = await createSession(config.serverUrl, config.token); + const collector = new TaskExecutionCollector(); + + // Create Teammate + const teammateClient = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId('teammate'), + role: MemberRoleTeammate, + }).withTeammatePlugin(new MockTeammatePlugin(collector, async (task, reporter) => { + reporter.report(10, 'preparing'); + await delay(200); + reporter.report(100, 'done'); + return { output: `Executed: ${task.description}` }; + })); + + // Create dependent task chain: A → B → C + const taskSpecs = createTaskChain(['Task A', 'Task B', 'Task C']); + + const leaderClient = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId('leader'), + role: MemberRoleLeader, + }).withLeaderPlugin(new MockLeaderPlugin(taskSpecs)); + + try { + await teammateClient.start(); + await delay(500); + + await leaderClient.start(); + await delay(1000); + + await leaderClient.submitPlan('Dependency chain test'); + + // Wait for all tasks + await waitFor(() => collector.getCompletedCount() >= 3, 15000, 500); + + // Verify execution order + const order = 
collector.getExecutionOrder(); + console.log(` Execution order: ${order.join(' → ')}`); + + if (order[0] !== 'Task A' || order[1] !== 'Task B' || order[2] !== 'Task C') { + throw new Error(`Tasks executed out of order: ${order.join(', ')}`); + } + + console.log(' ✓ Dependency chain execution passed\n'); + } finally { + leaderClient.stop(); + teammateClient.stop(); + } +} + +/** + * Test Scenario 3: Parallel Task Execution + * Multiple independent tasks execute concurrently + */ +async function testParallelTaskExecution(): Promise { + console.log('\n=== Test: Parallel Task Execution ==='); + + const sessionId = await createSession(config.serverUrl, config.token); + const collector = new TaskExecutionCollector(); + const startTimes = new Map(); + + // Create Teammate with delay to detect parallelism + const teammateClient = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId('teammate'), + role: MemberRoleTeammate, + }).withTeammatePlugin(new MockTeammatePlugin(collector, async (task, reporter) => { + startTimes.set(task.id, Date.now()); + reporter.report(50, 'executing'); + await delay(500); // Fixed delay for each task + reporter.report(100, 'done'); + return { output: `Executed: ${task.description}` }; + })); + + // Create 3 parallel tasks + const taskSpecs = createParallelTasks(3, 'Parallel Task'); + + const leaderClient = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId('leader'), + role: MemberRoleLeader, + }).withLeaderPlugin(new MockLeaderPlugin(taskSpecs)); + + try { + await teammateClient.start(); + await delay(500); + + await leaderClient.start(); + await delay(1000); + + const submitStart = Date.now(); + await leaderClient.submitPlan('Parallel execution test'); + + // Wait for all tasks + await waitFor(() => collector.getCompletedCount() >= 3, 15000, 500); + const totalTime = Date.now() - submitStart; + + // If truly parallel, 
total time should be ~500ms, not ~1500ms + console.log(` Total execution time: ${totalTime}ms`); + + if (totalTime > 1200) { + console.log(' ⚠ Tasks may not be executing in parallel (expected < 1200ms)'); + } else { + console.log(' ✓ Parallel execution confirmed'); + } + + console.log(' ✓ Parallel task execution passed\n'); + } finally { + leaderClient.stop(); + teammateClient.stop(); + } +} + +/** + * Test Scenario 4: Diamond Dependency Graph + * A + * / \ + * B C + * \ / + * D + */ +async function testDiamondDependencyGraph(): Promise { + console.log('\n=== Test: Diamond Dependency Graph ==='); + + const sessionId = await createSession(config.serverUrl, config.token); + const collector = new TaskExecutionCollector(); + + const teammateClient = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId('teammate'), + role: MemberRoleTeammate, + }).withTeammatePlugin(new MockTeammatePlugin(collector)); + + // Create diamond graph + const taskSpecs = createDiamondGraph( + 'Diamond Task A', + 'Diamond Task B', + 'Diamond Task C', + 'Diamond Task D' + ); + + const leaderClient = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId('leader'), + role: MemberRoleLeader, + }).withLeaderPlugin(new MockLeaderPlugin(taskSpecs)); + + try { + await teammateClient.start(); + await delay(500); + + await leaderClient.start(); + await delay(1000); + + await leaderClient.submitPlan('Diamond graph test'); + + // Wait for all tasks + await waitFor(() => collector.getCompletedCount() >= 4, 15000, 500); + + const order = collector.getExecutionOrder(); + console.log(` Execution order: ${order.join(' → ')}`); + + // Verify A is first, D is last + if (order[0] !== 'Diamond Task A') { + throw new Error('Task A should be first'); + } + if (order[order.length - 1] !== 'Diamond Task D') { + throw new Error('Task D should be last'); + } + + console.log(' ✓ Diamond dependency 
graph passed\n');
+  } finally {
+    leaderClient.stop();
+    teammateClient.stop();
+  }
+}
+
+/**
+ * Test Scenario 5: Approval Workflow
+ * Teammate requests approval, Leader approves
+ */
+async function testApprovalWorkflow(): Promise<void> {
+  console.log('\n=== Test: Approval Workflow ===');
+
+  const sessionId = await createSession(config.serverUrl, config.token);
+  // NOTE(review): approvalRequested is set but never asserted below —
+  // consider verifying it after submitPlan completes.
+  let approvalRequested = false;
+
+  // Teammate that requests approval
+  const teammateClient = new TeamClient({
+    serverUrl: config.serverUrl,
+    token: config.token,
+    sessionId,
+    machineId: generateMachineId('teammate'),
+    role: MemberRoleTeammate,
+  }).withTeammatePlugin({
+    async executeTask(_signal, task, reporter) {
+      reporter.report(50, 'needs approval');
+      approvalRequested = true;
+
+      // In real scenario, approval would be requested automatically via tool interception
+      // Here we simulate the approval flow
+      reporter.report(100, 'done');
+      return { output: 'Approved and executed' };
+    },
+  });
+
+  // Leader with auto-approval
+  const leaderClient = new TeamClient({
+    serverUrl: config.serverUrl,
+    token: config.token,
+    sessionId,
+    machineId: generateMachineId('leader'),
+    role: MemberRoleLeader,
+  })
+    .withLeaderPlugin(new MockLeaderPlugin([
+      { id: uuidv4(), description: 'Test Approval Task', priority: 9 },
+    ]))
+    .withApprovalPlugin(new MockApprovalPlugin(true));
+
+  try {
+    await teammateClient.start();
+    await delay(500);
+
+    await leaderClient.start();
+    await delay(1000);
+
+    await leaderClient.submitPlan('Approval workflow test');
+
+    // Wait and verify
+    await delay(3000);
+
+    console.log(' ✓ Approval workflow test completed\n');
+  } finally {
+    leaderClient.stop();
+    teammateClient.stop();
+  }
+}
+
+/**
+ * Test Scenario 6: Multiple Teammates
+ * One Leader, multiple Teammates
+ */
+async function testMultipleTeammates(): Promise<void> {
+  console.log('\n=== Test: Multiple Teammates ===');
+
+  const sessionId = await createSession(config.serverUrl, config.token);
+  const collectors:
TaskExecutionCollector[] = []; + const clients: TeamClient[] = []; + + // Create 2 Teammates + for (let i = 0; i < 2; i++) { + const collector = new TaskExecutionCollector(); + collectors.push(collector); + + const client = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId(`teammate-${i + 1}`), + machineName: `Teammate ${i + 1}`, + role: MemberRoleTeammate, + }).withTeammatePlugin(new MockTeammatePlugin(collector)); + + clients.push(client); + await client.start(); + } + + await delay(1000); + + // Create Leader with 4 tasks + const taskSpecs = createParallelTasks(4, 'Multi-Teammate Task'); + + const leaderClient = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId('leader'), + role: MemberRoleLeader, + }).withLeaderPlugin(new MockLeaderPlugin(taskSpecs)); + + try { + await leaderClient.start(); + await delay(1000); + + await leaderClient.submitPlan('Multiple teammates test'); + + // Wait for all tasks + const totalCompleted = () => + collectors.reduce((sum, c) => sum + c.getCompletedCount(), 0); + + await waitFor(() => totalCompleted() >= 4, 15000, 500); + + // Show distribution + collectors.forEach((c, i) => { + console.log(` Teammate ${i + 1} executed ${c.getCompletedCount()} tasks`); + }); + + console.log(' ✓ Multiple teammates test passed\n'); + } finally { + leaderClient.stop(); + clients.forEach((c) => c.stop()); + } +} + +/** + * Test Scenario 7: Task Retry on Failure + */ +async function testTaskRetry(): Promise { + console.log('\n=== Test: Task Retry on Failure ==='); + + const sessionId = await createSession(config.serverUrl, config.token); + const collector = new TaskExecutionCollector(); + let attemptCount = 0; + + const teammateClient = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId('teammate'), + role: MemberRoleTeammate, + }).withTeammatePlugin(new 
MockTeammatePlugin(collector, async (task, reporter) => { + attemptCount++; + reporter.report(50, 'executing'); + + if (attemptCount <= 2) { + // Fail first 2 attempts + throw new Error(`Simulated failure (attempt ${attemptCount})`); + } + + reporter.report(100, 'done'); + return { output: `Succeeded after ${attemptCount} attempts` }; + })); + + const leaderClient = new TeamClient({ + serverUrl: config.serverUrl, + token: config.token, + sessionId, + machineId: generateMachineId('leader'), + role: MemberRoleLeader, + }).withLeaderPlugin( + new MockLeaderPlugin([ + { id: uuidv4(), description: 'Retry Test Task', priority: 9, maxRetries: 3 }, + ]) + ); + + try { + await teammateClient.start(); + await delay(500); + + await leaderClient.start(); + await delay(1000); + + await leaderClient.submitPlan('Retry test'); + + // Wait for completion + await waitFor(() => collector.getCompletedCount() >= 1, 15000, 500); + + console.log(` Task succeeded after ${attemptCount} attempts`); + console.log(' ✓ Retry test passed\n'); + } finally { + leaderClient.stop(); + teammateClient.stop(); + } +} + +/** + * Run all tests + */ +async function runAllTests(): Promise<void> { + console.log('\n╔════════════════════════════════════════════════════════════╗'); + console.log('║ Cloud Team Agent SDK - End-to-End Tests ║'); + console.log('╚════════════════════════════════════════════════════════════╝'); + console.log(`Server: ${config.serverUrl}`); + console.log(`Session: ${config.sessionId}`); + console.log(`Timeout: ${TEST_TIMEOUT}ms\n`); + + const results: Array<{ name: string; passed: boolean; error?: string }> = []; + + const tests = [ + { name: 'Basic Task Execution', fn: testBasicTaskExecution }, + { name: 'Task Dependency Chain', fn: testTaskDependencyChain }, + { name: 'Parallel Task Execution', fn: testParallelTaskExecution }, + { name: 'Diamond Dependency Graph', fn: testDiamondDependencyGraph }, + { name: 'Approval Workflow', fn: testApprovalWorkflow }, + { name: 'Multiple Teammates', 
fn: testMultipleTeammates }, + { name: 'Task Retry', fn: testTaskRetry }, + ]; + + for (const test of tests) { + try { + await test.fn(); + results.push({ name: test.name, passed: true }); + } catch (error) { + const errorMsg = error instanceof Error ? error.message : String(error); + console.error(` ✗ ${test.name} failed: ${errorMsg}`); + results.push({ name: test.name, passed: false, error: errorMsg }); + } + } + + // Print summary + console.log('\n╔════════════════════════════════════════════════════════════╗'); + console.log('║ Test Summary ║'); + console.log('╚════════════════════════════════════════════════════════════╝'); + + const passed = results.filter((r) => r.passed).length; + const failed = results.filter((r) => !r.passed).length; + + results.forEach((r) => { + const status = r.passed ? '✓ PASS' : '✗ FAIL'; + console.log(` ${status}: ${r.name}`); + if (r.error) { + console.log(` ${r.error}`); + } + }); + + console.log(`\nTotal: ${results.length} | Passed: ${passed} | Failed: ${failed}`); + + if (failed > 0) { + process.exit(1); + } +} + +// Run tests if executed directly +const isMainModule = import.meta.url.startsWith('file://') && process.argv[1] && import.meta.url.includes(process.argv[1]); +if (isMainModule) { + runAllTests().catch((error) => { + console.error('Test runner failed:', error); + process.exit(1); + }); +} + +export { + testBasicTaskExecution, + testTaskDependencyChain, + testParallelTaskExecution, + testDiamondDependencyGraph, + testApprovalWorkflow, + testMultipleTeammates, + testTaskRetry, + runAllTests, +}; diff --git a/client/ts/examples/leader.ts b/client/ts/examples/leader.ts new file mode 100644 index 0000000..fa7fa9c --- /dev/null +++ b/client/ts/examples/leader.ts @@ -0,0 +1,238 @@ +/** + * examples/leader.ts — TypeScript leader example. 
+ * + * This example demonstrates: + * - Building a TeamClient in the leader role + * - Implementing LeaderPlugin with a simple multi-task dependency DAG + * - Implementing ApprovalPlugin with a stdin y/n prompt + * - Submitting a plan after connecting + * - Polling session progress + * + * Run: + * npx ts-node examples/leader.ts + * + * Environment variables (or edit the constants below): + * TEAM_SERVER_URL — e.g. https://api.example.com + * TEAM_TOKEN — JWT bearer token + * TEAM_SESSION_ID — UUID of an existing session + * TEAM_MACHINE_ID — stable machine identifier + * TEAM_GOAL — free-text goal to plan (default: "refactor the auth module") + */ + +import * as readline from 'readline'; +import { v4 as uuidv4 } from 'uuid'; + +import { + TeamClient, + MemberRoleLeader, + MemberStatusOnline, + MemberRoleTeammate, +} from '../src/index.js'; + +import type { + ApprovalPlugin, + ApprovalRequest, + LeaderPlugin, + Member, + PlanTasksInput, + TaskSpec, +} from '../src/index.js'; + +// ─── Config ─────────────────────────────────────────────────────────────── + +const SERVER_URL = process.env['TEAM_SERVER_URL'] ?? 'http://localhost:8080'; +const TOKEN = process.env['TEAM_TOKEN'] ?? ''; +const SESSION_ID = process.env['TEAM_SESSION_ID'] ?? ''; +const MACHINE_ID = process.env['TEAM_MACHINE_ID'] ?? `leader-${process.pid}`; +const GOAL = + process.env['TEAM_GOAL'] ?? 'refactor the authentication module'; + +// ─── LeaderPlugin implementation ────────────────────────────────────────── + +/** + * SimplePlanner creates a 3-task DAG: analyse → implement → verify. + * + * In production, replace planTasks with an LLM call that decomposes the goal + * into a richer task graph. Pre-assigning `id` values is required when tasks + * reference each other via `dependencies`. 
+ */ +class SimplePlanner implements LeaderPlugin { + async planTasks(_signal: AbortSignal, req: PlanTasksInput): Promise<TaskSpec[]> { + console.log( + `[leader] planning tasks for goal: "${req.goal}" (${req.members.length} members online)` + ); + + // Pick the first online teammate (if any) for assignment. + const assignee = pickTeammate(req.members); + if (assignee) { + console.log(`[leader] assigning tasks to ${assignee.machineName ?? assignee.machineId}`); + } else { + console.log('[leader] no online teammates — tasks will remain unassigned'); + } + + // Pre-assign UUIDs so dependency references in the same batch are stable. + const idAnalyse = uuidv4(); + const idImplement = uuidv4(); + const idVerify = uuidv4(); + + return [ + { + id: idAnalyse, + description: `Analyse codebase — ${req.goal}`, + priority: 9, + assignedMemberId: assignee?.id, + repoAffinity: [], // set to ['https://github.com/org/repo'] to target a specific repo + fileHints: [], // set to file paths for focused execution + }, + { + id: idImplement, + description: `Implement changes — ${req.goal}`, + priority: 8, + dependencies: [idAnalyse], // ← only starts after analyse completes + assignedMemberId: assignee?.id, + }, + { + id: idVerify, + description: `Verify and test — ${req.goal}`, + priority: 7, + dependencies: [idImplement], // ← only starts after implement completes + assignedMemberId: assignee?.id, + }, + ]; + } +} + +// ─── ApprovalPlugin implementation ──────────────────────────────────────── + +/** + * StdinApprover displays the approval request on stdout and reads y/n from stdin. + * + * In production, replace handleApproval with a GUI dialog, Slack message, + * web notification, or any other mechanism that surfaces the decision to a human. 
+ */ +class StdinApprover implements ApprovalPlugin { + async handleApproval( + _signal: AbortSignal, + req: ApprovalRequest + ): Promise<{ approved: boolean; note?: string }> { + console.log('\n[APPROVAL REQUEST] ─────────────────────────────────'); + console.log(` Tool: ${req.toolName}`); + console.log(` Risk level: ${req.riskLevel}`); + console.log(` Description: ${req.description}`); + if (req.toolInput && Object.keys(req.toolInput).length) { + console.log(` Input: ${JSON.stringify(req.toolInput)}`); + } + console.log('─────────────────────────────────────────────────────'); + + const answer = await prompt(' Approve? [y/N]: '); + const approved = answer.trim().toLowerCase() === 'y'; + console.log(` → ${approved ? 'Approved' : 'Rejected'}\n`); + return { approved }; + } +} + +// ─── Main ───────────────────────────────────────────────────────────────── + +async function main(): Promise<void> { + if (!TOKEN || !SESSION_ID) { + console.error( + 'Set TEAM_TOKEN and TEAM_SESSION_ID environment variables before running.' + ); + process.exit(1); + } + + const ac = new AbortController(); + process.on('SIGINT', () => { + console.log('\n[leader] shutting down...'); + ac.abort(); + }); + + const client = new TeamClient({ + serverUrl: SERVER_URL, + token: TOKEN, + sessionId: SESSION_ID, + machineId: MACHINE_ID, + machineName: `Leader (${MACHINE_ID})`, + role: MemberRoleLeader, + }) + .withLeaderPlugin(new SimplePlanner()) + .withApprovalPlugin(new StdinApprover()); + + console.log(`[leader] connecting to ${SERVER_URL} (session=${SESSION_ID})`); + + // Submit the plan a couple of seconds after the WS connection is ready. + setTimeout(async () => { + if (ac.signal.aborted) return; + console.log(`[leader] submitting plan: "${GOAL}"`); + try { + await client.submitPlan(GOAL, ac.signal); + console.log('[leader] plan submitted — waiting for task events...'); + } catch (err) { + console.error('[leader] plan submission failed:', err); + } + }, 2500); + + // Poll progress every 10 s. 
+ const progressInterval = setInterval(async () => { + if (ac.signal.aborted) { + clearInterval(progressInterval); + return; + } + try { + const prog = await client.doJSON<SessionProgress>( + 'GET', + `/api/team/sessions/${SESSION_ID}/progress` + ); + console.log( + `[leader] progress — total: ${prog.totalTasks}, ` + + `completed: ${prog.completedTasks}, ` + + `running: ${prog.runningTasks}, ` + + `failed: ${prog.failedTasks}, ` + + `pending: ${prog.pendingTasks}` + ); + } catch { + // swallow transient errors + } + }, 10_000); + + try { + await client.start(ac.signal); + } finally { + clearInterval(progressInterval); + } +} + +// ─── Helpers ────────────────────────────────────────────────────────────── + +function pickTeammate(members: Member[]): Member | undefined { + return members.find( + (m) => m.role === MemberRoleTeammate && m.status === MemberStatusOnline + ); +} + +function prompt(question: string): Promise<string> { + const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout, + terminal: false, + }); + return new Promise((resolve) => { + rl.question(question, (answer) => { + rl.close(); + resolve(answer); + }); + }); +} + +interface SessionProgress { + totalTasks: number; + completedTasks: number; + failedTasks: number; + runningTasks: number; + pendingTasks: number; +} + +main().catch((err) => { + console.error('[leader] fatal error:', err); + process.exit(1); +}); diff --git a/client/ts/examples/teammate.ts b/client/ts/examples/teammate.ts new file mode 100644 index 0000000..b5a165a --- /dev/null +++ b/client/ts/examples/teammate.ts @@ -0,0 +1,332 @@ +/** + * examples/teammate.ts — TypeScript teammate example. 
+ * + * This example demonstrates: + * - Building a TeamClient in the teammate role + * - Implementing TeammatePlugin to execute tasks via child_process + * - Implementing ExplorePlugin to answer remote file/code queries + * - Implementing ApprovalPlugin for user confirmation + * - Registering local repos with the affinity registry + * + * Run: + * npx ts-node examples/teammate.ts + * + * Environment variables (or edit the constants below): + * TEAM_SERVER_URL — e.g. https://api.example.com + * TEAM_TOKEN — JWT bearer token + * TEAM_SESSION_ID — UUID of an existing session + * TEAM_MACHINE_ID — stable machine identifier + * TEAM_REPO_URL — remote URL of the local git repo (optional) + * TEAM_REPO_PATH — local path to the repo (optional) + */ + +import { execSync, exec as execCb } from 'child_process'; +import * as path from 'path'; +import * as readline from 'readline'; +import { promisify } from 'util'; + +import { + TeamClient, + MemberRoleTeammate, +} from '../src/index.js'; + +import type { + ApprovalPlugin, + ApprovalRequest, + ExplorePlugin, + ExploreQueryResult, + ExploreRequest, + ExploreResult, + ProgressReporter, + Task, + TaskResult, + TeammatePlugin, +} from '../src/index.js'; + +const exec = promisify(execCb); + +// ─── Config ─────────────────────────────────────────────────────────────── + +const SERVER_URL = process.env['TEAM_SERVER_URL'] ?? 'http://localhost:8080'; +const TOKEN = process.env['TEAM_TOKEN'] ?? ''; +const SESSION_ID = process.env['TEAM_SESSION_ID'] ?? ''; +const MACHINE_ID = process.env['TEAM_MACHINE_ID'] ?? `teammate-${process.pid}`; +const REPO_URL = process.env['TEAM_REPO_URL'] ?? ''; +const REPO_PATH = process.env['TEAM_REPO_PATH'] ?? process.cwd(); + +// ─── TeammatePlugin implementation ──────────────────────────────────────── + +/** + * ShellExecutor interprets the task description as a shell command. + * + * In production, replace executeTask with an AI agent call that: + * 1. 
Parses the task description into actionable steps + * 2. Uses task.fileHints to scope operations to specific files + * 3. Calls your LLM / code editor / tool chain + * 4. Streams progress via reporter.report() + */ +class ShellExecutor implements TeammatePlugin { + async executeTask( + signal: AbortSignal, + task: Task, + reporter: ProgressReporter + ): Promise<TaskResult> { + console.log(`[teammate] executing task ${task.id.slice(0, 8)}: "${task.description}"`); + reporter.report(10, 'preparing'); + + // Abort support — create a child-process abort mechanism. + const ac = new AbortController(); + const onAbort = () => ac.abort(); + signal.addEventListener('abort', onAbort, { once: true }); + + try { + reporter.report(30, 'running'); + const { stdout, stderr } = await exec(task.description, { + cwd: REPO_PATH, + signal: ac.signal, + timeout: 5 * 60 * 1000, // 5 minute max per task + maxBuffer: 4 * 1024 * 1024, + }); + + const output = [stdout, stderr].filter(Boolean).join('\n'); + reporter.report(100, 'done'); + console.log(`[teammate] task ${task.id.slice(0, 8)} completed`); + return { output, extraData: { exitCode: 0 } }; + } catch (err: unknown) { + const e = err as { code?: number; stdout?: string; stderr?: string }; + const output = [e.stdout, e.stderr].filter(Boolean).join('\n'); + throw new Error(`Exit ${e.code ?? 1}: ${output}`); + } finally { + signal.removeEventListener('abort', onAbort); + } + } +} + +// ─── ExplorePlugin implementation ───────────────────────────────────────── + +/** + * LocalExplorer handles remote code-intelligence queries using shell tools. + * All operations are read-only and scoped to the local filesystem. 
+ */ +class LocalExplorer implements ExplorePlugin { + async explore(_signal: AbortSignal, req: ExploreRequest): Promise<ExploreResult> { + console.log( + `[teammate] handling explore request ${req.requestId.slice(0, 8)} (${req.queries.length} queries)` + ); + + const queryResults: ExploreQueryResult[] = []; + + for (const q of req.queries) { + const result: ExploreQueryResult = { type: q.type, output: '', truncated: false }; + + try { + switch (q.type) { + case 'file_tree': { + const dir = str(q.params['path'], REPO_PATH); + const { stdout } = await exec( + `find "${dir}" -type f -not -path "*/.*" -not -path "*/node_modules/*" -not -path "*/vendor/*"`, + { timeout: 10_000 } + ); + result.output = truncate(stdout, 8192); + result.truncated = stdout.length > 8192; + break; + } + + case 'content_search': { + const pattern = str(q.params['pattern'], ''); + const dir = str(q.params['dir'], REPO_PATH); + const fileGlob = str(q.params['fileGlob'], ''); + if (!pattern) { result.output = 'error: pattern required'; break; } + // Prefer ripgrep, fall back to grep. + const cmd = hasCommand('rg') + ? `rg --no-heading -n -m 50 ${fileGlob ? `--glob "${fileGlob}"` : ''} "${pattern}" "${dir}"` + : `grep -rn "${pattern}" "${dir}"`; + const { stdout } = await exec(cmd, { timeout: 15_000 }).catch(() => ({ stdout: '' })); + result.output = truncate(stdout, 8192); + result.truncated = stdout.length > 8192; + break; + } + + case 'symbol_search': { + const symbol = str(q.params['symbol'], ''); + const dir = str(q.params['dir'], REPO_PATH); + if (!symbol) { result.output = 'error: symbol required'; break; } + const cmd = hasCommand('rg') + ? 
`rg --no-heading -n -w "${symbol}" "${dir}"` + : `grep -rn "\\b${symbol}\\b" "${dir}"`; + const { stdout } = await exec(cmd, { timeout: 15_000 }).catch(() => ({ stdout: '' })); + result.output = truncate(stdout, 8192); + break; + } + + case 'git_log': { + const dir = str(q.params['dir'], REPO_PATH); + const n = num(q.params['n'], 20); + const { stdout } = await exec( + `git -C "${dir}" log --oneline -${n}`, + { timeout: 10_000 } + ); + result.output = stdout; + break; + } + + case 'dependency_graph': { + const entry = str(q.params['entry'], '.'); + const ext = path.extname(entry); + let cmd: string; + if (ext === '.ts' || ext === '.js') { + cmd = `node -e "console.log(JSON.stringify(Object.keys(require.resolve.paths('${entry}'))))"`; + } else { + // Go module graph + cmd = `go list -deps ${entry}`; + } + const { stdout } = await exec(cmd, { + cwd: REPO_PATH, + timeout: 30_000, + }).catch((e: Error) => ({ stdout: `error: ${e.message}` })); + result.output = truncate(stdout, 8192); + break; + } + + default: + result.output = `unsupported query type "${q.type}"`; + } + } catch (err) { + result.output = `error: ${err instanceof Error ? err.message : String(err)}`; + } + + queryResults.push(result); + } + + return { requestId: req.requestId, queryResults }; + } +} + +// ─── ApprovalPlugin implementation ──────────────────────────────────────── + +/** + * StdinApprover presents approval requests on stdout and reads y/n from stdin. + * + * In production, replace with a GUI dialog, desktop notification, or + * a webhook to a chat system (Slack, Teams, etc.). + */ +class StdinApprover implements ApprovalPlugin { + async handleApproval( + _signal: AbortSignal, + req: ApprovalRequest + ): Promise<{ approved: boolean; note?: string }> { + console.log('\n[APPROVAL REQUEST] ─────────────────────────────────'); + console.log(` Tool: ${req.toolName}`); + console.log(` Risk level: ${req.riskLevel}`); + console.log(` Description: ${req.description ?? 
'(no description)'}`); + if (req.toolInput && Object.keys(req.toolInput).length) { + console.log(` Input: ${JSON.stringify(req.toolInput, null, 2)}`); + } + console.log('─────────────────────────────────────────────────────'); + + const answer = await prompt(' Approve? [y/N]: '); + const approved = answer.trim().toLowerCase() === 'y'; + console.log(approved ? ' → Approved\n' : ' → Rejected\n'); + return { approved }; + } +} + +// ─── Main ───────────────────────────────────────────────────────────────── + +async function main(): Promise<void> { + if (!TOKEN || !SESSION_ID) { + console.error( + 'Set TEAM_TOKEN and TEAM_SESSION_ID environment variables before running.' + ); + process.exit(1); + } + + const ac = new AbortController(); + process.on('SIGINT', () => { + console.log('\n[teammate] shutting down...'); + ac.abort(); + }); + + const client = new TeamClient({ + serverUrl: SERVER_URL, + token: TOKEN, + sessionId: SESSION_ID, + machineId: MACHINE_ID, + machineName: `Teammate (${MACHINE_ID})`, + role: MemberRoleTeammate, + }) + .withTeammatePlugin(new ShellExecutor()) + .withExplorePlugin(new LocalExplorer()) + .withApprovalPlugin(new StdinApprover()); + + console.log(`[teammate] connecting to ${SERVER_URL} (session=${SESSION_ID})`); + console.log(`[teammate] working directory: ${REPO_PATH}`); + + // Register the local repo so the leader can schedule repo-affinity tasks here. + if (REPO_URL) { + setTimeout(async () => { + if (ac.signal.aborted) return; + try { + // Detect current branch. 
+ let branch = 'main'; + let dirty = false; + try { + branch = execSync('git rev-parse --abbrev-ref HEAD', { cwd: REPO_PATH }) + .toString().trim(); + dirty = execSync('git status --porcelain', { cwd: REPO_PATH }) + .toString().trim().length > 0; + } catch { /* not a git repo */ } + + await client.doJSON('POST', `/api/team/sessions/${SESSION_ID}/repos`, { + machineId: MACHINE_ID, + repoRemoteUrl: REPO_URL, + repoLocalPath: REPO_PATH, + currentBranch: branch, + hasUncommittedChanges: dirty, + lastSyncedAt: new Date().toISOString(), + }); + console.log(`[teammate] registered repo: ${REPO_URL} (branch=${branch}, dirty=${dirty})`); + } catch (err) { + console.warn('[teammate] repo registration failed:', err); + } + }, 1500); + } + + try { + await client.start(ac.signal); + } catch (err) { + if (!ac.signal.aborted) { + console.error('[teammate] stopped with error:', err); + } + } +} + +// ─── Helpers ────────────────────────────────────────────────────────────── + +function str(v: unknown, def: string): string { + return typeof v === 'string' && v ? v : def; +} + +function num(v: unknown, def: number): number { + return typeof v === 'number' ? v : def; +} + +function truncate(s: string, max: number): string { + return s.length > max ? 
s.slice(0, max) : s; +} + +function hasCommand(cmd: string): boolean { + try { execSync(`which ${cmd}`, { stdio: 'ignore' }); return true; } catch { return false; } +} + +function prompt(question: string): Promise<string> { + const rl = readline.createInterface({ input: process.stdin, output: process.stdout }); + return new Promise((resolve) => { + rl.question(question, (answer) => { rl.close(); resolve(answer); }); + }); +} + +main().catch((err) => { + console.error('[teammate] fatal error:', err); + process.exit(1); +}); diff --git a/client/ts/examples/tsconfig.json b/client/ts/examples/tsconfig.json new file mode 100644 index 0000000..a816c5c --- /dev/null +++ b/client/ts/examples/tsconfig.json @@ -0,0 +1,10 @@ +{ + "extends": "../tsconfig.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "../dist/examples", + "noEmit": true + }, + "include": ["."], + "exclude": ["node_modules"] +} diff --git a/client/ts/package-lock.json b/client/ts/package-lock.json new file mode 100644 index 0000000..77a8127 --- /dev/null +++ b/client/ts/package-lock.json @@ -0,0 +1,657 @@ +{ + "name": "@costrict/team-client", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "@costrict/team-client", + "version": "0.1.0", + "license": "MIT", + "dependencies": { + "uuid": "^11.0.0" + }, + "devDependencies": { + "@types/node": "^20.0.0", + "@types/uuid": "^10.0.0", + "@types/ws": "^8.5.0", + "tsx": "^4.21.0", + "typescript": "^5.4.0", + "ws": "^8.18.0" + }, + "peerDependencies": { + "ws": "^8" + }, + "peerDependenciesMeta": { + "ws": { + "optional": true + } + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.7.tgz", + "integrity": "sha512-EKX3Qwmhz1eMdEJokhALr0YiD0lhQNwDqkPYyPhiSwKrh7/4KRjQc04sZ8db+5DVVnZ1LmbNDI1uAMPEUBnQPg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + 
"node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.7.tgz", + "integrity": "sha512-jbPXvB4Yj2yBV7HUfE2KHe4GJX51QplCN1pGbYjvsyCZbQmies29EoJbkEc+vYuU5o45AfQn37vZlyXy4YJ8RQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.7.tgz", + "integrity": "sha512-62dPZHpIXzvChfvfLJow3q5dDtiNMkwiRzPylSCfriLvZeq0a1bWChrGx/BbUbPwOrsWKMn8idSllklzBy+dgQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.7.tgz", + "integrity": "sha512-x5VpMODneVDb70PYV2VQOmIUUiBtY3D3mPBG8NxVk5CogneYhkR7MmM3yR/uMdITLrC1ml/NV1rj4bMJuy9MCg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.7.tgz", + "integrity": "sha512-5lckdqeuBPlKUwvoCXIgI2D9/ABmPq3Rdp7IfL70393YgaASt7tbju3Ac+ePVi3KDH6N2RqePfHnXkaDtY9fkw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.7.tgz", + "integrity": "sha512-rYnXrKcXuT7Z+WL5K980jVFdvVKhCHhUwid+dDYQpH+qu+TefcomiMAJpIiC2EM3Rjtq0sO3StMV/+3w3MyyqQ==", + "cpu": [ + "x64" + ], + "dev": true, 
+ "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.7.tgz", + "integrity": "sha512-B48PqeCsEgOtzME2GbNM2roU29AMTuOIN91dsMO30t+Ydis3z/3Ngoj5hhnsOSSwNzS+6JppqWsuhTp6E82l2w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.7.tgz", + "integrity": "sha512-jOBDK5XEjA4m5IJK3bpAQF9/Lelu/Z9ZcdhTRLf4cajlB+8VEhFFRjWgfy3M1O4rO2GQ/b2dLwCUGpiF/eATNQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.7.tgz", + "integrity": "sha512-RkT/YXYBTSULo3+af8Ib0ykH8u2MBh57o7q/DAs3lTJlyVQkgQvlrPTnjIzzRPQyavxtPtfg0EopvDyIt0j1rA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.7.tgz", + "integrity": "sha512-RZPHBoxXuNnPQO9rvjh5jdkRmVizktkT7TCDkDmQ0W2SwHInKCAV95GRuvdSvA7w4VMwfCjUiPwDi0ZO6Nfe9A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.7.tgz", + "integrity": 
"sha512-GA48aKNkyQDbd3KtkplYWT102C5sn/EZTY4XROkxONgruHPU72l+gW+FfF8tf2cFjeHaRbWpOYa/uRBz/Xq1Pg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.7.tgz", + "integrity": "sha512-a4POruNM2oWsD4WKvBSEKGIiWQF8fZOAsycHOt6JBpZ+JN2n2JH9WAv56SOyu9X5IqAjqSIPTaJkqN8F7XOQ5Q==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.7.tgz", + "integrity": "sha512-KabT5I6StirGfIz0FMgl1I+R1H73Gp0ofL9A3nG3i/cYFJzKHhouBV5VWK1CSgKvVaG4q1RNpCTR2LuTVB3fIw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.7.tgz", + "integrity": "sha512-gRsL4x6wsGHGRqhtI+ifpN/vpOFTQtnbsupUF5R5YTAg+y/lKelYR1hXbnBdzDjGbMYjVJLJTd2OFmMewAgwlQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.7.tgz", + "integrity": "sha512-hL25LbxO1QOngGzu2U5xeXtxXcW+/GvMN3ejANqXkxZ/opySAZMrc+9LY/WyjAan41unrR3YrmtTsUpwT66InQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.7", + 
"resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.7.tgz", + "integrity": "sha512-2k8go8Ycu1Kb46vEelhu1vqEP+UeRVj2zY1pSuPdgvbd5ykAw82Lrro28vXUrRmzEsUV0NzCf54yARIK8r0fdw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.7.tgz", + "integrity": "sha512-hzznmADPt+OmsYzw1EE33ccA+HPdIqiCRq7cQeL1Jlq2gb1+OyWBkMCrYGBJ+sxVzve2ZJEVeePbLM2iEIZSxA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.7.tgz", + "integrity": "sha512-b6pqtrQdigZBwZxAn1UpazEisvwaIDvdbMbmrly7cDTMFnw/+3lVxxCTGOrkPVnsYIosJJXAsILG9XcQS+Yu6w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.7.tgz", + "integrity": "sha512-OfatkLojr6U+WN5EDYuoQhtM+1xco+/6FSzJJnuWiUw5eVcicbyK3dq5EeV/QHT1uy6GoDhGbFpprUiHUYggrw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.7.tgz", + "integrity": "sha512-AFuojMQTxAz75Fo8idVcqoQWEHIXFRbOc1TrVcFSgCZtQfSdc1RXgB3tjOn/krRHENUB4j00bfGjyl2mJrU37A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" 
+ } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.7.tgz", + "integrity": "sha512-+A1NJmfM8WNDv5CLVQYJ5PshuRm/4cI6WMZRg1by1GwPIQPCTs1GLEUHwiiQGT5zDdyLiRM/l1G0Pv54gvtKIg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.7.tgz", + "integrity": "sha512-+KrvYb/C8zA9CU/g0sR6w2RBw7IGc5J2BPnc3dYc5VJxHCSF1yNMxTV5LQ7GuKteQXZtspjFbiuW5/dOj7H4Yw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.7.tgz", + "integrity": "sha512-ikktIhFBzQNt/QDyOL580ti9+5mL/YZeUPKU2ivGtGjdTYoqz6jObj6nOMfhASpS4GU4Q/Clh1QtxWAvcYKamA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.7.tgz", + "integrity": "sha512-7yRhbHvPqSpRUV7Q20VuDwbjW5kIMwTHpptuUzV+AA46kiPze5Z7qgt6CLCK3pWFrHeNfDd1VKgyP4O+ng17CA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.7.tgz", + "integrity": "sha512-SmwKXe6VHIyZYbBLJrhOoCJRB/Z1tckzmgTLfFYOfpMAx63BJEaL9ExI8x7v0oAO3Zh6D/Oi1gVxEYr5oUCFhw==", + "cpu": [ + "ia32" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.7.tgz", + "integrity": "sha512-56hiAJPhwQ1R4i+21FVF7V8kSD5zZTdHcVuRFMW0hn753vVfQN8xlx4uOPT4xoGH0Z/oVATuR82AiqSTDIpaHg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@types/node": { + "version": "20.19.39", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.39.tgz", + "integrity": "sha512-orrrD74MBUyK8jOAD/r0+lfa1I2MO6I+vAkmAWzMYbCcgrN4lCrmK52gRFQq/JRxfYPfonkr4b0jcY7Olqdqbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/uuid": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-10.0.0.tgz", + "integrity": "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/ws": { + "version": "8.18.1", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz", + "integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/esbuild": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.7.tgz", + "integrity": "sha512-IxpibTjyVnmrIQo5aqNpCgoACA/dTKLTlhMHihVHhdkxKyPO1uBBthumT0rdHmcsk9uMonIWS0m4FljWzILh3w==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.7", + "@esbuild/android-arm": "0.27.7", + "@esbuild/android-arm64": "0.27.7", + "@esbuild/android-x64": 
"0.27.7", + "@esbuild/darwin-arm64": "0.27.7", + "@esbuild/darwin-x64": "0.27.7", + "@esbuild/freebsd-arm64": "0.27.7", + "@esbuild/freebsd-x64": "0.27.7", + "@esbuild/linux-arm": "0.27.7", + "@esbuild/linux-arm64": "0.27.7", + "@esbuild/linux-ia32": "0.27.7", + "@esbuild/linux-loong64": "0.27.7", + "@esbuild/linux-mips64el": "0.27.7", + "@esbuild/linux-ppc64": "0.27.7", + "@esbuild/linux-riscv64": "0.27.7", + "@esbuild/linux-s390x": "0.27.7", + "@esbuild/linux-x64": "0.27.7", + "@esbuild/netbsd-arm64": "0.27.7", + "@esbuild/netbsd-x64": "0.27.7", + "@esbuild/openbsd-arm64": "0.27.7", + "@esbuild/openbsd-x64": "0.27.7", + "@esbuild/openharmony-arm64": "0.27.7", + "@esbuild/sunos-x64": "0.27.7", + "@esbuild/win32-arm64": "0.27.7", + "@esbuild/win32-ia32": "0.27.7", + "@esbuild/win32-x64": "0.27.7" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-tsconfig": { + "version": "4.13.7", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.13.7.tgz", + "integrity": "sha512-7tN6rFgBlMgpBML5j8typ92BKFi2sFQvIdpAqLA2beia5avZDrMs0FLZiM5etShWq5irVyGcGMEA1jcDaK7A/Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "dev": true, + "license": "MIT", + "funding": { + "url": 
"https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/tsx": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.21.0.tgz", + "integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "~0.27.0", + "get-tsconfig": "^4.7.5" + }, + "bin": { + "tsx": "dist/cli.mjs" + }, + "engines": { + "node": ">=18.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/uuid": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", + "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/esm/bin/uuid" + } + }, + "node_modules/ws": { + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.20.0.tgz", + "integrity": "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + 
"utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + } + } +} diff --git a/client/ts/package.json b/client/ts/package.json new file mode 100644 index 0000000..269ca8d --- /dev/null +++ b/client/ts/package.json @@ -0,0 +1,43 @@ +{ + "name": "@costrict/team-client", + "version": "0.1.0", + "description": "Cloud Team Agent client SDK — plugin-based Go/TS client for cross-machine multi-agent collaboration", + "main": "dist/index.js", + "module": "dist/index.mjs", + "types": "dist/index.d.ts", + "exports": { + ".": { + "import": "./dist/index.mjs", + "require": "./dist/index.js", + "types": "./dist/index.d.ts" + } + }, + "scripts": { + "build": "tsc", + "typecheck": "tsc --noEmit", + "clean": "rm -rf dist", + "test:e2e": "node --test dist/e2e/*.test.js", + "test:e2e:ts": "tsx e2e/team-e2e.test.ts" + }, + "files": ["dist"], + "dependencies": { + "uuid": "^11.0.0" + }, + "devDependencies": { + "@types/node": "^20.0.0", + "@types/uuid": "^10.0.0", + "@types/ws": "^8.5.0", + "tsx": "^4.21.0", + "typescript": "^5.4.0", + "ws": "^8.18.0" + }, + "peerDependencies": { + "ws": "^8" + }, + "peerDependenciesMeta": { + "ws": { + "optional": true + } + }, + "license": "MIT" +} diff --git a/client/ts/src/client.ts b/client/ts/src/client.ts new file mode 100644 index 0000000..8802a19 --- /dev/null +++ b/client/ts/src/client.ts @@ -0,0 +1,239 @@ +// client.ts — TeamClient: top-level entry point for the Cloud Team Agent SDK. 
+ +import { v4 as uuidv4 } from 'uuid'; +import type { + CloudEvent, + Member, + TeamClientConfig, +} from './types.js'; +import { MemberRoleLeader, MemberRoleTeammate } from './types.js'; +import type { + ApprovalPlugin, + ExplorePlugin, + LeaderPlugin, + TeammatePlugin, +} from './plugin.js'; +import { LeaderAgent } from './leader.js'; +import { TeammateAgent } from './teammate.js'; +import { WSConnection } from './ws.js'; + +/** + * TeamClient is the top-level entry point for the Cloud Team Agent SDK. + * + * Usage (leader): + * ```ts + * const client = new TeamClient(cfg) + * .withLeaderPlugin(myPlanner) + * .withApprovalPlugin(myApprover); + * await client.start(abortSignal); + * ``` + * + * Usage (teammate): + * ```ts + * const client = new TeamClient(cfg) + * .withTeammatePlugin(myExecutor) + * .withExplorePlugin(myExplorer); + * await client.start(abortSignal); + * ``` + */ +export class TeamClient { + private cfg: TeamClientConfig; + private ws: WSConnection; + private leader: LeaderAgent | null = null; + private teammate: TeammateAgent | null = null; + private controller: AbortController | null = null; + + // Plugin slots + private leaderPlugin: LeaderPlugin | null = null; + private teammatePlugin: TeammatePlugin | null = null; + private approvalPlugin: ApprovalPlugin | null = null; + private explorePlugin: ExplorePlugin | null = null; + + constructor(cfg: TeamClientConfig) { + this.cfg = cfg; + this.ws = new WSConnection(cfg); + this.ws.onEvent = (evt) => this.dispatch(evt); + } + + // ─── Fluent plugin registration ─────────────────────────────────────── + + withLeaderPlugin(p: LeaderPlugin): this { + this.leaderPlugin = p; + return this; + } + + withTeammatePlugin(p: TeammatePlugin): this { + this.teammatePlugin = p; + return this; + } + + withApprovalPlugin(p: ApprovalPlugin): this { + this.approvalPlugin = p; + return this; + } + + withExplorePlugin(p: ExplorePlugin): this { + this.explorePlugin = p; + return this; + } + + // ─── Lifecycle 
────────────────────────────────────────
+
+  /**
+   * Connects to the server and starts processing events.
+   * Resolves when the signal fires or a fatal error occurs.
+   */
+  async start(signal?: AbortSignal): Promise<void> {
+    this.controller = new AbortController();
+    const innerSignal = this.controller.signal;
+
+    // Combine caller's signal with our internal one.
+    const combinedSignal = signal
+      ? combineSignals(signal, innerSignal)
+      : innerSignal;
+
+    // Role-specific initialisation via REST.
+    switch (this.cfg.role) {
+      case MemberRoleLeader: {
+        this.leader = new LeaderAgent(this.cfg, this.ws, {
+          leaderPlugin: this.leaderPlugin,
+          approvalPlugin: this.approvalPlugin,
+          doJSON: this.doJSON.bind(this),
+          fetchMembers: this.fetchMembers.bind(this),
+        });
+        await this.leader.init(combinedSignal);
+        break;
+      }
+      case MemberRoleTeammate: {
+        this.teammate = new TeammateAgent(this.cfg, this.ws, {
+          teammatePlugin: this.teammatePlugin,
+          approvalPlugin: this.approvalPlugin,
+          explorePlugin: this.explorePlugin,
+          doJSON: this.doJSON.bind(this),
+        });
+        await this.teammate.init(combinedSignal);
+        break;
+      }
+      default:
+        throw new Error(
+          `Unknown role "${this.cfg.role}" — must be "${MemberRoleLeader}" or "${MemberRoleTeammate}"`
+        );
+    }
+
+    // Start WS loop in the background, then wait until the connection is open.
+    void this.ws.start(combinedSignal);
+    await this.ws.waitConnected();
+  }
+
+  /**
+   * Calls LeaderPlugin.planTasks and submits the resulting task plan.
+   * Must only be called after start() and with role = "leader".
+   */
+  async submitPlan(goal: string, signal?: AbortSignal): Promise<void> {
+    if (!this.leader) {
+      throw new Error('submitPlan requires role = "leader"');
+    }
+    await this.leader.submitPlan(goal, signal ?? new AbortController().signal);
+  }
+
+  /** Stops the client and closes the WebSocket.
*/
+  stop(): void {
+    this.controller?.abort();
+    this.ws.close();
+  }
+
+  // ─── Internal helpers ─────────────────────────────────────────────────
+
+  private dispatch(evt: CloudEvent): void {
+    this.leader?.handle(evt);
+    this.teammate?.handle(evt);
+  }
+
+  /**
+   * Executes a REST request against the server.
+   * Returns the parsed JSON body, or undefined for empty responses.
+   */
+  async doJSON<T = unknown>(
+    method: string,
+    path: string,
+    body?: unknown
+  ): Promise<T> {
+    const url = this.cfg.serverUrl.replace(/\/$/, '') + path;
+    const res = await fetch(url, {
+      method,
+      headers: {
+        'Content-Type': 'application/json',
+        ...(this.cfg.token ? { Authorization: `Bearer ${this.cfg.token}` } : {}),
+      },
+      body: body !== undefined ? JSON.stringify(body) : undefined,
+    });
+
+    if (!res.ok) {
+      const err = await res.json().catch(() => ({ error: res.statusText })) as { error?: string };
+      throw new Error(`HTTP ${res.status} ${path}: ${err.error ?? res.statusText}`);
+    }
+
+    const text = await res.text();
+    return text ? (JSON.parse(text) as T) : (undefined as T);
+  }
+
+  async fetchMembers(): Promise<Member[]> {
+    const resp = await this.doJSON<{ members: Member[] }>(
+      'GET',
+      `/api/team/sessions/${this.cfg.sessionId}/members`
+    );
+    return resp?.members ?? [];
+  }
+
+  /**
+   * Creates a new team session on the server and returns the session ID.
+   * Call this before constructing TeamClient instances that share the session.
+   */
+  static async createSession(
+    serverUrl: string,
+    token: string,
+    name?: string
+  ): Promise<string> {
+    const url = serverUrl.replace(/\/$/, '') + '/api/team/sessions';
+    const res = await fetch(url, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+        ...(token ? { Authorization: `Bearer ${token}` } : {}),
+      },
+      body: JSON.stringify({ name: name ??
'e2e-test-session' }),
+    });
+    if (!res.ok) {
+      const err = await res.json().catch(() => ({ error: res.statusText })) as { error?: string };
+      throw new Error(`HTTP ${res.status} /api/team/sessions: ${err.error ?? res.statusText}`);
+    }
+    const data = await res.json() as { id: string };
+    return data.id;
+  }
+}
+
+// ─── Utilities ────────────────────────────────────────────────────────────
+
+/** Creates a new CloudEvent with a fresh UUID and the current timestamp. */
+export function newEvent(
+  type: string,
+  sessionId: string,
+  payload?: Record<string, unknown>
+): CloudEvent {
+  return {
+    eventId: uuidv4(),
+    type,
+    sessionId,
+    timestamp: Date.now(),
+    payload,
+  };
+}
+
+/** Combines two AbortSignals into one that fires when either fires. */
+function combineSignals(a: AbortSignal, b: AbortSignal): AbortSignal {
+  const ctrl = new AbortController();
+  const abort = () => ctrl.abort();
+  a.addEventListener('abort', abort, { once: true });
+  b.addEventListener('abort', abort, { once: true });
+  return ctrl.signal;
+}
diff --git a/client/ts/src/index.ts b/client/ts/src/index.ts
new file mode 100644
index 0000000..5916855
--- /dev/null
+++ b/client/ts/src/index.ts
@@ -0,0 +1,105 @@
+/**
+ * @costrict/team-client
+ *
+ * Cloud Team Agent client SDK.
+ * + * Quick start (leader): + * ```ts + * import { TeamClient, MemberRoleLeader } from '@costrict/team-client'; + * + * const client = new TeamClient({ + * serverUrl: 'https://api.example.com', + * token: myJWT, + * sessionId: sessionId, + * machineId: 'machine-a', + * role: MemberRoleLeader, + * }).withLeaderPlugin(myPlanner).withApprovalPlugin(myApprover); + * + * const ac = new AbortController(); + * await client.start(ac.signal); + * ``` + * + * Quick start (teammate): + * ```ts + * const client = new TeamClient({ ..., role: MemberRoleTeammate }) + * .withTeammatePlugin(myExecutor) + * .withExplorePlugin(myExplorer); + * + * await client.start(ac.signal); + * ``` + */ + +// Main class +export { TeamClient, newEvent } from './client.js'; + +// Plugin interfaces +export type { + LeaderPlugin, + TeammatePlugin, + ApprovalPlugin, + ExplorePlugin, + ProgressReporter, +} from './plugin.js'; + +// Types +export type { + CloudEvent, + TeamClientConfig, + Task, + TaskSpec, + TaskResult, + ApprovalRequest, + ExploreQuery, + ExploreRequest, + ExploreQueryResult, + ExploreResult, + Member, + PlanTasksInput, +} from './types.js'; + +// Constants — event types +export { + EventSessionCreate, + EventSessionJoin, + EventTaskPlanSubmit, + EventTaskClaim, + EventTaskProgress, + EventTaskComplete, + EventTaskFail, + EventApprovalRequest, + EventApprovalRespond, + EventMessageSend, + EventRepoRegister, + EventExploreRequest, + EventExploreResult, + EventLeaderElect, + EventLeaderHeartbeat, + EventTaskAssigned, + EventApprovalPush, + EventApprovalResponse, + EventMessageReceive, + EventSessionUpdated, + EventTeammateStatus, + EventLeaderElected, + EventLeaderExpired, + EventError, +} from './types.js'; + +// Constants — status & roles +export { + SessionStatusActive, + SessionStatusPaused, + SessionStatusCompleted, + SessionStatusFailed, + MemberStatusOnline, + MemberStatusOffline, + MemberStatusBusy, + MemberRoleLeader, + MemberRoleTeammate, + TaskStatusPending, + 
TaskStatusAssigned,
+  TaskStatusClaimed,
+  TaskStatusRunning,
+  TaskStatusCompleted,
+  TaskStatusFailed,
+} from './types.js';
diff --git a/client/ts/src/leader.ts b/client/ts/src/leader.ts
new file mode 100644
index 0000000..1edab2d
--- /dev/null
+++ b/client/ts/src/leader.ts
@@ -0,0 +1,184 @@
+// leader.ts — Leader role: election, heartbeat, plan submission, approval routing.
+
+import { v4 as uuidv4 } from 'uuid';
+import type { ApprovalRequest, CloudEvent, Member, TaskSpec, TeamClientConfig } from './types.js';
+import {
+  EventApprovalPush,
+  EventApprovalRequest,
+  EventLeaderExpired,
+} from './types.js';
+import type { ApprovalPlugin, LeaderPlugin } from './plugin.js';
+import type { WSConnection } from './ws.js';
+import { newEvent } from './client.js';
+
+const HEARTBEAT_INTERVAL_MS = 10_000;
+
+export interface LeaderAgentDeps {
+  leaderPlugin: LeaderPlugin | null;
+  approvalPlugin: ApprovalPlugin | null;
+  doJSON: <T>(method: string, path: string, body?: unknown) => Promise<T>;
+  fetchMembers: () => Promise<Member[]>;
+}
+
+/** LeaderAgent owns all leader-role logic and handles relevant inbound events. */
+export class LeaderAgent {
+  private cfg: TeamClientConfig;
+  private ws: WSConnection;
+  private deps: LeaderAgentDeps;
+  private fencingToken = 0;
+  private heartbeatTimer: ReturnType<typeof setInterval> | null = null;
+
+  constructor(cfg: TeamClientConfig, ws: WSConnection, deps: LeaderAgentDeps) {
+    this.cfg = cfg;
+    this.ws = ws;
+    this.deps = deps;
+  }
+
+  /**
+   * Performs REST-based initialisation:
+   * 1. Attempts leader election.
+   * 2. Starts the heartbeat loop.
+ */ + async init(_signal: AbortSignal): Promise { + const resp = await this.deps.doJSON<{ + elected: boolean; + fencingToken: number; + leaderId: string; + }>( + 'POST', + `/api/team/sessions/${this.cfg.sessionId}/leader/elect`, + { machineId: this.cfg.machineId } + ); + this.fencingToken = resp.fencingToken; + this.startHeartbeat(); + } + + /** + * Calls LeaderPlugin.planTasks and POSTs the resulting tasks to the server. + * Pre-assigns UUIDs so dependency references within the batch are stable. + */ + async submitPlan(goal: string, signal: AbortSignal): Promise { + if (!this.deps.leaderPlugin) { + throw new Error('No LeaderPlugin registered'); + } + + const members = await this.deps.fetchMembers(); + const specs = await this.deps.leaderPlugin.planTasks(signal, { + goal, + sessionId: this.cfg.sessionId, + members, + }); + + if (!specs.length) { + throw new Error('LeaderPlugin returned an empty plan'); + } + + // Pre-assign stable IDs so dependency references survive serialisation. + const enriched: TaskSpec[] = specs.map((s) => ({ + ...s, + id: s.id ?? uuidv4(), + })); + + await this.deps.doJSON('POST', `/api/team/sessions/${this.cfg.sessionId}/tasks`, { + tasks: enriched, + fencingToken: this.fencingToken, + }); + } + + /** Dispatches inbound events relevant to the leader role. */ + handle(evt: CloudEvent): void { + switch (evt.type) { + case EventApprovalPush: + this.handleApprovalPush(evt); + break; + + case EventLeaderExpired: + this.handleLeaderExpired(); + break; + } + } + + /** Sends an approval.request WS event (for leader-originated tool approval). 
*/
+  requestApproval(
+    toolName: string,
+    description: string,
+    riskLevel: string,
+    toolInput: Record<string, unknown>
+  ): void {
+    this.ws.send(
+      newEvent(EventApprovalRequest, this.cfg.sessionId, {
+        toolName,
+        description,
+        riskLevel,
+        toolInput,
+      })
+    );
+  }
+
+  stop(): void {
+    if (this.heartbeatTimer !== null) {
+      clearInterval(this.heartbeatTimer);
+      this.heartbeatTimer = null;
+    }
+  }
+
+  // ─── Private ─────────────────────────────────────────────────────────
+
+  private startHeartbeat(): void {
+    this.heartbeatTimer = setInterval(async () => {
+      try {
+        await this.deps.doJSON<{ renewed: boolean }>(
+          'POST',
+          `/api/team/sessions/${this.cfg.sessionId}/leader/heartbeat`,
+          { machineId: this.cfg.machineId }
+        );
+      } catch {
+        // Best-effort — the server will broadcast leader.expired if the lock lapses.
+      }
+    }, HEARTBEAT_INTERVAL_MS);
+  }
+
+  private handleApprovalPush(evt: CloudEvent): void {
+    if (!this.deps.approvalPlugin) return;
+
+    const approval = evt.payload?.['approval'] as ApprovalRequest | undefined;
+    if (!approval) return;
+
+    void (async () => {
+      const ctrl = new AbortController();
+      try {
+        const { approved, note } = await this.deps.approvalPlugin!.handleApproval(
+          ctrl.signal,
+          approval
+        );
+        await this.deps.doJSON('PATCH', `/api/team/approvals/${approval.id}`, {
+          status: approved ? 'approved' : 'rejected',
+          feedback: note ?? '',
+        });
+      } catch {
+        // Swallow errors — the approval will time out on the server side.
+      }
+    })();
+  }
+
+  private handleLeaderExpired(): void {
+    void (async () => {
+      try {
+        const resp = await this.deps.doJSON<{
+          elected: boolean;
+          fencingToken: number;
+        }>(
+          'POST',
+          `/api/team/sessions/${this.cfg.sessionId}/leader/elect`,
+          { machineId: this.cfg.machineId }
+        );
+        if (resp.elected) {
+          this.fencingToken = resp.fencingToken;
+          this.startHeartbeat();
+        }
+      } catch {
+        // Another machine may have been elected.
+ } + })(); + } +} diff --git a/client/ts/src/plugin.ts b/client/ts/src/plugin.ts new file mode 100644 index 0000000..3884eff --- /dev/null +++ b/client/ts/src/plugin.ts @@ -0,0 +1,82 @@ +// plugin.ts — Plugin interfaces that host applications implement to customise +// Cloud Team Agent behaviour. All four interfaces are independent; register +// only the ones your role requires. + +import type { + ApprovalRequest, + ExploreRequest, + ExploreResult, + PlanTasksInput, + Task, + TaskResult, + TaskSpec, +} from './types.js'; + +// ─── LeaderPlugin ───────────────────────────────────────────────────────── + +/** + * Decomposes a natural-language goal into an ordered task DAG. + * Inject your LLM / planning logic here. + * + * The SDK calls planTasks when the host application calls client.submitPlan(). + * Return an empty array to indicate that no tasks are needed. + */ +export interface LeaderPlugin { + planTasks(signal: AbortSignal, req: PlanTasksInput): Promise; +} + +// ─── TeammatePlugin ─────────────────────────────────────────────────────── + +/** + * Executes an assigned task and streams progress updates. + * Inject your shell runner, code executor, or AI agent here. + * + * The SDK calls executeTask for each incoming task.assigned event. + * Throw an Error (or return a rejected Promise) to mark the task as failed. + */ +export interface TeammatePlugin { + executeTask( + signal: AbortSignal, + task: Task, + reporter: ProgressReporter + ): Promise; +} + +// ─── ApprovalPlugin ─────────────────────────────────────────────────────── + +/** + * Displays an approval request to the user and collects a decision. + * Inject a CLI prompt, GUI dialog, or any other UI here. + * + * The SDK calls handleApproval for each incoming approval.push event (leader) + * and exposes the response result to teammates via approval.response events. 
+ */ +export interface ApprovalPlugin { + handleApproval( + signal: AbortSignal, + req: ApprovalRequest + ): Promise<{ approved: boolean; note?: string }>; +} + +// ─── ExplorePlugin ──────────────────────────────────────────────────────── + +/** + * Executes local file / code queries on behalf of a remote explore.request. + * Allowed operations: file tree, symbol search, content search, git log, + * dependency graph — read-only, sandboxed to the local repository. + * + * The SDK calls explore when the server routes an explore.request to this machine. + */ +export interface ExplorePlugin { + explore(signal: AbortSignal, req: ExploreRequest): Promise; +} + +// ─── ProgressReporter ───────────────────────────────────────────────────── + +/** + * Lets TeammatePlugin stream incremental progress back to the session + * without blocking the execution loop. + */ +export interface ProgressReporter { + report(pct: number, message: string): void; +} diff --git a/client/ts/src/teammate.ts b/client/ts/src/teammate.ts new file mode 100644 index 0000000..a0e7eea --- /dev/null +++ b/client/ts/src/teammate.ts @@ -0,0 +1,202 @@ +// teammate.ts — Teammate role: session join, task execution, explore, approvals. + +import type { CloudEvent, ExploreRequest, Task, TeamClientConfig } from './types.js'; +import { + EventApprovalRequest, + EventExploreRequest, + EventTaskAssigned, + EventTaskClaim, + EventTaskComplete, + EventTaskFail, + EventTaskProgress, +} from './types.js'; +import type { ApprovalPlugin, ExplorePlugin, ProgressReporter, TeammatePlugin } from './plugin.js'; +import type { WSConnection } from './ws.js'; +import { newEvent } from './client.js'; + +export interface TeammateAgentDeps { + teammatePlugin: TeammatePlugin | null; + approvalPlugin: ApprovalPlugin | null; + explorePlugin: ExplorePlugin | null; + doJSON: (method: string, path: string, body?: unknown) => Promise; +} + +/** TeammateAgent owns all teammate-role logic and handles relevant inbound events. 
*/
+export class TeammateAgent {
+  private cfg: TeamClientConfig;
+  private ws: WSConnection;
+  private deps: TeammateAgentDeps;
+  private memberId = '';
+
+  constructor(cfg: TeamClientConfig, ws: WSConnection, deps: TeammateAgentDeps) {
+    this.cfg = cfg;
+    this.ws = ws;
+    this.deps = deps;
+  }
+
+  /** Registers this machine as a session member via REST. */
+  async init(_signal: AbortSignal): Promise<void> {
+    const resp = await this.deps.doJSON<{ id: string }>(
+      'POST',
+      `/api/team/sessions/${this.cfg.sessionId}/members`,
+      {
+        machineId: this.cfg.machineId,
+        machineName: this.cfg.machineName ?? '',
+      }
+    );
+    this.memberId = resp?.id ?? '';
+  }
+
+  /** Dispatches inbound events relevant to the teammate role. */
+  handle(evt: CloudEvent): void {
+    switch (evt.type) {
+      case EventTaskAssigned:
+        this.handleTaskAssigned(evt);
+        break;
+
+      case EventExploreRequest:
+        this.handleExploreRequest(evt);
+        break;
+    }
+  }
+
+  /**
+   * Sends an approval.request to the leader via WebSocket.
+   * riskLevel should be "low", "medium", or "high".
+   */
+  requestApproval(
+    toolName: string,
+    description: string,
+    riskLevel: string,
+    toolInput: Record<string, unknown>
+  ): void {
+    this.ws.send(
+      newEvent(EventApprovalRequest, this.cfg.sessionId, {
+        toolName,
+        description,
+        riskLevel,
+        toolInput,
+      })
+    );
+  }
+
+  /**
+   * Registers a local repository with the session's affinity registry.
+   * Call this on startup so the leader can schedule repo-specific tasks here.
+ */ + async registerRepo( + remoteUrl: string, + localPath: string, + branch: string, + dirty: boolean + ): Promise { + await this.deps.doJSON('POST', `/api/team/sessions/${this.cfg.sessionId}/repos`, { + memberId: this.memberId, + repoRemoteUrl: remoteUrl, + repoLocalPath: localPath, + currentBranch: branch, + hasUncommittedChanges: dirty, + lastSyncedAt: new Date().toISOString(), + }); + } + + // ─── Private ───────────────────────────────────────────────────────── + + private handleTaskAssigned(evt: CloudEvent): void { + if (!this.deps.teammatePlugin) return; + + const task = evt.payload?.['task'] as Task | undefined; + if (!task?.id) return; + + // Claim immediately so the leader knows this task is being worked on. + this.ws.send( + newEvent(EventTaskClaim, this.cfg.sessionId, { taskId: task.id }) + ); + + void this.executeTask(task); + } + + private async executeTask(task: Task): Promise { + const ctrl = new AbortController(); + const reporter: ProgressReporter = { + report: (pct, message) => { + this.ws.send( + newEvent(EventTaskProgress, this.cfg.sessionId, { + taskId: task.id, + percent: pct, + message, + }) + ); + }, + }; + + // Signal start. + this.ws.send( + newEvent(EventTaskProgress, this.cfg.sessionId, { + taskId: task.id, + percent: 0, + message: 'started', + }) + ); + + try { + const result = await this.deps.teammatePlugin!.executeTask( + ctrl.signal, + task, + reporter + ); + this.ws.send( + newEvent(EventTaskComplete, this.cfg.sessionId, { + taskId: task.id, + result, + }) + ); + } catch (err) { + this.ws.send( + newEvent(EventTaskFail, this.cfg.sessionId, { + taskId: task.id, + errorMessage: err instanceof Error ? 
err.message : String(err),
+        })
+      );
+    }
+  }
+
+  private handleExploreRequest(evt: CloudEvent): void {
+    if (!this.deps.explorePlugin) return;
+
+    const requestId = evt.payload?.['requestId'] as string | undefined;
+    const fromMachineId = evt.payload?.['fromMachineId'] as string | undefined;
+    if (!requestId) return;
+
+    const req: ExploreRequest = {
+      requestId,
+      sessionId: this.cfg.sessionId,
+      fromMachineId: fromMachineId ?? '',
+      queries: (evt.payload?.['queries'] as ExploreRequest['queries']) ?? [],
+    };
+
+    void (async () => {
+      const ctrl = new AbortController();
+      try {
+        const result = await this.deps.explorePlugin!.explore(ctrl.signal, req);
+        this.ws.send(
+          newEvent('explore.result', this.cfg.sessionId, {
+            requestId: result.requestId,
+            queryResults: result.queryResults,
+            fromMachineId,
+            error: result.error,
+          })
+        );
+      } catch (err) {
+        this.ws.send(
+          newEvent('explore.result', this.cfg.sessionId, {
+            requestId,
+            queryResults: [],
+            fromMachineId,
+            error: err instanceof Error ? err.message : String(err),
+          })
+        );
+      }
+    })();
+  }
+}
diff --git a/client/ts/src/types.ts b/client/ts/src/types.ts
new file mode 100644
index 0000000..2b08305
--- /dev/null
+++ b/client/ts/src/types.ts
@@ -0,0 +1,190 @@
+// types.ts — Shared event and model types for the Cloud Team Agent SDK.
+// Mirrors the server's internal/team/types.go definitions.
+
+// ─── Core event envelope ──────────────────────────────────────────────────
+
+export interface CloudEvent {
+  eventId: string;
+  type: string;
+  sessionId: string;
+  timestamp: number;
+  payload?: Record<string, unknown>;
+}
+
+// ─── Task types ───────────────────────────────────────────────────────────
+
+/** Full task record received from the server (e.g. via task.assigned).
*/
+export interface Task {
+  id: string;
+  sessionId: string;
+  description: string;
+  repoAffinity?: string[];
+  fileHints?: string[];
+  dependencies?: string[];
+  assignedMemberId?: string;
+  status: string;
+  priority: number;
+  retryCount: number;
+  maxRetries: number;
+  errorMessage?: string;
+  createdAt: string;
+  claimedAt?: string;
+  startedAt?: string;
+  completedAt?: string;
+}
+
+/**
+ * TaskSpec is what LeaderPlugin.planTasks returns.
+ * Set id to a pre-generated UUID if you want dependency DAGs to work within a
+ * single batch — the server will use the provided id instead of generating one.
+ */
+export interface TaskSpec {
+  id?: string;
+  description: string;
+  repoAffinity?: string[];
+  fileHints?: string[];
+  dependencies?: string[]; // references IDs within the same batch
+  assignedMemberId?: string;
+  priority?: number;
+  maxRetries?: number;
+}
+
+/** Returned by TeammatePlugin.executeTask on success. */
+export interface TaskResult {
+  output?: string;
+  files?: string[];
+  extraData?: Record<string, unknown>;
+}
+
+// ─── Approval types ───────────────────────────────────────────────────────
+
+export interface ApprovalRequest {
+  id: string;
+  sessionId: string;
+  requesterId: string;
+  toolName: string;
+  toolInput: Record<string, unknown>;
+  description?: string;
+  riskLevel: string;
+  status: string;
+  createdAt: string;
+}
+
+// ─── Explore types ────────────────────────────────────────────────────────
+
+export interface ExploreQuery {
+  type:
+    | 'file_tree'
+    | 'symbol_search'
+    | 'content_search'
+    | 'git_log'
+    | 'dependency_graph';
+  params: Record<string, unknown>;
+}
+
+export interface ExploreRequest {
+  requestId: string;
+  sessionId: string;
+  fromMachineId: string;
+  queries: ExploreQuery[];
+}
+
+export interface ExploreQueryResult {
+  type: string;
+  output: string;
+  truncated: boolean;
+}
+
+export interface ExploreResult {
+  requestId: string;
+  queryResults: ExploreQueryResult[];
+  error?: string;
+}
+
+// ─── Member type
────────────────────────────────────────────────────────── + +export interface Member { + id: string; + sessionId: string; + machineId: string; + machineName?: string; + role: string; + status: string; +} + +// ─── PlanTasksInput ─────────────────────────────────────────────────────── + +export interface PlanTasksInput { + goal: string; + sessionId: string; + /** Current session participants — use to make assignment decisions. */ + members: Member[]; +} + +// ─── Config ─────────────────────────────────────────────────────────────── + +export interface TeamClientConfig { + /** Base HTTP/HTTPS URL of the costrict server, e.g. "https://api.example.com". */ + serverUrl: string; + /** JWT bearer token for authentication. */ + token: string; + /** UUID of the existing team session to join. */ + sessionId: string; + /** Stable, unique identifier for this machine. Must be consistent across reconnects. */ + machineId: string; + /** Human-readable label for this machine (optional). */ + machineName?: string; + /** Either "leader" or "teammate". 
*/ + role: string; +} + +// ─── Event type constants (Client → Cloud) ──────────────────────────────── + +export const EventSessionCreate = 'session.create'; +export const EventSessionJoin = 'session.join'; +export const EventTaskPlanSubmit = 'task.plan.submit'; +export const EventTaskClaim = 'task.claim'; +export const EventTaskProgress = 'task.progress'; +export const EventTaskComplete = 'task.complete'; +export const EventTaskFail = 'task.fail'; +export const EventApprovalRequest = 'approval.request'; +export const EventApprovalRespond = 'approval.respond'; +export const EventMessageSend = 'message.send'; +export const EventRepoRegister = 'repo.register'; +export const EventExploreRequest = 'explore.request'; +export const EventExploreResult = 'explore.result'; +export const EventLeaderElect = 'leader.elect'; +export const EventLeaderHeartbeat = 'leader.heartbeat'; + +// ─── Event type constants (Cloud → Client) ──────────────────────────────── + +export const EventTaskAssigned = 'task.assigned'; +export const EventApprovalPush = 'approval.push'; +export const EventApprovalResponse = 'approval.response'; +export const EventMessageReceive = 'message.receive'; +export const EventSessionUpdated = 'session.updated'; +export const EventTeammateStatus = 'teammate.status'; +export const EventLeaderElected = 'leader.elected'; +export const EventLeaderExpired = 'leader.expired'; +export const EventError = 'error'; + +// ─── Status constants ───────────────────────────────────────────────────── + +export const SessionStatusActive = 'active'; +export const SessionStatusPaused = 'paused'; +export const SessionStatusCompleted = 'completed'; +export const SessionStatusFailed = 'failed'; + +export const MemberStatusOnline = 'online'; +export const MemberStatusOffline = 'offline'; +export const MemberStatusBusy = 'busy'; + +export const MemberRoleLeader = 'leader'; +export const MemberRoleTeammate = 'teammate'; + +export const TaskStatusPending = 'pending'; +export const 
TaskStatusAssigned = 'assigned'; +export const TaskStatusClaimed = 'claimed'; +export const TaskStatusRunning = 'running'; +export const TaskStatusCompleted = 'completed'; +export const TaskStatusFailed = 'failed'; diff --git a/client/ts/src/ws.ts b/client/ts/src/ws.ts new file mode 100644 index 0000000..424bc79 --- /dev/null +++ b/client/ts/src/ws.ts @@ -0,0 +1,233 @@ +// ws.ts — WebSocket wrapper with automatic reconnect. +// Works in both browser (native WebSocket) and Node.js (ws package). + +import type { CloudEvent, TeamClientConfig } from './types.js'; + +const PING_INTERVAL_MS = 30_000; +const WRITE_WAIT_MS = 10_000; +const PONG_WAIT_MS = 60_000; +const RECONNECT_INITIAL_MS = 1_000; +const RECONNECT_MAX_MS = 30_000; +const SEND_CHANNEL_CAP = 256; + +export type EventHandler = (evt: CloudEvent) => void; + +/** Builds the WebSocket URL from the client config. */ +function buildWsUrl(cfg: TeamClientConfig): string { + const base = cfg.serverUrl + .replace(/^https:\/\//, 'wss://') + .replace(/^http:\/\//, 'ws://') + .replace(/\/$/, ''); + const params = new URLSearchParams({ + machineId: cfg.machineId, + ...(cfg.token ? { token: cfg.token } : {}), + }); + return `${base}/ws/sessions/${cfg.sessionId}?${params.toString()}`; +} + +/** + * WSConnection manages a persistent WebSocket connection with automatic reconnect. + * Call start(signal) to begin; use send() to enqueue outbound events. + * Register an onEvent handler to receive inbound events. 
+ */ +export class WSConnection { + private cfg: TeamClientConfig; + private ws: WebSocket | null = null; + private pingTimer: ReturnType | null = null; + private pongTimer: ReturnType | null = null; + private outboundQueue: string[] = []; + private queueFull = false; + private connectedResolvers: Array<() => void> = []; + private connected = false; + + public onEvent: EventHandler = () => undefined; + + constructor(cfg: TeamClientConfig) { + this.cfg = cfg; + } + + /** Resolves as soon as the WebSocket connection is open (or immediately if already open). */ + waitConnected(): Promise { + if (this.connected) return Promise.resolve(); + return new Promise((resolve) => { + this.connectedResolvers.push(resolve); + }); + } + + /** Starts the connection loop. Resolves when the signal fires. */ + async start(signal: AbortSignal): Promise { + let backoff = RECONNECT_INITIAL_MS; + + while (!signal.aborted) { + await this.connectOnce(signal); + if (signal.aborted) break; + // Wait with exponential back-off before reconnecting. + await sleep(backoff, signal); + backoff = Math.min(backoff * 2, RECONNECT_MAX_MS); + } + } + + /** Enqueues an event for sending. Non-blocking; drops if queue is full. */ + send(evt: CloudEvent): void { + const data = JSON.stringify(evt); + if (this.ws && this.ws.readyState === getReadyState('OPEN')) { + this.ws.send(data); + } else { + if (this.outboundQueue.length < SEND_CHANNEL_CAP) { + this.outboundQueue.push(data); + this.queueFull = false; + } else if (!this.queueFull) { + this.queueFull = true; + console.warn('[team-client] send queue full — dropping events'); + } + } + } + + /** Closes the active connection. 
*/ + close(): void { + this.clearTimers(); + this.ws?.close(); + this.ws = null; + } + + // ─── Private ───────────────────────────────────────────────────────── + + private connectOnce(signal: AbortSignal): Promise { + return new Promise((resolve) => { + if (signal.aborted) { + resolve(); + return; + } + + let ws: WebSocket; + try { + ws = createWebSocket(buildWsUrl(this.cfg)); + } catch { + resolve(); + return; + } + this.ws = ws; + + const onAbort = () => { + ws.close(); + resolve(); + }; + signal.addEventListener('abort', onAbort, { once: true }); + + ws.onopen = () => { + // Flush queued messages. + for (const msg of this.outboundQueue) { + ws.send(msg); + } + this.outboundQueue = []; + this.startPing(ws); + // Notify waitConnected() waiters. + this.connected = true; + const resolvers = this.connectedResolvers.splice(0); + for (const r of resolvers) r(); + }; + + ws.onmessage = (ev) => { + this.resetPongTimer(ws); + try { + const evt = JSON.parse( + typeof ev.data === 'string' ? ev.data : ev.data.toString() + ) as CloudEvent; + this.onEvent(evt); + } catch { + // Ignore malformed messages. + } + }; + + ws.onerror = () => { + /* handled by onclose */ + }; + + ws.onclose = () => { + this.connected = false; + this.clearTimers(); + this.ws = null; + signal.removeEventListener('abort', onAbort); + resolve(); + }; + }); + } + + private startPing(ws: WebSocket): void { + this.clearTimers(); + this.pingTimer = setInterval(() => { + if (ws.readyState === getReadyState('OPEN')) { + // Browser WebSocket doesn't expose ping frames; send a heartbeat noop instead. + // For Node.js ws, this is a proper ping via the ping() method if available. + const wsAny = ws as unknown as { ping?: () => void }; + if (typeof wsAny.ping === 'function') { + wsAny.ping(); + } + } + // Set pong deadline. 
+ this.pongTimer = setTimeout(() => { + ws.close(); + }, PONG_WAIT_MS); + }, PING_INTERVAL_MS); + } + + private resetPongTimer(ws: WebSocket): void { + if (this.pongTimer !== null) { + clearTimeout(this.pongTimer); + this.pongTimer = null; + } + // Re-arm pong watchdog. + this.pongTimer = setTimeout(() => { + ws.close(); + }, PONG_WAIT_MS); + } + + private clearTimers(): void { + if (this.pingTimer !== null) { + clearInterval(this.pingTimer); + this.pingTimer = null; + } + if (this.pongTimer !== null) { + clearTimeout(this.pongTimer); + this.pongTimer = null; + } + } +} + +// ─── Helpers ────────────────────────────────────────────────────────────── + +/** Resolves after ms milliseconds, or immediately if signal fires. */ +function sleep(ms: number, signal: AbortSignal): Promise { + return new Promise((resolve) => { + const timer = setTimeout(resolve, ms); + signal.addEventListener('abort', () => { + clearTimeout(timer); + resolve(); + }, { once: true }); + }); +} + +/** + * Creates a WebSocket using the native browser API or the `ws` Node.js package. + * The `ws` peer dependency is optional — in browsers it is not needed. + */ +function createWebSocket(url: string): WebSocket { + if (typeof globalThis.WebSocket !== 'undefined') { + return new globalThis.WebSocket(url); + } + // Node.js environment — require the optional `ws` peer dependency. + // eslint-disable-next-line @typescript-eslint/no-require-imports + const WS = require('ws') as typeof WebSocket; + return new WS(url) as unknown as WebSocket; +} + +/** Gets the numeric value of a WebSocket ready-state by name. */ +function getReadyState(name: 'OPEN' | 'CLOSED'): number { + if (typeof globalThis.WebSocket !== 'undefined') { + return globalThis.WebSocket[name]; + } + return name === 'OPEN' ? 1 : 3; +} + +// Suppress unused-var warnings on timing constants that are referenced indirectly. 
+void WRITE_WAIT_MS;
diff --git a/client/ts/tsconfig.e2e.json b/client/ts/tsconfig.e2e.json
new file mode 100644
index 0000000..30dced8
--- /dev/null
+++ b/client/ts/tsconfig.e2e.json
@@ -0,0 +1,24 @@
+{
+  "compilerOptions": {
+    "target": "ES2022",
+    "module": "ESNext",
+    "moduleResolution": "node",
+    "lib": ["ES2022"],
+    "types": ["node"],
+    "declaration": false,
+    "sourceMap": true,
+    "outDir": "./dist/e2e",
+    "rootDir": ".",
+    "strict": true,
+    "esModuleInterop": true,
+    "skipLibCheck": true,
+    "forceConsistentCasingInFileNames": true,
+    "resolveJsonModule": true
+  },
+  "include": ["e2e/**/*", "src/**/*"],
+  "exclude": ["node_modules", "dist"],
+  "ts-node": {
+    "esm": true,
+    "transpileOnly": true
+  }
+}
diff --git a/client/ts/tsconfig.json b/client/ts/tsconfig.json
new file mode 100644
index 0000000..6652179
--- /dev/null
+++ b/client/ts/tsconfig.json
@@ -0,0 +1,20 @@
+{
+  "compilerOptions": {
+    "target": "ES2020",
+    "module": "Node16",
+    "moduleResolution": "Node16",
+    "lib": ["ES2020", "DOM"],
+    "declaration": true,
+    "declarationMap": true,
+    "sourceMap": true,
+    "outDir": "./dist",
+    "rootDir": "./src",
+    "strict": true,
+    "esModuleInterop": true,
+    "skipLibCheck": true,
+    "forceConsistentCasingInFileNames": true,
+    "types": ["node"]
+  },
+  "include": ["src"],
+  "exclude": ["node_modules", "dist"]
+}
diff --git a/docs/proposals/CLOUD_TEAM_ARCHITECTURE.md b/docs/proposals/CLOUD_TEAM_ARCHITECTURE.md
index 64f9073..8d85e0b 100644
--- a/docs/proposals/CLOUD_TEAM_ARCHITECTURE.md
+++ b/docs/proposals/CLOUD_TEAM_ARCHITECTURE.md
@@ -147,6 +147,12 @@ Scheduler 按亲和性规则分配
 assignedTeammate 提交 TaskPlan 到云端
 ```
+
+**拆解目标选择(Decompose Target Selection):**
+
+1. 优先选择在线的**非 Leader Teammate** 作为 `decompose.request` 目标
+2. 若无可用 Teammate,且 Leader 在线,则**回退为 Leader 自拆解**(发送给 Leader 自己)
+3. 
若 Leader 也不可用,则降级为单任务 fallback(粗粒度任务) + #### 3.1.2 远程代码探查(Remote Explore) Leader 通过云端向 Teammate 发起只读探查请求,Teammate 在**受限沙箱**中执行(仅允许 `rg`、`grep`、`ls`、`find`、`git log` 等只读命令,与 OMX `omx-explore` 的 allowlist 机制一致),防止探查操作意外触发写入。 @@ -193,7 +199,7 @@ Leader 云端 Message Bus Teammate **超时与降级:** - 探查请求超时(默认 30s)→ Leader 基于用户描述做粗粒度拆解,`fileHints` 留空 -- Teammate 离线 → 从 Registry 选择同仓库的其他 Teammate;无可用 Teammate → 降级为粗粒度拆解 +- Teammate 离线 → 从 Registry 选择同仓库的其他 Teammate;无可用 Teammate 且 Leader 在线 → Leader 自拆解;Leader 也不可用 → 降级为粗粒度拆解 #### 3.1.3 仓库亲和性分析 diff --git a/go.work b/go.work new file mode 100644 index 0000000..889fa91 --- /dev/null +++ b/go.work @@ -0,0 +1,8 @@ +go 1.25.0 + +use ./server + +use ( + ./client/go + ./gateway +) diff --git a/go.work.sum b/go.work.sum new file mode 100644 index 0000000..99d9dc5 --- /dev/null +++ b/go.work.sum @@ -0,0 +1,108 @@ +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/firestore v1.15.0/go.mod h1:GWOxFXcv8GZUtYpWHw/w6IuYNux/BtmeVTMmjrm4yhk= +cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= +cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s= +cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= +github.com/ClickHouse/ch-go v0.71.0/go.mod h1:NwbNc+7jaqfY58dmdDUbG4Jl22vThgx1cYjBw0vtgXw= +github.com/ClickHouse/clickhouse-go/v2 v2.43.0/go.mod h1:o6jf7JM/zveWC/PP277BLxjHy5KjnGX/jfljhM4s34g= +github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/chenzhuoyu/base64x 
v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/elastic/go-sysinfo v1.15.4/go.mod h1:ZBVXmqS368dOn/jvijV/zHLfakWTYHBZPk3G244lHrU= +github.com/elastic/go-windows v1.0.2/go.mod h1:bGcDpBzXgYSqM0Gx3DM4+UxFj300SZLixie9u9ixLM8= +github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= +github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mfridman/xflag v0.1.0/go.mod h1:/483ywM5ZO5SuMVjrIGquYNE5CzLrj5Ux/LxWWnjRaE= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM= 
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/moby/api v1.53.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc= +github.com/moby/moby/client v0.2.2/go.mod h1:2EkIPVNCqR05CMIzL1mfA07t0HvVUUOl85pasRz/GmQ= +github.com/nats-io/nats.go v1.34.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= +github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/paulmach/orb v0.12.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4= +github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/crypt v0.19.0/go.mod h1:c6vimRziqqERhtSe0MhIvzE1w54FrCHtrXb5NH/ja78= +github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/tursodatabase/libsql-client-go v0.0.0-20251219100830-236aa1ff8acc/go.mod h1:08inkKyguB6CGGssc/JzhmQWwBgFQBgjlYFjxjRh7nU= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/vertica/vertica-sql-go v1.3.5/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20260128080146-c4ed16b24b37/go.mod 
h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= +github.com/ydb-platform/ydb-go-sdk/v3 v3.127.0/go.mod h1:stS1mQYjbJvwwYaYzKyFY9eMiuVXWWXQA6T+SpOLg9c= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= +go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= +go.etcd.io/etcd/client/v2 v2.305.12/go.mod h1:aQ/yhsxMu+Oht1FOupSr60oBvcS9cKXHrzBpDsPTf9E= +go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4/go.mod h1:g5NllXBEermZrmR51cJDQxmJUHUOfRAaNyWBM+R+548= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod 
h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.171.0/go.mod h1:Hnq5AHm4OTMt2BUVjael2CWZFD6vksJdWCWiUAmjC9o= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/package.json b/package.json index 2197617..90a1b4c 100644 --- a/package.json +++ b/package.json @@ -6,7 +6,7 @@ "scripts": { "dev:api": "cd server && go run ./cmd/api", "dev:worker": "cd server && go run ./cmd/worker", - "dev:gateway": "cd gateway && go run ./cmd", + "dev:gateway": "cd gateway && go work use . 
&& go run ./cmd", "dev": "concurrently \"npm run dev:api\" \"npm run dev:gateway\"", "dev:all": "concurrently \"npm run dev:api\" \"npm run dev:worker\" \"npm run dev:gateway\"", "build:api": "cd server && go build -o bin/server ./cmd/api", diff --git a/server/cmd/api/main.go b/server/cmd/api/main.go index 0a31c99..1969616 100644 --- a/server/cmd/api/main.go +++ b/server/cmd/api/main.go @@ -23,14 +23,15 @@ import ( "context" "fmt" "log" + "net/http" "os" "strings" "time" _ "github.com/costrict/costrict-web/server/docs" "github.com/costrict/costrict-web/server/internal/channel" - "github.com/costrict/costrict-web/server/internal/channel/adapters/wecom" "github.com/costrict/costrict-web/server/internal/channel/adapters/wechat" + "github.com/costrict/costrict-web/server/internal/channel/adapters/wecom" "github.com/costrict/costrict-web/server/internal/cloud" "github.com/costrict/costrict-web/server/internal/config" "github.com/costrict/costrict-web/server/internal/database" @@ -45,6 +46,7 @@ import ( "github.com/costrict/costrict-web/server/internal/services" "github.com/costrict/costrict-web/server/internal/storage" "github.com/costrict/costrict-web/server/internal/systemrole" + teampkg "github.com/costrict/costrict-web/server/internal/team" usagepkg "github.com/costrict/costrict-web/server/internal/usage" userpkg "github.com/costrict/costrict-web/server/internal/user" "github.com/gin-gonic/gin" @@ -388,13 +390,15 @@ func main() { } } + var redisClient *redis.Client var store gateway.Store if cfg.RedisURL != "" { opt, err := redis.ParseURL(cfg.RedisURL) if err != nil { log.Fatalf("Invalid REDIS_URL: %v", err) } - store = gateway.NewRedisStore(redis.NewClient(opt)) + redisClient = redis.NewClient(opt) + store = gateway.NewRedisStore(redisClient) log.Printf("Gateway store: Redis (%s)", cfg.RedisURL) } else { store = gateway.NewMemoryStore() @@ -423,8 +427,80 @@ func main() { // Device proxy: require user auth + device ownership check 
r.Any("/cloud/device/:deviceID/proxy/*path", middleware.RequireAuth(casdoorEndpoint, jwksProvider), gateway.DeviceProxyHandler(gatewayRegistry, gatewayClient, deviceSvc)) + // Cloud Team module + teamModule := teampkg.New(db, redisClient) + teamModule.Handler.SetAssignedTaskPusher(func(ctx context.Context, sessionID string, machineID string, userID string, task teampkg.TeamTask) error { + _ = ctx + dispatchEvent := cloud.Event{ + Type: cloud.EventTeamTaskDispatch, + Properties: map[string]any{ + "sessionID": sessionID, + "task": task, + }, + } + err := cloudModule.Router.RouteUserCommand(machineID, dispatchEvent) + if err == nil || strings.TrimSpace(userID) == "" { + return err + } + + // Fallback: some team members are browser-only machine IDs and cannot be + // routed by gateway (gateway routes by cloud deviceID). In that case, try + // the user's currently connected cloud devices. + devices, listErr := deviceSvc.ListDevices(userID) + if listErr != nil { + return err + } + for _, dev := range devices { + if dev.DeviceID == "" || dev.DeviceID == machineID { + continue + } + if _, gwErr := gatewayRegistry.GetDeviceGateway(dev.DeviceID); gwErr != nil { + continue + } + if routeErr := cloudModule.Router.RouteUserCommand(dev.DeviceID, dispatchEvent); routeErr == nil { + logger.Warn("[team] fallback routed task=%s session=%s from machine=%s to device=%s", task.ID, sessionID, machineID, dev.DeviceID) + return nil + } + } + return err + }) + teamAPIGroup := r.Group("/api") + teamAPIGroup.Use(requireUserOrDeviceAuth(deviceSvc)) + teamWSGroup := r.Group("/ws") + teamWSGroup.Use(middleware.OptionalAuth(casdoorEndpoint, jwksProvider)) + teamModule.RegisterRoutes(teamAPIGroup, teamWSGroup) + log.Printf("Server starting on port %s", cfg.Port) if err := r.Run(":" + cfg.Port); err != nil { log.Fatalf("Failed to start server: %v", err) } } + +func requireUserOrDeviceAuth(deviceSvc *services.DeviceService) gin.HandlerFunc { + return func(c *gin.Context) { + if 
c.GetString(middleware.UserIDKey) != "" { + c.Next() + return + } + + token := middleware.ExtractToken(c) + if token == "" { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "Authentication required"}) + return + } + + dev, err := deviceSvc.VerifyDeviceToken(token) + if err != nil || dev == nil || strings.TrimSpace(dev.UserID) == "" { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "Invalid token"}) + return + } + + c.Set(middleware.UserIDKey, dev.UserID) + if c.GetString(middleware.UserNameKey) == "" { + c.Set(middleware.UserNameKey, dev.DisplayName) + } + c.Set("deviceId", dev.DeviceID) + c.Set("authSource", "device-token") + c.Next() + } +} diff --git a/server/cmd/migrate/main.go b/server/cmd/migrate/main.go index 68e0417..2c03b4a 100644 --- a/server/cmd/migrate/main.go +++ b/server/cmd/migrate/main.go @@ -15,6 +15,7 @@ import ( "github.com/costrict/costrict-web/server/internal/config" "github.com/costrict/costrict-web/server/internal/database" "github.com/costrict/costrict-web/server/internal/models" + "github.com/costrict/costrict-web/server/internal/team" "github.com/costrict/costrict-web/server/internal/services" migrations "github.com/costrict/costrict-web/server/migrations" "github.com/google/uuid" @@ -111,6 +112,15 @@ func main() { } err = db.AutoMigrate( + &team.TeamSession{}, + &team.TeamSessionMember{}, + &team.TeamTask{}, + &team.TeamApprovalRequest{}, + &team.TeamRepoAffinity{}, + &models.UserSystemRole{}, + &models.Repository{}, + &models.RepoMember{}, + &models.RepoInvitation{}, &models.UserSystemRole{}, &models.Repository{}, &models.RepoMember{}, diff --git a/server/docs/docs.go b/server/docs/docs.go index 1cf60c1..58f1d08 100644 --- a/server/docs/docs.go +++ b/server/docs/docs.go @@ -42,7 +42,7 @@ const docTemplate = `{ "channels": { "type": "array", "items": { - "$ref": "#/definitions/models.SystemNotificationChannel" + "$ref": 
"#/definitions/github_com_costrict_costrict-web_server_internal_models.SystemNotificationChannel" } } } @@ -116,7 +116,7 @@ const docTemplate = `{ "type": "object", "properties": { "channel": { - "$ref": "#/definitions/models.SystemNotificationChannel" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.SystemNotificationChannel" } } } @@ -206,7 +206,7 @@ const docTemplate = `{ "type": "object", "properties": { "channel": { - "$ref": "#/definitions/models.SystemNotificationChannel" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.SystemNotificationChannel" } } } @@ -734,7 +734,7 @@ const docTemplate = `{ "201": { "description": "Created", "schema": { - "$ref": "#/definitions/models.CapabilityArtifact" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityArtifact" } }, "400": { @@ -2810,7 +2810,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/project.InvitationsResponse" + "$ref": "#/definitions/internal_project.InvitationsResponse" } }, "401": { @@ -2857,7 +2857,7 @@ const docTemplate = `{ "invitations": { "type": "array", "items": { - "$ref": "#/definitions/models.RepoInvitation" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.RepoInvitation" } } } @@ -2975,7 +2975,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.RepoMember" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.RepoMember" } }, "400": { @@ -3124,7 +3124,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/project.RespondInvitationRequest" + "$ref": "#/definitions/internal_project.RespondInvitationRequest" } } ], @@ -3132,7 +3132,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/project.InvitationResponse" + "$ref": 
"#/definitions/internal_project.InvitationResponse" } }, "400": { @@ -3452,7 +3452,7 @@ const docTemplate = `{ "items": { "type": "array", "items": { - "$ref": "#/definitions/handlers.MyItem" + "$ref": "#/definitions/internal_handlers.MyItem" } }, "page": { @@ -3504,7 +3504,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/handlers.ItemResponse" + "$ref": "#/definitions/internal_handlers.ItemResponse" } }, "404": { @@ -3691,7 +3691,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/llm.SkillAnalysis" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_llm.SkillAnalysis" } }, "404": { @@ -3747,7 +3747,7 @@ const docTemplate = `{ "artifacts": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityArtifact" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityArtifact" } } } @@ -3828,7 +3828,7 @@ const docTemplate = `{ "201": { "description": "Created", "schema": { - "$ref": "#/definitions/models.BehaviorLog" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.BehaviorLog" } }, "400": { @@ -4176,7 +4176,7 @@ const docTemplate = `{ "improvements": { "type": "array", "items": { - "$ref": "#/definitions/llm.SkillImprovement" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_llm.SkillImprovement" } } } @@ -4187,7 +4187,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.CapabilityItem" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityItem" } }, "400": { @@ -4255,7 +4255,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.CapabilityItem" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityItem" } }, "400": { @@ -4446,7 +4446,7 @@ const docTemplate = `{ "results": { "type": 
"array", "items": { - "$ref": "#/definitions/models.SecurityScan" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.SecurityScan" } }, "total": { @@ -4563,7 +4563,7 @@ const docTemplate = `{ "items": { "type": "array", "items": { - "$ref": "#/definitions/services.SearchResultItem" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.SearchResultItem" } }, "page": { @@ -4626,7 +4626,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/services.ItemBehaviorStats" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.ItemBehaviorStats" } }, "500": { @@ -4683,7 +4683,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.CapabilityItem" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityItem" } }, "400": { @@ -4907,7 +4907,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/services.SearchResult" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.SearchResult" } }, "400": { @@ -4971,7 +4971,7 @@ const docTemplate = `{ "items": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityItem" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityItem" } }, "page": { @@ -5057,7 +5057,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/services.RecommendResponse" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.RecommendResponse" } }, "400": { @@ -5145,7 +5145,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/services.SearchResult" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.SearchResult" } }, "400": { @@ -5219,7 +5219,7 @@ const docTemplate = `{ "items": { "type": "array", 
"items": { - "$ref": "#/definitions/models.CapabilityItem" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityItem" } }, "page": { @@ -5267,7 +5267,7 @@ const docTemplate = `{ "channels": { "type": "array", "items": { - "$ref": "#/definitions/models.UserNotificationChannel" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.UserNotificationChannel" } } } @@ -5347,7 +5347,7 @@ const docTemplate = `{ "type": "object", "properties": { "channel": { - "$ref": "#/definitions/models.UserNotificationChannel" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.UserNotificationChannel" } } } @@ -5453,7 +5453,7 @@ const docTemplate = `{ "type": "object", "properties": { "channel": { - "$ref": "#/definitions/models.UserNotificationChannel" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.UserNotificationChannel" } } } @@ -5536,7 +5536,7 @@ const docTemplate = `{ "type": "object", "properties": { "channel": { - "$ref": "#/definitions/models.UserNotificationChannel" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.UserNotificationChannel" } } } @@ -5687,7 +5687,7 @@ const docTemplate = `{ "logs": { "type": "array", "items": { - "$ref": "#/definitions/models.NotificationLog" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.NotificationLog" } } } @@ -5821,7 +5821,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/project.ProjectsResponse" + "$ref": "#/definitions/internal_project.ProjectsResponse" } }, "401": { @@ -5872,7 +5872,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/project.CreateProjectRequest" + "$ref": "#/definitions/internal_project.CreateProjectRequest" } } ], @@ -5880,7 +5880,7 @@ const docTemplate = `{ "201": { "description": "Created", "schema": { - "$ref": 
"#/definitions/project.ProjectResponse" + "$ref": "#/definitions/internal_project.ProjectResponse" } }, "400": { @@ -5947,7 +5947,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/project.ProjectResponse" + "$ref": "#/definitions/internal_project.ProjectResponse" } }, "401": { @@ -6016,7 +6016,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/project.UpdateProjectRequest" + "$ref": "#/definitions/internal_project.UpdateProjectRequest" } } ], @@ -6024,7 +6024,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/project.ProjectResponse" + "$ref": "#/definitions/internal_project.ProjectResponse" } }, "400": { @@ -6164,7 +6164,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/project.ProjectResponse" + "$ref": "#/definitions/internal_project.ProjectResponse" } }, "400": { @@ -6246,7 +6246,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/project.UpdateProjectArchiveTimeRequest" + "$ref": "#/definitions/internal_project.UpdateProjectArchiveTimeRequest" } } ], @@ -6254,7 +6254,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/project.ProjectBasicInfoResponse" + "$ref": "#/definitions/internal_project.ProjectBasicInfoResponse" } }, "400": { @@ -6332,7 +6332,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/project.ProjectBasicInfoResponse" + "$ref": "#/definitions/internal_project.ProjectBasicInfoResponse" } }, "401": { @@ -6399,7 +6399,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/project.InvitationsResponse" + "$ref": "#/definitions/internal_project.InvitationsResponse" } }, "401": { @@ -6468,7 +6468,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": 
"#/definitions/project.CreateInvitationRequest" + "$ref": "#/definitions/internal_project.CreateInvitationRequest" } } ], @@ -6476,7 +6476,7 @@ const docTemplate = `{ "201": { "description": "Created", "schema": { - "$ref": "#/definitions/project.InvitationResponse" + "$ref": "#/definitions/internal_project.InvitationResponse" } }, "400": { @@ -6554,7 +6554,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/project.MembersResponse" + "$ref": "#/definitions/internal_project.MembersResponse" } }, "401": { @@ -6714,7 +6714,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/project.UpdateMemberRoleRequest" + "$ref": "#/definitions/internal_project.UpdateMemberRoleRequest" } } ], @@ -6722,7 +6722,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/project.MemberResponse" + "$ref": "#/definitions/internal_project.MemberResponse" } }, "400": { @@ -6804,7 +6804,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/project.SetProjectPinRequest" + "$ref": "#/definitions/internal_project.SetProjectPinRequest" } } ], @@ -6812,7 +6812,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/project.ProjectResponse" + "$ref": "#/definitions/internal_project.ProjectResponse" } }, "400": { @@ -6890,7 +6890,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/project.ProjectResponse" + "$ref": "#/definitions/internal_project.ProjectResponse" } }, "400": { @@ -6967,7 +6967,7 @@ const docTemplate = `{ "registries": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" } } } @@ -7031,7 +7031,7 @@ const docTemplate = `{ "201": { "description": "Created", "schema": { - "$ref": 
"#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" } }, "400": { @@ -7087,7 +7087,7 @@ const docTemplate = `{ "registries": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" } } } @@ -7121,7 +7121,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" } }, "404": { @@ -7161,7 +7161,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" } }, "404": { @@ -7233,7 +7233,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" } }, "400": { @@ -7388,7 +7388,7 @@ const docTemplate = `{ "items": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityItem" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityItem" } }, "page": { @@ -7601,7 +7601,7 @@ const docTemplate = `{ "jobs": { "type": "array", "items": { - "$ref": "#/definitions/models.SyncJob" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.SyncJob" } }, "total": { @@ -7640,7 +7640,7 @@ const docTemplate = `{ "logs": { "type": "array", "items": { - "$ref": "#/definitions/models.SyncLog" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.SyncLog" } }, "total": { @@ -7753,7 +7753,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": 
"#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" } }, "400": { @@ -8002,7 +8002,7 @@ const docTemplate = `{ "repositories": { "type": "array", "items": { - "$ref": "#/definitions/models.Repository" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.Repository" } } } @@ -8077,7 +8077,7 @@ const docTemplate = `{ "201": { "description": "Created", "schema": { - "$ref": "#/definitions/models.Repository" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.Repository" } }, "400": { @@ -8124,7 +8124,7 @@ const docTemplate = `{ "repositories": { "type": "array", "items": { - "$ref": "#/definitions/models.Repository" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.Repository" } } } @@ -8167,7 +8167,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.Repository" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.Repository" } }, "404": { @@ -8230,7 +8230,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.Repository" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.Repository" } }, "400": { @@ -8346,7 +8346,7 @@ const docTemplate = `{ "invitations": { "type": "array", "items": { - "$ref": "#/definitions/models.RepoInvitation" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.RepoInvitation" } } } @@ -8410,7 +8410,7 @@ const docTemplate = `{ "201": { "description": "Created", "schema": { - "$ref": "#/definitions/models.RepoInvitation" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.RepoInvitation" } }, "400": { @@ -8562,7 +8562,7 @@ const docTemplate = `{ "members": { "type": "array", "items": { - "$ref": "#/definitions/models.RepoMember" + "$ref": 
"#/definitions/github_com_costrict_costrict-web_server_internal_models.RepoMember" } } } @@ -8626,7 +8626,7 @@ const docTemplate = `{ "201": { "description": "Created", "schema": { - "$ref": "#/definitions/models.RepoMember" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.RepoMember" } }, "400": { @@ -8723,7 +8723,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.RepoMember" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.RepoMember" } }, "400": { @@ -8862,7 +8862,7 @@ const docTemplate = `{ "registries": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" } } } @@ -8896,7 +8896,7 @@ const docTemplate = `{ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/handlers.CreateSyncRegistryInput" + "$ref": "#/definitions/internal_handlers.CreateSyncRegistryInput" } } ], @@ -8904,7 +8904,7 @@ const docTemplate = `{ "201": { "description": "Created", "schema": { - "$ref": "#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" } }, "400": { @@ -9000,7 +9000,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" } }, "404": { @@ -9090,7 +9090,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" } }, "404": { @@ -9223,7 +9223,7 @@ const docTemplate = `{ "jobs": { "type": "array", "items": { - "$ref": "#/definitions/models.SyncJob" + "$ref": 
"#/definitions/github_com_costrict_costrict-web_server_internal_models.SyncJob" } }, "total": { @@ -9280,7 +9280,7 @@ const docTemplate = `{ "logs": { "type": "array", "items": { - "$ref": "#/definitions/models.SyncLog" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.SyncLog" } }, "total": { @@ -9444,7 +9444,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.SecurityScan" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.SecurityScan" } }, "404": { @@ -9483,7 +9483,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.SyncJob" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.SyncJob" } }, "404": { @@ -9522,7 +9522,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/models.SyncLog" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.SyncLog" } }, "404": { @@ -9539,87 +9539,56 @@ const docTemplate = `{ } } }, - "/usage/activity": { - "get": { - "security": [ - { - "BearerAuth": [] - } + "/team/approvals/:approvalId": { + "patch": { + "consumes": [ + "application/json" ], - "description": "Query daily request activity for all users under the specified git repository URL.", "produces": [ "application/json" ], "tags": [ - "usage" + "team" ], - "summary": "Query usage activity", + "summary": "Respond to approval request (Leader)", "parameters": [ { "type": "string", - "description": "Git repository URL", - "name": "git_repo_url", - "in": "query", + "description": "Approval ID", + "name": "approvalId", + "in": "path", "required": true }, { - "type": "integer", - "description": "Range in days (1-90, default 7)", - "name": "days", - "in": "query" - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/services.UsageActivityResponse" - } - }, - "400": { - "description": 
"Bad Request", + "description": "Response", + "name": "body", + "in": "body", + "required": true, "schema": { "type": "object", "properties": { - "error": { + "feedback": { "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "schema": { - "type": "object", - "properties": { - "error": { + }, + "status": { "type": "string" } } } - }, - "500": { - "description": "Internal Server Error", + } + ], + "responses": { + "200": { + "description": "OK", "schema": { - "type": "object", - "properties": { - "error": { - "type": "string" - } - } + "$ref": "#/definitions/internal_team.TeamApprovalRequest" } } } } }, - "/usage/report": { + "/team/sessions": { "post": { - "security": [ - { - "BearerAuth": [] - } - ], - "description": "Receive batched session usage reports from authenticated CLI clients and upsert them with request-level idempotency.", "consumes": [ "application/json" ], @@ -9627,79 +9596,50 @@ const docTemplate = `{ "application/json" ], "tags": [ - "usage" + "team" ], - "summary": "Report session usage", + "summary": "Create team session", "parameters": [ { - "description": "Usage report payload", + "description": "Session data", "name": "body", "in": "body", "required": true, - "schema": { - "$ref": "#/definitions/services.UsageReportRequest" - } - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/services.UsageReportResponse" - } - }, - "400": { - "description": "Bad Request", - "schema": { - "type": "object", - "properties": { - "error": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", "schema": { "type": "object", "properties": { - "error": { + "name": { "type": "string" } } } - }, - "500": { - "description": "Internal Server Error", + } + ], + "responses": { + "201": { + "description": "Created", "schema": { - "type": "object", - "properties": { - "error": { - "type": "string" - } - } + "$ref": "#/definitions/internal_team.TeamSession" } } } } }, - 
"/users/info": { + "/team/sessions/:id": { "get": { - "description": "Query a user's basic information by user ID, including name and avatar URL.", "produces": [ "application/json" ], "tags": [ - "users" + "team" ], - "summary": "Get user basic info", + "summary": "Get team session", "parameters": [ { "type": "string", - "description": "User ID", + "description": "Session ID", "name": "id", - "in": "query", + "in": "path", "required": true } ], @@ -9707,74 +9647,112 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "type": "object", - "properties": { - "user": { - "$ref": "#/definitions/handlers.userBasicInfoResponse" - } - } + "$ref": "#/definitions/internal_team.TeamSession" } - }, - "400": { - "description": "Bad Request", + } + } + }, + "delete": { + "tags": [ + "team" + ], + "summary": "Close / delete team session", + "parameters": [ + { + "type": "string", + "description": "Session ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", "schema": { "type": "object", "properties": { - "error": { + "message": { "type": "string" } } } + } + } + }, + "patch": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "team" + ], + "summary": "Update team session status", + "parameters": [ + { + "type": "string", + "description": "Session ID", + "name": "id", + "in": "path", + "required": true }, - "404": { - "description": "Not Found", + { + "description": "Status", + "name": "body", + "in": "body", + "required": true, "schema": { "type": "object", "properties": { - "error": { + "status": { "type": "string" } } } - }, - "500": { - "description": "Internal Server Error", + } + ], + "responses": { + "200": { + "description": "OK", "schema": { - "type": "object", - "properties": { - "error": { - "type": "string" - } - } + "$ref": "#/definitions/internal_team.TeamSession" } } } } }, - "/users/me/behavior/summary": { + 
"/team/sessions/:id/approvals": { "get": { - "description": "Get a summary of user's behavior and preferences", "produces": [ "application/json" ], "tags": [ - "behavior" + "team" + ], + "summary": "List pending approvals", + "parameters": [ + { + "type": "string", + "description": "Session ID", + "name": "id", + "in": "path", + "required": true + } ], - "summary": "Get user behavior summary", "responses": { "200": { "description": "OK", - "schema": { - "$ref": "#/definitions/models.UserBehaviorSummary" - } - }, - "500": { - "description": "Internal Server Error", "schema": { "type": "object", "properties": { - "error": { - "type": "string" + "approvals": { + "type": "array", + "items": { + "$ref": "#/definitions/internal_team.TeamApprovalRequest" + } } } } @@ -9782,23 +9760,46 @@ const docTemplate = `{ } } }, - "/users/names": { - "get": { - "description": "Given a comma-separated list of user IDs, return a map of id -\u003e displayName. Results are served from an in-memory cache when possible.", + "/team/sessions/:id/explore": { + "post": { + "description": "Leader sends explore queries targeting a specific Teammate machine.", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "users" + "team" ], - "summary": "Batch resolve user display names", + "summary": "Synchronous remote code explore (Leader → Teammate)", "parameters": [ { "type": "string", - "description": "Comma-separated user IDs (max 50)", - "name": "ids", - "in": "query", + "description": "Session ID", + "name": "id", + "in": "path", "required": true + }, + { + "description": "Explore request", + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "queries": { + "type": "array", + "items": { + "type": "object" + } + }, + "targetMachineId": { + "type": "string" + } + } + } } ], "responses": { @@ -9807,17 +9808,14 @@ const docTemplate = `{ "schema": { "type": "object", "properties": { - "names": { - "type": 
"object", - "additionalProperties": { - "type": "string" - } + "result": { + "type": "object" } } } }, - "400": { - "description": "Bad Request", + "504": { + "description": "Gateway Timeout", "schema": { "type": "object", "properties": { @@ -9830,22 +9828,21 @@ const docTemplate = `{ } } }, - "/users/search": { + "/team/sessions/:id/leader": { "get": { - "description": "Search users by username or email keyword (requires authentication)", "produces": [ "application/json" ], "tags": [ - "users" + "team" ], - "summary": "Search users", + "summary": "Get current leader info", "parameters": [ { "type": "string", - "description": "Search keyword", - "name": "q", - "in": "query", + "description": "Session ID", + "name": "id", + "in": "path", "required": true } ], @@ -9855,32 +9852,63 @@ const docTemplate = `{ "schema": { "type": "object", "properties": { - "users": { - "type": "array", - "items": { - "$ref": "#/definitions/casdoor.CasdoorUser" - } + "leaderId": { + "type": "string" } } } + } + } + } + }, + "/team/sessions/:id/leader/elect": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "team" + ], + "summary": "Attempt leader election", + "parameters": [ + { + "type": "string", + "description": "Session ID", + "name": "id", + "in": "path", + "required": true }, - "401": { - "description": "Unauthorized", + { + "description": "Candidate", + "name": "body", + "in": "body", + "required": true, "schema": { "type": "object", "properties": { - "error": { + "machineId": { "type": "string" } } } - }, - "500": { - "description": "Internal Server Error", + } + ], + "responses": { + "200": { + "description": "OK", "schema": { "type": "object", "properties": { - "error": { + "elected": { + "type": "boolean" + }, + "fencingToken": { + "type": "integer" + }, + "leaderId": { "type": "string" } } @@ -9889,9 +9917,8 @@ const docTemplate = `{ } } }, - "/webhooks/github": { + "/team/sessions/:id/leader/heartbeat": { "post": 
{ - "description": "Receive GitHub push events and enqueue sync jobs", "consumes": [ "application/json" ], @@ -9899,30 +9926,29 @@ const docTemplate = `{ "application/json" ], "tags": [ - "sync" + "team" ], - "summary": "Handle GitHub webhook", + "summary": "Leader lock renewal heartbeat", "parameters": [ { "type": "string", - "description": "GitHub event type (push)", - "name": "X-GitHub-Event", - "in": "header", + "description": "Session ID", + "name": "id", + "in": "path", "required": true }, { - "type": "string", - "description": "GitHub webhook signature", - "name": "X-Hub-Signature-256", - "in": "header" - }, - { - "description": "GitHub webhook payload", + "description": "Leader identity", "name": "body", "in": "body", "required": true, "schema": { - "type": "object" + "type": "object", + "properties": { + "machineId": { + "type": "string" + } + } } } ], @@ -9932,54 +9958,126 @@ const docTemplate = `{ "schema": { "type": "object", "properties": { - "message": { - "type": "string" + "renewed": { + "type": "boolean" } } } - }, - "202": { - "description": "Accepted", + } + } + } + }, + "/team/sessions/:id/members": { + "get": { + "produces": [ + "application/json" + ], + "tags": [ + "team" + ], + "summary": "List session members", + "parameters": [ + { + "type": "string", + "description": "Session ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", "schema": { "type": "object", "properties": { - "queued": { + "members": { "type": "array", "items": { - "type": "string" + "$ref": "#/definitions/internal_team.TeamSessionMember" } } } } + } + } + }, + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "team" + ], + "summary": "Join team session", + "parameters": [ + { + "type": "string", + "description": "Session ID", + "name": "id", + "in": "path", + "required": true }, - "400": { - "description": "Bad Request", + { + "description": "Machine 
info", + "name": "body", + "in": "body", + "required": true, "schema": { "type": "object", "properties": { - "error": { + "machineId": { + "type": "string" + }, + "machineName": { "type": "string" } } } - }, - "401": { - "description": "Unauthorized", + } + ], + "responses": { + "201": { + "description": "Created", "schema": { - "type": "object", - "properties": { - "error": { - "type": "string" - } - } + "$ref": "#/definitions/internal_team.TeamSessionMember" } + } + } + } + }, + "/team/sessions/:id/members/:mid": { + "delete": { + "tags": [ + "team" + ], + "summary": "Leave team session", + "parameters": [ + { + "type": "string", + "description": "Session ID", + "name": "id", + "in": "path", + "required": true }, - "503": { - "description": "Service Unavailable", + { + "type": "string", + "description": "Member ID", + "name": "mid", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", "schema": { "type": "object", "properties": { - "error": { + "message": { "type": "string" } } @@ -9988,49 +10086,75 @@ const docTemplate = `{ } } }, - "/workspaces": { + "/team/sessions/:id/progress": { "get": { - "description": "Get all workspaces for the authenticated user", "produces": [ "application/json" ], "tags": [ - "workspaces" + "team" + ], + "summary": "Get session progress snapshot", + "parameters": [ + { + "type": "string", + "description": "Session ID", + "name": "id", + "in": "path", + "required": true + } ], - "summary": "List user workspaces", "responses": { "200": { "description": "OK", "schema": { - "type": "object", - "properties": { - "workspaces": { - "type": "array", - "items": { - "$ref": "#/definitions/services.WorkspaceWithDeviceStatus" - } - } - } + "$ref": "#/definitions/internal_team.SessionProgress" } + } + } + } + }, + "/team/sessions/:id/repos": { + "get": { + "produces": [ + "application/json" + ], + "tags": [ + "team" + ], + "summary": "Query repo affinity registry", + "parameters": [ + { + "type": 
"string", + "description": "Session ID", + "name": "id", + "in": "path", + "required": true }, - "401": { - "description": "Unauthorized", - "schema": { - "type": "object", - "properties": { - "error": { - "type": "string" - } - } - } + { + "type": "string", + "description": "Filter by repo remote URL", + "name": "remoteUrl", + "in": "query" }, - "500": { - "description": "Internal Server Error", + { + "type": "string", + "description": "Filter by member ID", + "name": "memberId", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", "schema": { "type": "object", "properties": { - "error": { - "type": "string" + "repos": { + "type": "array", + "items": { + "$ref": "#/definitions/internal_team.TeamRepoAffinity" + } } } } @@ -10038,7 +10162,6 @@ const docTemplate = `{ } }, "post": { - "description": "Create a workspace for the authenticated user with at least one directory", "consumes": [ "application/json" ], @@ -10046,72 +10169,123 @@ const docTemplate = `{ "application/json" ], "tags": [ - "workspaces" + "team" ], - "summary": "Create a new workspace", + "summary": "Register / update local repository info (Teammate)", "parameters": [ { - "description": "Workspace creation data", + "type": "string", + "description": "Session ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Repo info", "name": "body", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/services.CreateWorkspaceRequest" + "$ref": "#/definitions/internal_team.TeamRepoAffinity" } } ], "responses": { - "201": { - "description": "Created", - "schema": { - "type": "object", - "properties": { - "workspace": { - "$ref": "#/definitions/services.WorkspaceWithDeviceStatus" - } - } - } - }, - "400": { - "description": "Bad Request", + "200": { + "description": "OK", "schema": { - "type": "object", - "properties": { - "error": { - "type": "string" - } - } + "$ref": "#/definitions/internal_team.TeamRepoAffinity" } - }, - "401": { - 
"description": "Unauthorized", + } + } + } + }, + "/team/sessions/:id/tasks": { + "get": { + "produces": [ + "application/json" + ], + "tags": [ + "team" + ], + "summary": "List session tasks", + "parameters": [ + { + "type": "string", + "description": "Session ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", "schema": { "type": "object", "properties": { - "error": { - "type": "string" + "tasks": { + "type": "array", + "items": { + "$ref": "#/definitions/internal_team.TeamTask" + } } } } + } + } + }, + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "team" + ], + "summary": "Submit task plan (Leader)", + "parameters": [ + { + "type": "string", + "description": "Session ID", + "name": "id", + "in": "path", + "required": true }, - "409": { - "description": "Conflict", + { + "description": "Task plan", + "name": "body", + "in": "body", + "required": true, "schema": { "type": "object", "properties": { - "error": { - "type": "string" + "fencingToken": { + "type": "integer" + }, + "tasks": { + "type": "array", + "items": { + "$ref": "#/definitions/internal_team.TeamTask" + } } } } - }, - "500": { - "description": "Internal Server Error", + } + ], + "responses": { + "201": { + "description": "Created", "schema": { "type": "object", "properties": { - "error": { - "type": "string" + "tasks": { + "type": "array", + "items": { + "$ref": "#/definitions/internal_team.TeamTask" + } } } } @@ -10119,97 +10293,122 @@ const docTemplate = `{ } } }, - "/workspaces/default": { + "/team/tasks/:taskId": { "get": { - "description": "Get the default workspace for the authenticated user", "produces": [ "application/json" ], "tags": [ - "workspaces" + "team" + ], + "summary": "Get task", + "parameters": [ + { + "type": "string", + "description": "Task ID", + "name": "taskId", + "in": "path", + "required": true + } ], - "summary": "Get default workspace", "responses": 
{ "200": { "description": "OK", "schema": { - "type": "object", - "properties": { - "workspace": { - "$ref": "#/definitions/services.WorkspaceWithDeviceStatus" - } - } + "$ref": "#/definitions/internal_team.TeamTask" } + } + } + }, + "patch": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "team" + ], + "summary": "Update task status/result (Teammate)", + "parameters": [ + { + "type": "string", + "description": "Task ID", + "name": "taskId", + "in": "path", + "required": true }, - "401": { - "description": "Unauthorized", + { + "description": "Task update", + "name": "body", + "in": "body", + "required": true, "schema": { "type": "object", "properties": { - "error": { + "errorMessage": { "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "schema": { - "type": "object", - "properties": { - "error": { + }, + "result": { + "type": "object" + }, + "status": { "type": "string" } } } - }, - "500": { - "description": "Internal Server Error", + } + ], + "responses": { + "200": { + "description": "OK", "schema": { - "type": "object", - "properties": { - "error": { - "type": "string" - } - } + "$ref": "#/definitions/internal_team.TeamTask" } } } } }, - "/workspaces/{workspaceID}": { + "/usage/activity": { "get": { - "description": "Get details of a specific workspace including directories", + "security": [ + { + "BearerAuth": [] + } + ], + "description": "Query daily request activity for all users under the specified git repository URL.", "produces": [ "application/json" ], "tags": [ - "workspaces" + "usage" ], - "summary": "Get workspace details", + "summary": "Query usage activity", "parameters": [ { "type": "string", - "description": "Workspace ID", - "name": "workspaceID", - "in": "path", + "description": "Git repository URL", + "name": "git_repo_url", + "in": "query", "required": true + }, + { + "type": "integer", + "description": "Range in days (1-90, default 7)", + "name": "days", + "in": 
"query" } ], "responses": { "200": { "description": "OK", "schema": { - "type": "object", - "properties": { - "workspace": { - "$ref": "#/definitions/services.WorkspaceWithDeviceStatus" - } - } + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.UsageActivityResponse" } }, - "401": { - "description": "Unauthorized", + "400": { + "description": "Bad Request", "schema": { "type": "object", "properties": { @@ -10219,8 +10418,8 @@ const docTemplate = `{ } } }, - "404": { - "description": "Not Found", + "401": { + "description": "Unauthorized", "schema": { "type": "object", "properties": { @@ -10242,9 +10441,16 @@ const docTemplate = `{ } } } - }, - "put": { - "description": "Update workspace information", + } + }, + "/usage/report": { + "post": { + "security": [ + { + "BearerAuth": [] + } + ], + "description": "Receive batched session usage reports from authenticated CLI clients and upsert them with request-level idempotency.", "consumes": [ "application/json" ], @@ -10252,24 +10458,17 @@ const docTemplate = `{ "application/json" ], "tags": [ - "workspaces" + "usage" ], - "summary": "Update workspace", + "summary": "Report session usage", "parameters": [ { - "type": "string", - "description": "Workspace ID", - "name": "workspaceID", - "in": "path", - "required": true - }, - { - "description": "Workspace update data", + "description": "Usage report payload", "name": "body", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/services.UpdateWorkspaceRequest" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.UsageReportRequest" } } ], @@ -10277,12 +10476,7 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "type": "object", - "properties": { - "workspace": { - "$ref": "#/definitions/services.WorkspaceWithDeviceStatus" - } - } + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.UsageReportResponse" } }, "400": { @@ -10307,8 +10501,8 @@ const 
docTemplate = `{ } } }, - "404": { - "description": "Not Found", + "500": { + "description": "Internal Server Error", "schema": { "type": "object", "properties": { @@ -10317,9 +10511,43 @@ const docTemplate = `{ } } } + } + } + } + }, + "/users/info": { + "get": { + "description": "Query a user's basic information by user ID, including name and avatar URL.", + "produces": [ + "application/json" + ], + "tags": [ + "users" + ], + "summary": "Get user basic info", + "parameters": [ + { + "type": "string", + "description": "User ID", + "name": "id", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "properties": { + "user": { + "$ref": "#/definitions/internal_handlers.userBasicInfoResponse" + } + } + } }, - "409": { - "description": "Conflict", + "400": { + "description": "Bad Request", "schema": { "type": "object", "properties": { @@ -10329,8 +10557,8 @@ const docTemplate = `{ } } }, - "500": { - "description": "Internal Server Error", + "404": { + "description": "Not Found", "schema": { "type": "object", "properties": { @@ -10339,30 +10567,9 @@ const docTemplate = `{ } } } - } - } - }, - "delete": { - "description": "Delete a workspace and all its directories", - "tags": [ - "workspaces" - ], - "summary": "Delete workspace", - "parameters": [ - { - "type": "string", - "description": "Workspace ID", - "name": "workspaceID", - "in": "path", - "required": true - } - ], - "responses": { - "204": { - "description": "No Content" }, - "401": { - "description": "Unauthorized", + "500": { + "description": "Internal Server Error", "schema": { "type": "object", "properties": { @@ -10371,9 +10578,29 @@ const docTemplate = `{ } } } + } + } + } + }, + "/users/me/behavior/summary": { + "get": { + "description": "Get a summary of user's behavior and preferences", + "produces": [ + "application/json" + ], + "tags": [ + "behavior" + ], + "summary": "Get user behavior summary", + "responses": { + "200": { 
+ "description": "OK", + "schema": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.UserBehaviorSummary" + } }, - "403": { - "description": "Forbidden", + "500": { + "description": "Internal Server Error", "schema": { "type": "object", "properties": { @@ -10382,20 +10609,46 @@ const docTemplate = `{ } } } - }, - "404": { - "description": "Not Found", + } + } + } + }, + "/users/names": { + "get": { + "description": "Given a comma-separated list of user IDs, return a map of id -\u003e displayName. Results are served from an in-memory cache when possible.", + "produces": [ + "application/json" + ], + "tags": [ + "users" + ], + "summary": "Batch resolve user display names", + "parameters": [ + { + "type": "string", + "description": "Comma-separated user IDs (max 50)", + "name": "ids", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", "schema": { "type": "object", "properties": { - "error": { - "type": "string" + "names": { + "type": "object", + "additionalProperties": { + "type": "string" + } } } } }, - "500": { - "description": "Internal Server Error", + "400": { + "description": "Bad Request", "schema": { "type": "object", "properties": { @@ -10408,35 +10661,23 @@ const docTemplate = `{ } } }, - "/workspaces/{workspaceID}/devices": { + "/users/search": { "get": { - "description": "Get all devices in a workspace with pagination", + "description": "Search users by username or email keyword (requires authentication)", "produces": [ "application/json" ], "tags": [ - "devices" + "users" ], - "summary": "List workspace devices", + "summary": "Search users", "parameters": [ { "type": "string", - "description": "Workspace ID", - "name": "workspaceID", - "in": "path", + "description": "Search keyword", + "name": "q", + "in": "query", "required": true - }, - { - "type": "integer", - "description": "Page number (default: 1)", - "name": "page", - "in": "query" - }, - { - "type": "integer", - 
"description": "Page size (default: 20, max: 100)", - "name": "pageSize", - "in": "query" } ], "responses": { @@ -10445,23 +10686,11 @@ const docTemplate = `{ "schema": { "type": "object", "properties": { - "devices": { + "users": { "type": "array", "items": { - "type": "object" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_casdoor.CasdoorUser" } - }, - "hasMore": { - "type": "boolean" - }, - "page": { - "type": "integer" - }, - "pageSize": { - "type": "integer" - }, - "total": { - "type": "integer" } } } @@ -10491,9 +10720,9 @@ const docTemplate = `{ } } }, - "/workspaces/{workspaceID}/directories": { + "/webhooks/github": { "post": { - "description": "Add a new directory to an existing workspace", + "description": "Receive GitHub push events and enqueue sync jobs", "consumes": [ "application/json" ], @@ -10501,35 +10730,55 @@ const docTemplate = `{ "application/json" ], "tags": [ - "workspaces" + "sync" ], - "summary": "Add directory to workspace", + "summary": "Handle GitHub webhook", "parameters": [ { "type": "string", - "description": "Workspace ID", - "name": "workspaceID", - "in": "path", + "description": "GitHub event type (push)", + "name": "X-GitHub-Event", + "in": "header", "required": true }, { - "description": "Directory creation data", + "type": "string", + "description": "GitHub webhook signature", + "name": "X-Hub-Signature-256", + "in": "header" + }, + { + "description": "GitHub webhook payload", "name": "body", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/services.CreateDirectoryRequest" + "type": "object" } } ], "responses": { - "201": { - "description": "Created", + "200": { + "description": "OK", "schema": { "type": "object", "properties": { - "directory": { - "type": "object" + "message": { + "type": "string" + } + } + } + }, + "202": { + "description": "Accepted", + "schema": { + "type": "object", + "properties": { + "queued": { + "type": "array", + "items": { + "type": "string" + } } } } @@ 
-10556,8 +10805,47 @@ const docTemplate = `{ } } }, - "404": { - "description": "Not Found", + "503": { + "description": "Service Unavailable", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + } + } + } + }, + "/workspaces": { + "get": { + "description": "Get all workspaces for the authenticated user", + "produces": [ + "application/json" + ], + "tags": [ + "workspaces" + ], + "summary": "List user workspaces", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "properties": { + "workspaces": { + "type": "array", + "items": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.WorkspaceWithDeviceStatus" + } + } + } + } + }, + "401": { + "description": "Unauthorized", "schema": { "type": "object", "properties": { @@ -10579,11 +10867,9 @@ const docTemplate = `{ } } } - } - }, - "/workspaces/{workspaceID}/directories/reorder": { + }, "post": { - "description": "Reorder directories in a workspace", + "description": "Create a workspace for the authenticated user with at least one directory", "consumes": [ "application/json" ], @@ -10593,33 +10879,26 @@ const docTemplate = `{ "tags": [ "workspaces" ], - "summary": "Reorder workspace directories", + "summary": "Create a new workspace", "parameters": [ { - "type": "string", - "description": "Workspace ID", - "name": "workspaceID", - "in": "path", - "required": true - }, - { - "description": "Directory order data", + "description": "Workspace creation data", "name": "body", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/services.ReorderDirectoriesRequest" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.CreateWorkspaceRequest" } } ], "responses": { - "200": { - "description": "OK", + "201": { + "description": "Created", "schema": { "type": "object", "properties": { - "message": { - "type": "string" + "workspace": { + "$ref": 
"#/definitions/github_com_costrict_costrict-web_server_internal_services.WorkspaceWithDeviceStatus" } } } @@ -10646,8 +10925,8 @@ const docTemplate = `{ } } }, - "404": { - "description": "Not Found", + "409": { + "description": "Conflict", "schema": { "type": "object", "properties": { @@ -10671,41 +10950,157 @@ const docTemplate = `{ } } }, - "/workspaces/{workspaceID}/directories/{directoryID}": { - "put": { - "description": "Update a directory in a workspace", - "consumes": [ - "application/json" - ], + "/workspaces/default": { + "get": { + "description": "Get the default workspace for the authenticated user", "produces": [ "application/json" ], "tags": [ "workspaces" ], - "summary": "Update workspace directory", - "parameters": [ - { - "type": "string", + "summary": "Get default workspace", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "properties": { + "workspace": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.WorkspaceWithDeviceStatus" + } + } + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "404": { + "description": "Not Found", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + } + } + } + }, + "/workspaces/{workspaceID}": { + "get": { + "description": "Get details of a specific workspace including directories", + "produces": [ + "application/json" + ], + "tags": [ + "workspaces" + ], + "summary": "Get workspace details", + "parameters": [ + { + "type": "string", "description": "Workspace ID", "name": "workspaceID", "in": "path", "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "properties": { + "workspace": { + 
"$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.WorkspaceWithDeviceStatus" + } + } + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "404": { + "description": "Not Found", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + } + } + }, + "put": { + "description": "Update workspace information", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "workspaces" + ], + "summary": "Update workspace", + "parameters": [ { "type": "string", - "description": "Directory ID", - "name": "directoryID", + "description": "Workspace ID", + "name": "workspaceID", "in": "path", "required": true }, { - "description": "Directory update data", + "description": "Workspace update data", "name": "body", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/services.UpdateDirectoryRequest" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.UpdateWorkspaceRequest" } } ], @@ -10715,8 +11110,8 @@ const docTemplate = `{ "schema": { "type": "object", "properties": { - "directory": { - "type": "object" + "workspace": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.WorkspaceWithDeviceStatus" } } } @@ -10754,6 +11149,17 @@ const docTemplate = `{ } } }, + "409": { + "description": "Conflict", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, "500": { "description": "Internal Server Error", "schema": { @@ -10768,11 +11174,11 @@ const docTemplate = `{ } }, "delete": { - "description": "Delete a directory from a workspace", + "description": "Delete a workspace and all its directories", "tags": 
[ "workspaces" ], - "summary": "Delete workspace directory", + "summary": "Delete workspace", "parameters": [ { "type": "string", @@ -10780,21 +11186,14 @@ const docTemplate = `{ "name": "workspaceID", "in": "path", "required": true - }, - { - "type": "string", - "description": "Directory ID", - "name": "directoryID", - "in": "path", - "required": true } ], "responses": { "204": { "description": "No Content" }, - "400": { - "description": "Bad Request", + "401": { + "description": "Unauthorized", "schema": { "type": "object", "properties": { @@ -10804,8 +11203,8 @@ const docTemplate = `{ } } }, - "401": { - "description": "Unauthorized", + "403": { + "description": "Forbidden", "schema": { "type": "object", "properties": { @@ -10840,13 +11239,16 @@ const docTemplate = `{ } } }, - "/workspaces/{workspaceID}/set-default": { - "post": { - "description": "Set a workspace as the default for the authenticated user", + "/workspaces/{workspaceID}/devices": { + "get": { + "description": "Get all devices in a workspace with pagination", + "produces": [ + "application/json" + ], "tags": [ - "workspaces" + "devices" ], - "summary": "Set default workspace", + "summary": "List workspace devices", "parameters": [ { "type": "string", @@ -10854,6 +11256,18 @@ const docTemplate = `{ "name": "workspaceID", "in": "path", "required": true + }, + { + "type": "integer", + "description": "Page number (default: 1)", + "name": "page", + "in": "query" + }, + { + "type": "integer", + "description": "Page size (default: 20, max: 100)", + "name": "pageSize", + "in": "query" } ], "responses": { @@ -10862,8 +11276,23 @@ const docTemplate = `{ "schema": { "type": "object", "properties": { - "message": { - "type": "string" + "devices": { + "type": "array", + "items": { + "type": "object" + } + }, + "hasMore": { + "type": "boolean" + }, + "page": { + "type": "integer" + }, + "pageSize": { + "type": "integer" + }, + "total": { + "type": "integer" } } } @@ -10879,8 +11308,8 @@ const docTemplate = `{ } 
} }, - "404": { - "description": "Not Found", + "500": { + "description": "Internal Server Error", "schema": { "type": "object", "properties": { @@ -10889,246 +11318,1021 @@ const docTemplate = `{ } } } + } + } + } + }, + "/workspaces/{workspaceID}/directories": { + "post": { + "description": "Add a new directory to an existing workspace", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "workspaces" + ], + "summary": "Add directory to workspace", + "parameters": [ + { + "type": "string", + "description": "Workspace ID", + "name": "workspaceID", + "in": "path", + "required": true }, - "500": { - "description": "Internal Server Error", + { + "description": "Directory creation data", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.CreateDirectoryRequest" + } + } + ], + "responses": { + "201": { + "description": "Created", "schema": { "type": "object", "properties": { - "error": { - "type": "string" + "directory": { + "type": "object" } } } - } - } - } - } - }, - "definitions": { - "casdoor.CasdoorUser": { + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "404": { + "description": "Not Found", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + } + } + } + }, + "/workspaces/{workspaceID}/directories/reorder": { + "post": { + "description": "Reorder directories in a workspace", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + 
"workspaces" + ], + "summary": "Reorder workspace directories", + "parameters": [ + { + "type": "string", + "description": "Workspace ID", + "name": "workspaceID", + "in": "path", + "required": true + }, + { + "description": "Directory order data", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.ReorderDirectoriesRequest" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "404": { + "description": "Not Found", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + } + } + } + }, + "/workspaces/{workspaceID}/directories/{directoryID}": { + "put": { + "description": "Update a directory in a workspace", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "workspaces" + ], + "summary": "Update workspace directory", + "parameters": [ + { + "type": "string", + "description": "Workspace ID", + "name": "workspaceID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Directory ID", + "name": "directoryID", + "in": "path", + "required": true + }, + { + "description": "Directory update data", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.UpdateDirectoryRequest" + } + } + ], + "responses": { + "200": { + 
"description": "OK", + "schema": { + "type": "object", + "properties": { + "directory": { + "type": "object" + } + } + } + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "404": { + "description": "Not Found", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + } + } + }, + "delete": { + "description": "Delete a directory from a workspace", + "tags": [ + "workspaces" + ], + "summary": "Delete workspace directory", + "parameters": [ + { + "type": "string", + "description": "Workspace ID", + "name": "workspaceID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Directory ID", + "name": "directoryID", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + }, + "400": { + "description": "Bad Request", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "404": { + "description": "Not Found", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + } + } + } + }, + "/workspaces/{workspaceID}/set-default": { + "post": { + "description": "Set a workspace as the default for the authenticated user", + "tags": [ + "workspaces" + ], + "summary": "Set default workspace", + 
"parameters": [ + { + "type": "string", + "description": "Workspace ID", + "name": "workspaceID", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + } + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "404": { + "description": "Not Found", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + } + } + } + } + }, + "definitions": { + "github_com_costrict_costrict-web_server_internal_casdoor.CasdoorUser": { + "type": "object", + "properties": { + "email": { + "type": "string" + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "owner": { + "type": "string" + }, + "picture": { + "type": "string" + }, + "preferred_username": { + "type": "string" + }, + "sub": { + "type": "string" + }, + "universal_id": { + "type": "string" + } + } + }, + "handlers.ConsistencyCheckResponse": { "type": "object", "properties": { - "email": { + "contentMd5": { + "type": "string" + }, + "matched": { + "type": "boolean" + }, + "matchedCurrent": { + "type": "boolean" + }, + "matchedRevision": { + "type": "integer" + }, + "matchedVersionLabel": { + "type": "string" + } + } + }, + "handlers.CreateSyncRegistryInput": { + "type": "object", + "properties": { + "conflictStrategy": { + "type": "string" + }, + "description": { + "type": "string" + }, + "excludePatterns": { + "type": "array", + "items": { + "type": "string" + } + }, + "externalBranch": { + "type": "string" + }, + "externalUrl": { + "type": "string" + }, + "includePatterns": { + "type": "array", + "items": { + "type": "string" + } + }, + "name": { + "type": "string" + }, + 
"syncEnabled": { + "type": "boolean" + }, + "syncInterval": { + "type": "integer" + }, + "webhookSecret": { + "type": "string" + } + } + }, + "handlers.ItemResponse": { + "type": "object", + "properties": { + "artifacts": { + "type": "array", + "items": { + "$ref": "#/definitions/models.CapabilityArtifact" + } + }, + "assets": { + "type": "array", + "items": { + "$ref": "#/definitions/models.CapabilityAsset" + } + }, + "category": { + "type": "string" + }, + "content": { + "type": "string" + }, + "contentMd5": { + "type": "string" + }, + "createdAt": { + "type": "string" + }, + "createdBy": { + "type": "string" + }, + "currentRevision": { + "type": "integer" + }, + "currentVersionLabel": { + "type": "string" + }, + "description": { + "type": "string" + }, + "embeddingUpdatedAt": { + "type": "string" + }, + "experienceScore": { + "type": "number" + }, + "favoriteCount": { + "type": "integer" + }, + "favorited": { + "type": "boolean" + }, + "id": { + "type": "string" + }, + "installCount": { + "type": "integer" + }, + "itemType": { + "type": "string" + }, + "lastScanId": { + "type": "string" + }, + "metadata": { + "type": "object" + }, + "name": { + "type": "string" + }, + "previewCount": { + "type": "integer" + }, + "registry": { + "$ref": "#/definitions/models.CapabilityRegistry" + }, + "registryId": { + "type": "string" + }, + "repoId": { + "type": "string" + }, + "repoVisibility": { + "type": "string" + }, + "securityStatus": { + "type": "string" + }, + "slug": { + "type": "string" + }, + "sourcePath": { + "type": "string" + }, + "sourceSha": { + "type": "string" + }, + "sourceType": { + "description": "direct | archive", + "type": "string" + }, + "status": { + "type": "string" + }, + "updatedAt": { + "type": "string" + }, + "updatedBy": { + "type": "string" + }, + "version": { + "type": "string" + }, + "versions": { + "type": "array", + "items": { + "$ref": "#/definitions/models.CapabilityVersion" + } + } + } + }, + "handlers.MyItem": { + "type": "object", + 
"properties": { + "artifacts": { + "type": "array", + "items": { + "$ref": "#/definitions/models.CapabilityArtifact" + } + }, + "assets": { + "type": "array", + "items": { + "$ref": "#/definitions/models.CapabilityAsset" + } + }, + "category": { + "type": "string" + }, + "content": { + "type": "string" + }, + "contentMd5": { + "type": "string" + }, + "createdAt": { + "type": "string" + }, + "createdBy": { + "type": "string" + }, + "currentRevision": { + "type": "integer" + }, + "description": { + "type": "string" + }, + "embeddingUpdatedAt": { + "type": "string" + }, + "experienceScore": { + "type": "number" + }, + "favoriteCount": { + "type": "integer" + }, + "id": { + "type": "string" + }, + "installCount": { + "type": "integer" + }, + "itemType": { + "type": "string" + }, + "lastScanId": { + "type": "string" + }, + "metadata": { + "type": "object" + }, + "name": { + "type": "string" + }, + "previewCount": { + "type": "integer" + }, + "registry": { + "$ref": "#/definitions/models.CapabilityRegistry" + }, + "registryId": { + "type": "string" + }, + "repoId": { "type": "string" }, - "id": { + "repoName": { "type": "string" }, - "name": { + "repoVisibility": { "type": "string" }, - "owner": { + "securityStatus": { "type": "string" }, - "picture": { + "slug": { "type": "string" }, - "preferred_username": { + "sourcePath": { "type": "string" }, - "sub": { + "sourceSha": { "type": "string" }, - "universal_id": { + "sourceType": { + "description": "direct | archive", + "type": "string" + }, + "status": { + "type": "string" + }, + "updatedAt": { + "type": "string" + }, + "updatedBy": { + "type": "string" + }, + "version": { "type": "string" + }, + "versions": { + "type": "array", + "items": { + "$ref": "#/definitions/models.CapabilityVersion" + } } } }, - "handlers.ConsistencyCheckResponse": { + "handlers.VersionResponse": { "type": "object", "properties": { + "commitMsg": { + "type": "string" + }, + "content": { + "type": "string" + }, "contentMd5": { "type": "string" 
}, - "matched": { - "type": "boolean" + "createdAt": { + "type": "string" }, - "matchedCurrent": { - "type": "boolean" + "createdBy": { + "type": "string" }, - "matchedRevision": { + "id": { + "type": "string" + }, + "itemId": { + "type": "string" + }, + "metadata": { + "type": "object" + }, + "revision": { "type": "integer" }, - "matchedVersionLabel": { + "versionLabel": { "type": "string" } } }, - "handlers.CreateSyncRegistryInput": { + "handlers.userBasicInfoResponse": { "type": "object", "properties": { - "conflictStrategy": { + "avatarUrl": { "type": "string" }, - "description": { + "id": { "type": "string" }, - "excludePatterns": { + "name": { + "type": "string" + } + } + }, + "llm.SkillAnalysis": { + "type": "object", + "properties": { + "improvements": { "type": "array", "items": { - "type": "string" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_llm.SkillImprovement" } }, - "externalBranch": { - "type": "string" - }, - "externalUrl": { - "type": "string" + "overallScore": { + "type": "integer" }, - "includePatterns": { + "strengths": { "type": "array", "items": { "type": "string" } }, - "name": { + "weaknesses": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "github_com_costrict_costrict-web_server_internal_llm.SkillImprovement": { + "type": "object", + "properties": { + "current": { "type": "string" }, - "syncEnabled": { - "type": "boolean" + "field": { + "type": "string" }, - "syncInterval": { - "type": "integer" + "reason": { + "type": "string" }, - "webhookSecret": { + "suggested": { "type": "string" } } }, - "handlers.ItemResponse": { + "github_com_costrict_costrict-web_server_internal_models.ActionType": { + "type": "string", + "enum": [ + "view", + "click", + "install", + "use", + "success", + "fail", + "feedback", + "ignore" + ], + "x-enum-varnames": [ + "ActionView", + "ActionClick", + "ActionInstall", + "ActionUse", + "ActionSuccess", + "ActionFail", + "ActionFeedback", + "ActionIgnore" + ] + }, 
+ "github_com_costrict_costrict-web_server_internal_models.BehaviorLog": { "type": "object", "properties": { - "artifacts": { - "type": "array", - "items": { - "$ref": "#/definitions/models.CapabilityArtifact" - } + "actionType": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.ActionType" }, - "assets": { - "type": "array", - "items": { - "$ref": "#/definitions/models.CapabilityAsset" - } + "context": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.ContextType" }, - "category": { + "createdAt": { "type": "string" }, - "content": { - "type": "string" + "durationMs": { + "type": "integer" }, - "contentMd5": { + "feedback": { "type": "string" }, - "createdAt": { + "id": { "type": "string" }, - "createdBy": { + "item": { + "description": "Relations", + "allOf": [ + { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityItem" + } + ] + }, + "itemId": { "type": "string" }, - "currentRevision": { + "metadata": { + "type": "object" + }, + "rating": { + "description": "1-5 for feedback", "type": "integer" }, - "currentVersionLabel": { - "type": "string" + "registry": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" }, - "description": { + "registryId": { "type": "string" }, - "embeddingUpdatedAt": { + "searchQuery": { "type": "string" }, - "experienceScore": { - "type": "number" + "sessionId": { + "type": "string" }, - "favoriteCount": { - "type": "integer" + "userId": { + "type": "string" + } + } + }, + "github_com_costrict_costrict-web_server_internal_models.CapabilityArtifact": { + "type": "object", + "properties": { + "artifactVersion": { + "type": "string" }, - "favorited": { - "type": "boolean" + "checksumSha256": { + "type": "string" }, - "id": { + "createdAt": { "type": "string" }, - "installCount": { + "downloadCount": { "type": "integer" }, - "itemType": { + "fileSize": { + "type": "integer" + }, + 
"filename": { "type": "string" }, - "lastScanId": { + "id": { "type": "string" }, - "metadata": { - "type": "object" + "isLatest": { + "type": "boolean" }, - "name": { + "itemId": { "type": "string" }, - "previewCount": { - "type": "integer" + "mimeType": { + "type": "string" }, - "registry": { - "$ref": "#/definitions/models.CapabilityRegistry" + "sourceType": { + "type": "string" }, - "registryId": { + "storageBackend": { "type": "string" }, - "repoId": { + "storageKey": { "type": "string" }, - "repoVisibility": { + "uploadedBy": { + "type": "string" + } + } + }, + "github_com_costrict_costrict-web_server_internal_models.CapabilityAsset": { + "type": "object", + "properties": { + "contentSha": { "type": "string" }, - "securityStatus": { + "createdAt": { "type": "string" }, - "slug": { + "fileSize": { + "type": "integer" + }, + "id": { "type": "string" }, - "sourcePath": { + "itemId": { "type": "string" }, - "sourceSha": { + "mimeType": { "type": "string" }, - "sourceType": { - "description": "direct | archive", + "relPath": { "type": "string" }, - "status": { + "storageBackend": { "type": "string" }, - "updatedAt": { + "storageKey": { "type": "string" }, - "updatedBy": { + "textContent": { "type": "string" }, - "version": { + "updatedAt": { "type": "string" - }, - "versions": { - "type": "array", - "items": { - "$ref": "#/definitions/models.CapabilityVersion" - } } } }, - "handlers.MyItem": { + "github_com_costrict_costrict-web_server_internal_models.CapabilityItem": { "type": "object", "properties": { "artifacts": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityArtifact" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityArtifact" } }, "assets": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityAsset" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityAsset" } }, "category": { @@ -11183,7 +12387,7 @@ const docTemplate = `{ "type": 
"integer" }, "registry": { - "$ref": "#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" }, "registryId": { "type": "string" @@ -11191,12 +12395,6 @@ const docTemplate = `{ "repoId": { "type": "string" }, - "repoName": { - "type": "string" - }, - "repoVisibility": { - "type": "string" - }, "securityStatus": { "type": "string" }, @@ -11228,12 +12426,78 @@ const docTemplate = `{ "versions": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityVersion" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityVersion" } } } }, - "handlers.VersionResponse": { + "github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalBranch": { + "type": "string" + }, + "externalUrl": { + "type": "string" + }, + "id": { + "type": "string" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityItem" + } + }, + "lastSyncLog": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.SyncLog" + }, + "lastSyncLogId": { + "type": "string" + }, + "lastSyncSha": { + "type": "string" + }, + "lastSyncedAt": { + "type": "string" + }, + "name": { + "type": "string" + }, + "ownerId": { + "type": "string" + }, + "repoId": { + "type": "string" + }, + "sourceType": { + "type": "string" + }, + "syncConfig": { + "type": "object" + }, + "syncEnabled": { + "type": "boolean" + }, + "syncInterval": { + "type": "integer" + }, + "syncStatus": { + "description": "idle | syncing | error | paused", + "type": "string" + }, + "updatedAt": { + "type": "string" + } + } + }, + "github_com_costrict_costrict-web_server_internal_models.CapabilityVersion": { "type": "object", "properties": { "commitMsg": { @@ 
-11262,141 +12526,160 @@ const docTemplate = `{ }, "revision": { "type": "integer" - }, - "versionLabel": { - "type": "string" } } }, - "handlers.userBasicInfoResponse": { + "github_com_costrict_costrict-web_server_internal_models.ContextType": { + "type": "string", + "enum": [ + "search_query", + "recommendation", + "direct_access", + "browse" + ], + "x-enum-varnames": [ + "ContextSearch", + "ContextRecommend", + "ContextDirectAccess", + "ContextBrowse" + ] + }, + "github_com_costrict_costrict-web_server_internal_models.NotificationLog": { "type": "object", "properties": { - "avatarUrl": { + "channelType": { + "type": "string" + }, + "createdAt": { + "type": "string" + }, + "deviceId": { + "type": "string" + }, + "error": { + "type": "string" + }, + "eventType": { "type": "string" }, "id": { "type": "string" }, - "name": { + "sentAt": { "type": "string" - } - } - }, - "llm.SkillAnalysis": { - "type": "object", - "properties": { - "improvements": { - "type": "array", - "items": { - "$ref": "#/definitions/llm.SkillImprovement" - } }, - "overallScore": { - "type": "integer" + "sessionId": { + "type": "string" }, - "strengths": { - "type": "array", - "items": { - "type": "string" - } + "status": { + "type": "string" }, - "weaknesses": { - "type": "array", - "items": { - "type": "string" - } + "userChannelId": { + "type": "string" + }, + "userId": { + "type": "string" } } }, - "llm.SkillImprovement": { + "github_com_costrict_costrict-web_server_internal_models.Project": { "type": "object", "properties": { - "current": { + "archivedAt": { "type": "string" }, - "field": { + "createdAt": { "type": "string" }, - "reason": { + "creatorId": { "type": "string" }, - "suggested": { + "description": { + "type": "string" + }, + "enabledAt": { + "type": "string" + }, + "id": { + "type": "string" + }, + "isPinned": { + "type": "boolean" + }, + "metadata": { + "type": "object" + }, + "name": { + "type": "string" + }, + "updatedAt": { "type": "string" } } }, - "models.ActionType": { 
- "type": "string", - "enum": [ - "view", - "click", - "install", - "use", - "success", - "fail", - "feedback", - "ignore" - ], - "x-enum-varnames": [ - "ActionView", - "ActionClick", - "ActionInstall", - "ActionUse", - "ActionSuccess", - "ActionFail", - "ActionFeedback", - "ActionIgnore" - ] - }, - "models.BehaviorLog": { + "github_com_costrict_costrict-web_server_internal_models.ProjectInvitation": { "type": "object", "properties": { - "actionType": { - "$ref": "#/definitions/models.ActionType" + "createdAt": { + "type": "string" }, - "context": { - "$ref": "#/definitions/models.ContextType" + "expiresAt": { + "type": "string" }, - "createdAt": { + "id": { "type": "string" }, - "durationMs": { - "type": "integer" + "inviteeId": { + "type": "string" }, - "feedback": { + "inviterId": { "type": "string" }, - "id": { + "message": { "type": "string" }, - "item": { - "description": "Relations", - "allOf": [ - { - "$ref": "#/definitions/models.CapabilityItem" - } - ] + "projectId": { + "type": "string" }, - "itemId": { + "projectName": { "type": "string" }, - "metadata": { - "type": "object" + "respondedAt": { + "type": "string" }, - "rating": { - "description": "1-5 for feedback", - "type": "integer" + "role": { + "type": "string" }, - "registry": { - "$ref": "#/definitions/models.CapabilityRegistry" + "status": { + "type": "string" }, - "registryId": { + "updatedAt": { + "type": "string" + } + } + }, + "github_com_costrict_costrict-web_server_internal_models.ProjectMember": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "id": { + "type": "string" + }, + "joinedAt": { + "type": "string" + }, + "pinnedAt": { + "type": "string" + }, + "projectId": { "type": "string" }, - "searchQuery": { + "role": { "type": "string" }, - "sessionId": { + "updatedAt": { "type": "string" }, "userId": { @@ -11404,1205 +12687,1265 @@ const docTemplate = `{ } } }, - "models.CapabilityArtifact": { + 
"github_com_costrict_costrict-web_server_internal_models.RepoInvitation": { "type": "object", "properties": { - "artifactVersion": { + "createdAt": { "type": "string" }, - "checksumSha256": { + "expiresAt": { "type": "string" }, - "createdAt": { + "id": { "type": "string" }, - "downloadCount": { - "type": "integer" - }, - "fileSize": { - "type": "integer" - }, - "filename": { + "inviteeId": { "type": "string" }, - "id": { + "inviteeUsername": { "type": "string" }, - "isLatest": { - "type": "boolean" - }, - "itemId": { + "inviterId": { "type": "string" }, - "mimeType": { + "inviterUsername": { "type": "string" }, - "sourceType": { + "repoId": { "type": "string" }, - "storageBackend": { + "repository": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.Repository" + }, + "role": { + "description": "admin | member", "type": "string" }, - "storageKey": { + "status": { + "description": "pending | accepted | declined | cancelled", "type": "string" }, - "uploadedBy": { + "updatedAt": { "type": "string" } } }, - "models.CapabilityAsset": { + "github_com_costrict_costrict-web_server_internal_models.RepoMember": { "type": "object", "properties": { - "contentSha": { + "createdAt": { "type": "string" }, - "createdAt": { + "id": { "type": "string" }, - "fileSize": { - "type": "integer" + "repoId": { + "type": "string" }, - "id": { + "role": { + "description": "owner | admin | member", "type": "string" }, - "itemId": { + "userId": { "type": "string" }, - "mimeType": { + "username": { + "type": "string" + } + } + }, + "github_com_costrict_costrict-web_server_internal_models.Repository": { + "type": "object", + "properties": { + "createdAt": { "type": "string" }, - "relPath": { + "description": { "type": "string" }, - "storageBackend": { + "displayName": { "type": "string" }, - "storageKey": { + "id": { "type": "string" }, - "textContent": { + "members": { + "type": "array", + "items": { + "$ref": 
"#/definitions/github_com_costrict_costrict-web_server_internal_models.RepoMember" + } + }, + "name": { + "type": "string" + }, + "ownerId": { + "type": "string" + }, + "repoType": { + "description": "normal | sync", "type": "string" }, "updatedAt": { "type": "string" + }, + "visibility": { + "description": "public | private", + "type": "string" } } }, - "models.CapabilityItem": { + "github_com_costrict_costrict-web_server_internal_models.SecurityScan": { "type": "object", "properties": { - "artifacts": { + "category": { + "type": "string" + }, + "createdAt": { + "type": "string" + }, + "durationMs": { + "type": "integer" + }, + "finishedAt": { + "type": "string" + }, + "id": { + "type": "string" + }, + "itemId": { + "type": "string" + }, + "itemRevision": { + "type": "integer" + }, + "permissions": { + "type": "object" + }, + "recommendations": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityArtifact" + "type": "object" } }, - "assets": { + "redFlags": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityAsset" + "type": "object" } }, - "category": { + "riskLevel": { + "description": "clean | low | medium | high | extreme", "type": "string" }, - "content": { + "scanModel": { "type": "string" }, - "contentMd5": { + "summary": { "type": "string" }, - "createdAt": { + "triggerType": { + "description": "create | update | sync | manual", "type": "string" }, - "createdBy": { + "verdict": { + "description": "safe | caution | reject", "type": "string" - }, - "currentRevision": { - "type": "integer" - }, - "description": { + } + } + }, + "github_com_costrict_costrict-web_server_internal_models.SyncJob": { + "type": "object", + "properties": { + "createdAt": { "type": "string" }, - "embeddingUpdatedAt": { + "finishedAt": { "type": "string" }, - "experienceScore": { - "type": "number" - }, - "favoriteCount": { - "type": "integer" - }, "id": { "type": "string" }, - "installCount": { - "type": "integer" - }, - "itemType": { + 
"lastError": { "type": "string" }, - "lastScanId": { - "type": "string" + "maxAttempts": { + "type": "integer" }, - "metadata": { + "payload": { "type": "object" }, - "name": { - "type": "string" - }, - "previewCount": { + "priority": { "type": "integer" }, "registry": { - "$ref": "#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" }, "registryId": { "type": "string" }, - "repoId": { + "retryCount": { + "type": "integer" + }, + "scheduledAt": { "type": "string" }, - "securityStatus": { + "startedAt": { "type": "string" }, - "slug": { + "status": { + "description": "pending | running | success | failed | cancelled", "type": "string" }, - "sourcePath": { + "syncLogId": { "type": "string" }, - "sourceSha": { + "triggerType": { + "description": "scheduled | manual | webhook", "type": "string" }, - "sourceType": { - "description": "direct | archive", + "triggerUser": { "type": "string" + } + } + }, + "github_com_costrict_costrict-web_server_internal_models.SyncLog": { + "type": "object", + "properties": { + "addedItems": { + "type": "integer" }, - "status": { + "commitSha": { "type": "string" }, - "updatedAt": { + "createdAt": { "type": "string" }, - "updatedBy": { + "deletedItems": { + "type": "integer" + }, + "durationMs": { + "type": "integer" + }, + "errorMessage": { "type": "string" }, - "version": { + "failedItems": { + "type": "integer" + }, + "finishedAt": { "type": "string" }, - "versions": { - "type": "array", - "items": { - "$ref": "#/definitions/models.CapabilityVersion" - } - } - } - }, - "models.CapabilityRegistry": { - "type": "object", - "properties": { - "createdAt": { + "id": { "type": "string" }, - "description": { + "previousSha": { "type": "string" }, - "externalBranch": { + "registry": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" + }, + "registryId": { "type": "string" }, - "externalUrl": { + 
"skippedItems": { + "type": "integer" + }, + "startedAt": { "type": "string" }, - "id": { + "status": { + "description": "running | success | failed | cancelled", "type": "string" }, - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/models.CapabilityItem" - } + "totalItems": { + "type": "integer" }, - "lastSyncLog": { - "$ref": "#/definitions/models.SyncLog" + "triggerType": { + "description": "scheduled | manual | webhook", + "type": "string" }, - "lastSyncLogId": { + "triggerUser": { + "type": "string" + }, + "updatedItems": { + "type": "integer" + } + } + }, + "github_com_costrict_costrict-web_server_internal_models.SystemNotificationChannel": { + "type": "object", + "properties": { + "createdAt": { "type": "string" }, - "lastSyncSha": { + "createdBy": { "type": "string" }, - "lastSyncedAt": { + "enabled": { + "type": "boolean" + }, + "id": { "type": "string" }, "name": { + "description": "显示名,如\"企业微信\"", "type": "string" }, - "ownerId": { + "systemConfig": { + "description": "系统级配置", + "type": "object" + }, + "type": { + "description": "\"wecom\" | \"feishu\" | \"webhook\"", "type": "string" }, - "repoId": { + "updatedAt": { "type": "string" }, - "sourceType": { + "workspaceId": { + "description": "空=全局", "type": "string" + } + } + }, + "github_com_costrict_costrict-web_server_internal_models.UserBehaviorSummary": { + "type": "object", + "properties": { + "favoriteCategories": { + "type": "array", + "items": { + "type": "string" + } }, - "syncConfig": { - "type": "object" + "favoriteTypes": { + "type": "array", + "items": { + "type": "string" + } }, - "syncEnabled": { - "type": "boolean" + "successRate": { + "type": "number" }, - "syncInterval": { + "totalInstalls": { "type": "integer" }, - "syncStatus": { - "description": "idle | syncing | error | paused", - "type": "string" + "totalUses": { + "type": "integer" }, - "updatedAt": { + "totalViews": { + "type": "integer" + }, + "userId": { "type": "string" } } }, - "models.CapabilityVersion": 
{ + "github_com_costrict_costrict-web_server_internal_models.UserNotificationChannel": { "type": "object", "properties": { - "commitMsg": { + "channelType": { + "description": "\"wecom\" | \"feishu\" | \"webhook\"", "type": "string" }, - "content": { + "createdAt": { "type": "string" }, - "contentMd5": { + "enabled": { + "type": "boolean" + }, + "id": { "type": "string" }, - "createdAt": { + "lastError": { "type": "string" }, - "createdBy": { + "lastUsedAt": { "type": "string" }, - "id": { + "name": { "type": "string" }, - "itemId": { + "systemChannelId": { + "description": "关联系统渠道(webhook 类型可为空)", "type": "string" }, - "metadata": { + "triggerEvents": { + "type": "array", + "items": { + "type": "string" + } + }, + "updatedAt": { + "type": "string" + }, + "userConfig": { + "description": "用户自己的配置", "type": "object" }, - "revision": { - "type": "integer" + "userId": { + "type": "string" } } }, - "models.ContextType": { - "type": "string", - "enum": [ - "search_query", - "recommendation", - "direct_access", - "browse" - ], - "x-enum-varnames": [ - "ContextSearch", - "ContextRecommend", - "ContextDirectAccess", - "ContextBrowse" - ] - }, - "models.NotificationLog": { + "github_com_costrict_costrict-web_server_internal_models.WorkspaceDirectory": { "type": "object", "properties": { - "channelType": { - "type": "string" - }, "createdAt": { "type": "string" }, - "deviceId": { - "type": "string" - }, - "error": { + "id": { "type": "string" }, - "eventType": { - "type": "string" + "isDefault": { + "description": "是否为默认目录", + "type": "boolean" }, - "id": { + "name": { "type": "string" }, - "sentAt": { - "type": "string" + "orderIndex": { + "description": "排序索引", + "type": "integer" }, - "sessionId": { + "path": { "type": "string" }, - "status": { - "type": "string" + "settings": { + "description": "目录设置(如忽略模式等)", + "type": "object" }, - "userChannelId": { + "updatedAt": { "type": "string" }, - "userId": { + "workspaceId": { "type": "string" } } }, - "models.Project": { + 
"github_com_costrict_costrict-web_server_internal_services.CreateDirectoryRequest": { "type": "object", + "required": [ + "name", + "path" + ], "properties": { - "archivedAt": { - "type": "string" + "isDefault": { + "type": "boolean" }, - "createdAt": { - "type": "string" + "name": { + "type": "string", + "maxLength": 100 }, - "creatorId": { - "type": "string" + "path": { + "type": "string", + "maxLength": 500 }, + "settings": { + "type": "object", + "additionalProperties": true + } + } + }, + "github_com_costrict_costrict-web_server_internal_services.CreateWorkspaceRequest": { + "type": "object", + "required": [ + "directories", + "name" + ], + "properties": { "description": { - "type": "string" - }, - "enabledAt": { - "type": "string" + "type": "string", + "maxLength": 500 }, - "id": { + "deviceId": { "type": "string" }, - "isPinned": { - "type": "boolean" - }, - "metadata": { - "type": "object" + "directories": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.CreateDirectoryRequest" + } }, "name": { - "type": "string" + "type": "string", + "maxLength": 100 }, - "updatedAt": { - "type": "string" + "settings": { + "type": "object", + "additionalProperties": true } } }, - "models.ProjectInvitation": { + "github_com_costrict_costrict-web_server_internal_services.ItemBehaviorStats": { "type": "object", "properties": { - "createdAt": { - "type": "string" - }, - "expiresAt": { - "type": "string" + "averageRating": { + "type": "number" }, - "id": { - "type": "string" + "clicks": { + "type": "integer" }, - "inviteeId": { - "type": "string" + "failures": { + "type": "integer" }, - "inviterId": { - "type": "string" + "favorites": { + "type": "integer" }, - "message": { - "type": "string" + "installs": { + "type": "integer" }, - "projectId": { + "itemId": { "type": "string" }, - "projectName": { - "type": "string" + "recentFeedback": { + "type": "array", + "items": { + "type": "string" + } 
}, - "respondedAt": { - "type": "string" + "successRate": { + "type": "number" }, - "role": { - "type": "string" + "successes": { + "type": "integer" }, - "status": { - "type": "string" + "uses": { + "type": "integer" }, - "updatedAt": { - "type": "string" + "views": { + "type": "integer" } } }, - "models.ProjectMember": { + "github_com_costrict_costrict-web_server_internal_services.RecommendResponse": { "type": "object", "properties": { - "createdAt": { - "type": "string" - }, - "id": { + "generatedAt": { "type": "string" }, - "joinedAt": { - "type": "string" + "hasMore": { + "type": "boolean" }, - "pinnedAt": { - "type": "string" + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.RecommendedItem" + } }, - "projectId": { - "type": "string" + "page": { + "type": "integer" }, - "role": { - "type": "string" + "pageSize": { + "type": "integer" }, - "updatedAt": { - "type": "string" + "strategies": { + "type": "array", + "items": { + "type": "string" + } }, - "userId": { - "type": "string" + "total": { + "type": "integer" } } }, - "models.RepoInvitation": { + "github_com_costrict_costrict-web_server_internal_services.RecommendedItem": { "type": "object", "properties": { - "createdAt": { - "type": "string" - }, - "expiresAt": { - "type": "string" + "artifacts": { + "type": "array", + "items": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityArtifact" + } }, - "id": { - "type": "string" + "assets": { + "type": "array", + "items": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityAsset" + } }, - "inviteeId": { + "category": { "type": "string" }, - "inviteeUsername": { + "content": { "type": "string" }, - "inviterId": { + "contentMd5": { "type": "string" }, - "inviterUsername": { + "createdAt": { "type": "string" }, - "repoId": { + "createdBy": { "type": "string" }, - "repository": { - "$ref": 
"#/definitions/models.Repository" + "currentRevision": { + "type": "integer" }, - "role": { - "description": "admin | member", + "description": { "type": "string" }, - "status": { - "description": "pending | accepted | declined | cancelled", + "embeddingUpdatedAt": { "type": "string" }, - "updatedAt": { - "type": "string" - } - } - }, - "models.RepoMember": { - "type": "object", - "properties": { - "createdAt": { - "type": "string" + "experienceScore": { + "type": "number" + }, + "favoriteCount": { + "type": "integer" }, "id": { "type": "string" }, - "repoId": { - "type": "string" + "installCount": { + "type": "integer" }, - "role": { - "description": "owner | admin | member", + "itemType": { "type": "string" }, - "userId": { + "lastScanId": { "type": "string" }, - "username": { - "type": "string" - } - } - }, - "models.Repository": { - "type": "object", - "properties": { - "createdAt": { - "type": "string" + "metadata": { + "type": "object" }, - "description": { + "name": { "type": "string" }, - "displayName": { - "type": "string" + "previewCount": { + "type": "integer" }, - "id": { + "reason": { "type": "string" }, - "members": { - "type": "array", - "items": { - "$ref": "#/definitions/models.RepoMember" - } + "registry": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" }, - "name": { + "registryId": { "type": "string" }, - "ownerId": { + "repoId": { "type": "string" }, - "repoType": { - "description": "normal | sync", - "type": "string" + "score": { + "type": "number" }, - "updatedAt": { + "securityStatus": { "type": "string" }, - "visibility": { - "description": "public | private", + "slug": { "type": "string" - } - } - }, - "models.SecurityScan": { - "type": "object", - "properties": { - "category": { + }, + "sourcePath": { "type": "string" }, - "createdAt": { + "sourceSha": { "type": "string" }, - "durationMs": { - "type": "integer" + "sourceType": { + "description": "direct | archive", + "type": "string" 
}, - "finishedAt": { + "status": { "type": "string" }, - "id": { + "strategy": { "type": "string" }, - "itemId": { + "updatedAt": { "type": "string" }, - "itemRevision": { - "type": "integer" + "updatedBy": { + "type": "string" }, - "permissions": { - "type": "object" + "version": { + "type": "string" }, - "recommendations": { + "versions": { "type": "array", "items": { - "type": "object" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityVersion" } - }, - "redFlags": { + } + } + }, + "github_com_costrict_costrict-web_server_internal_services.ReorderDirectoriesRequest": { + "type": "object", + "required": [ + "directoryIds" + ], + "properties": { + "directoryIds": { "type": "array", "items": { - "type": "object" + "type": "string" } - }, - "riskLevel": { - "description": "clean | low | medium | high | extreme", - "type": "string" - }, - "scanModel": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "triggerType": { - "description": "create | update | sync | manual", - "type": "string" - }, - "verdict": { - "description": "safe | caution | reject", - "type": "string" } } }, - "models.SyncJob": { + "github_com_costrict_costrict-web_server_internal_services.SearchResult": { "type": "object", "properties": { - "createdAt": { - "type": "string" - }, - "finishedAt": { - "type": "string" - }, - "id": { - "type": "string" - }, - "lastError": { - "type": "string" - }, - "maxAttempts": { - "type": "integer" - }, - "payload": { - "type": "object" - }, - "priority": { + "durationMs": { "type": "integer" }, - "registry": { - "$ref": "#/definitions/models.CapabilityRegistry" + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.SearchResultItem" + } }, - "registryId": { + "query": { "type": "string" }, - "retryCount": { + "total": { "type": "integer" + } + } + }, + "github_com_costrict_costrict-web_server_internal_services.SearchResultItem": { + 
"type": "object", + "properties": { + "artifacts": { + "type": "array", + "items": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityArtifact" + } }, - "scheduledAt": { - "type": "string" + "assets": { + "type": "array", + "items": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityAsset" + } }, - "startedAt": { + "category": { "type": "string" }, - "status": { - "description": "pending | running | success | failed | cancelled", + "content": { "type": "string" }, - "syncLogId": { + "contentMd5": { "type": "string" }, - "triggerType": { - "description": "scheduled | manual | webhook", + "createdAt": { "type": "string" }, - "triggerUser": { + "createdBy": { "type": "string" - } - } - }, - "models.SyncLog": { - "type": "object", - "properties": { - "addedItems": { + }, + "currentRevision": { "type": "integer" }, - "commitSha": { + "description": { "type": "string" }, - "createdAt": { + "embeddingUpdatedAt": { "type": "string" }, - "deletedItems": { - "type": "integer" + "experienceScore": { + "type": "number" }, - "durationMs": { + "favoriteCount": { "type": "integer" }, - "errorMessage": { + "id": { "type": "string" }, - "failedItems": { + "installCount": { "type": "integer" }, - "finishedAt": { - "type": "string" - }, - "id": { + "itemType": { "type": "string" }, - "previousSha": { + "lastScanId": { "type": "string" }, - "registry": { - "$ref": "#/definitions/models.CapabilityRegistry" + "metadata": { + "type": "object" }, - "registryId": { + "name": { "type": "string" }, - "skippedItems": { + "previewCount": { "type": "integer" }, - "startedAt": { + "registry": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" + }, + "registryId": { "type": "string" }, - "status": { - "description": "running | success | failed | cancelled", + "repoId": { "type": "string" }, - "totalItems": { - "type": "integer" + "score": { + "type": "number" }, 
- "triggerType": { - "description": "scheduled | manual | webhook", + "securityStatus": { "type": "string" }, - "triggerUser": { + "slug": { "type": "string" }, - "updatedItems": { - "type": "integer" - } - } - }, - "models.SystemNotificationChannel": { - "type": "object", - "properties": { - "createdAt": { + "sourcePath": { "type": "string" }, - "createdBy": { + "sourceSha": { "type": "string" }, - "enabled": { - "type": "boolean" - }, - "id": { + "sourceType": { + "description": "direct | archive", "type": "string" }, - "name": { - "description": "显示名,如\"企业微信\"", + "status": { "type": "string" }, - "systemConfig": { - "description": "系统级配置", - "type": "object" - }, - "type": { - "description": "\"wecom\" | \"feishu\" | \"webhook\"", + "updatedAt": { "type": "string" }, - "updatedAt": { + "updatedBy": { "type": "string" }, - "workspaceId": { - "description": "空=全局", + "version": { "type": "string" + }, + "versions": { + "type": "array", + "items": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityVersion" + } } } }, - "models.UserBehaviorSummary": { + "github_com_costrict_costrict-web_server_internal_services.UpdateDirectoryRequest": { "type": "object", "properties": { - "favoriteCategories": { - "type": "array", - "items": { - "type": "string" - } - }, - "favoriteTypes": { - "type": "array", - "items": { - "type": "string" - } - }, - "successRate": { - "type": "number" - }, - "totalInstalls": { - "type": "integer" + "isDefault": { + "type": "boolean" }, - "totalUses": { - "type": "integer" + "name": { + "type": "string", + "maxLength": 100 }, - "totalViews": { - "type": "integer" + "path": { + "type": "string", + "maxLength": 500 }, - "userId": { - "type": "string" + "settings": { + "type": "object", + "additionalProperties": true } } }, - "models.UserNotificationChannel": { + "github_com_costrict_costrict-web_server_internal_services.UpdateWorkspaceRequest": { "type": "object", "properties": { - "channelType": { - 
"description": "\"wecom\" | \"feishu\" | \"webhook\"", - "type": "string" - }, - "createdAt": { - "type": "string" - }, - "enabled": { - "type": "boolean" + "description": { + "type": "string", + "maxLength": 500 }, - "id": { + "deviceId": { "type": "string" }, - "lastError": { - "type": "string" + "name": { + "type": "string", + "maxLength": 100 }, - "lastUsedAt": { - "type": "string" + "settings": { + "type": "object", + "additionalProperties": true }, - "name": { + "status": { + "type": "string", + "enum": [ + "active", + "inactive", + "archived" + ] + } + } + }, + "github_com_costrict_costrict-web_server_internal_services.UsageActivityResponse": { + "type": "object", + "properties": { + "git_repo_url": { "type": "string" }, - "systemChannelId": { - "description": "关联系统渠道(webhook 类型可为空)", - "type": "string" + "range": { + "type": "object", + "properties": { + "from": { + "type": "string" + }, + "to": { + "type": "string" + } + } }, - "triggerEvents": { + "users": { "type": "array", "items": { - "type": "string" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.UsageUserActivity" } - }, - "updatedAt": { + } + } + }, + "github_com_costrict_costrict-web_server_internal_services.UsageDaily": { + "type": "object", + "properties": { + "date": { "type": "string" }, - "userConfig": { - "description": "用户自己的配置", - "type": "object" - }, - "userId": { - "type": "string" + "requests": { + "type": "integer" } } }, - "models.WorkspaceDirectory": { + "github_com_costrict_costrict-web_server_internal_services.UsageReportItem": { "type": "object", + "required": [ + "date", + "git_repo_url", + "message_id", + "model_id", + "session_id", + "updated" + ], "properties": { - "createdAt": { - "type": "string" + "cache_read_tokens": { + "type": "integer" }, - "id": { + "cache_write_tokens": { + "type": "integer" + }, + "cost": { + "type": "number" + }, + "date": { "type": "string" }, - "isDefault": { - "description": "是否为默认目录", - "type": "boolean" + 
"git_repo_url": { + "type": "string" }, - "name": { + "git_worktree": { "type": "string" }, - "orderIndex": { - "description": "排序索引", + "input_tokens": { "type": "integer" }, - "path": { + "message_id": { "type": "string" }, - "settings": { - "description": "目录设置(如忽略模式等)", - "type": "object" + "model_id": { + "type": "string" }, - "updatedAt": { + "output_tokens": { + "type": "integer" + }, + "provider_id": { "type": "string" }, - "workspaceId": { + "reasoning_tokens": { + "type": "integer" + }, + "request_id": { "type": "string" - } - } - }, - "project.CreateInvitationRequest": { - "type": "object", - "required": [ - "inviteeId" - ], - "properties": { - "inviteeId": { + }, + "request_time": { "type": "string" }, - "message": { + "rounds": { + "type": "integer" + }, + "session_id": { "type": "string" }, - "role": { + "updated": { "type": "string" } } }, - "project.CreateProjectRequest": { + "github_com_costrict_costrict-web_server_internal_services.UsageReportRequest": { "type": "object", "required": [ - "name" + "reports" ], "properties": { - "description": { + "client_version": { "type": "string" }, - "enabledAt": { + "device_id": { "type": "string" }, - "name": { + "reported_at": { "type": "string" - } - } - }, - "project.InvitationResponse": { - "type": "object", - "properties": { - "invitation": { - "$ref": "#/definitions/models.ProjectInvitation" - } - } - }, - "project.InvitationsResponse": { - "type": "object", - "properties": { - "invitations": { + }, + "reports": { "type": "array", + "maxItems": 500, + "minItems": 1, "items": { - "$ref": "#/definitions/models.ProjectInvitation" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.UsageReportItem" } } } }, - "project.MemberResponse": { + "github_com_costrict_costrict-web_server_internal_services.UsageReportResponse": { "type": "object", "properties": { - "member": { - "$ref": "#/definitions/models.ProjectMember" + "accepted": { + "type": "integer" + }, + "errors": { + "type": 
"array", + "items": { + "type": "string" + } + }, + "skipped": { + "type": "integer" } } }, - "project.MembersResponse": { + "github_com_costrict_costrict-web_server_internal_services.UsageUserActivity": { "type": "object", "properties": { - "members": { + "daily": { "type": "array", "items": { - "$ref": "#/definitions/models.ProjectMember" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_services.UsageDaily" } + }, + "total_requests": { + "type": "integer" + }, + "user_id": { + "type": "string" + }, + "username": { + "type": "string" } } }, - "project.ProjectBasicInfo": { + "github_com_costrict_costrict-web_server_internal_services.WorkspaceWithDeviceStatus": { "type": "object", "properties": { - "archivedAt": { + "createdAt": { "type": "string" }, "description": { "type": "string" }, - "enabledAt": { + "deviceId": { + "description": "绑定的设备ID", "type": "string" }, - "id": { + "deviceStatus": { + "description": "online | offline | busy | \"\"", "type": "string" }, - "name": { + "deviceUniqueId": { + "description": "Device.DeviceID,用于代理路由", "type": "string" - } - } - }, - "project.ProjectBasicInfoResponse": { - "type": "object", - "properties": { - "project": { - "$ref": "#/definitions/project.ProjectBasicInfo" - } - } - }, - "project.ProjectResponse": { - "type": "object", - "properties": { - "project": { - "$ref": "#/definitions/models.Project" - } - } - }, - "project.ProjectsResponse": { - "type": "object", - "properties": { - "projects": { + }, + "directories": { "type": "array", "items": { - "$ref": "#/definitions/models.Project" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.WorkspaceDirectory" } - } - } - }, - "project.RespondInvitationRequest": { - "type": "object", - "properties": { - "accept": { - "type": "boolean" - } - } - }, - "project.SetProjectPinRequest": { - "type": "object", - "properties": { - "pinned": { - "type": "boolean" - } - } - }, - "project.UpdateMemberRoleRequest": { - "type": 
"object", - "required": [ - "role" - ], - "properties": { - "role": { - "type": "string" - } - } - }, - "project.UpdateProjectArchiveTimeRequest": { - "type": "object", - "required": [ - "archivedAt" - ], - "properties": { - "archivedAt": { - "type": "string" - } - } - }, - "project.UpdateProjectRequest": { - "type": "object", - "properties": { - "description": { - "type": "string" }, - "enabledAt": { + "id": { "type": "string" }, - "name": { - "type": "string" - } - } - }, - "services.CreateDirectoryRequest": { - "type": "object", - "required": [ - "name", - "path" - ], - "properties": { "isDefault": { + "description": "是否为默认工作空间", "type": "boolean" }, "name": { - "type": "string", - "maxLength": 100 - }, - "path": { - "type": "string", - "maxLength": 500 + "type": "string" }, "settings": { - "type": "object", - "additionalProperties": true + "description": "工作空间设置", + "type": "object" + }, + "status": { + "description": "active | inactive | archived", + "type": "string" + }, + "updatedAt": { + "type": "string" + }, + "userId": { + "type": "string" } } }, - "services.CreateWorkspaceRequest": { + "internal_handlers.CreateSyncRegistryInput": { "type": "object", - "required": [ - "directories", - "name" - ], "properties": { + "conflictStrategy": { + "type": "string" + }, "description": { - "type": "string", - "maxLength": 500 + "type": "string" }, - "deviceId": { + "excludePatterns": { + "type": "array", + "items": { + "type": "string" + } + }, + "externalBranch": { "type": "string" }, - "directories": { + "externalUrl": { + "type": "string" + }, + "includePatterns": { "type": "array", - "minItems": 1, "items": { - "$ref": "#/definitions/services.CreateDirectoryRequest" + "type": "string" } }, "name": { - "type": "string", - "maxLength": 100 + "type": "string" }, - "settings": { - "type": "object", - "additionalProperties": true + "syncEnabled": { + "type": "boolean" + }, + "syncInterval": { + "type": "integer" + }, + "webhookSecret": { + "type": "string" } } }, - 
"services.ItemBehaviorStats": { + "internal_handlers.ItemResponse": { "type": "object", "properties": { - "averageRating": { - "type": "number" + "artifacts": { + "type": "array", + "items": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityArtifact" + } }, - "clicks": { - "type": "integer" + "assets": { + "type": "array", + "items": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityAsset" + } }, - "failures": { - "type": "integer" + "category": { + "type": "string" }, - "favorites": { - "type": "integer" + "content": { + "type": "string" }, - "installs": { - "type": "integer" + "createdAt": { + "type": "string" }, - "itemId": { + "createdBy": { "type": "string" }, - "recentFeedback": { - "type": "array", - "items": { - "type": "string" - } + "description": { + "type": "string" }, - "successRate": { + "embeddingUpdatedAt": { + "type": "string" + }, + "experienceScore": { "type": "number" }, - "successes": { + "favoriteCount": { "type": "integer" }, - "uses": { + "favorited": { + "type": "boolean" + }, + "id": { + "type": "string" + }, + "installCount": { "type": "integer" }, - "views": { + "itemType": { + "type": "string" + }, + "lastScanId": { + "type": "string" + }, + "metadata": { + "type": "object" + }, + "name": { + "type": "string" + }, + "previewCount": { "type": "integer" - } - } - }, - "services.RecommendResponse": { - "type": "object", - "properties": { - "generatedAt": { + }, + "registry": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" + }, + "registryId": { + "type": "string" + }, + "repoId": { + "type": "string" + }, + "repoVisibility": { + "type": "string" + }, + "securityStatus": { + "type": "string" + }, + "slug": { + "type": "string" + }, + "sourcePath": { "type": "string" }, - "hasMore": { - "type": "boolean" + "sourceSha": { + "type": "string" }, - "items": { - "type": "array", - "items": { - "$ref": 
"#/definitions/services.RecommendedItem" - } + "sourceType": { + "description": "direct | archive", + "type": "string" }, - "page": { - "type": "integer" + "status": { + "type": "string" }, - "pageSize": { - "type": "integer" + "updatedAt": { + "type": "string" }, - "strategies": { + "updatedBy": { + "type": "string" + }, + "version": { + "type": "string" + }, + "versions": { "type": "array", "items": { - "type": "string" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityVersion" } - }, - "total": { - "type": "integer" } } }, - "services.RecommendedItem": { + "internal_handlers.MyItem": { "type": "object", "properties": { "artifacts": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityArtifact" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityArtifact" } }, "assets": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityAsset" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityAsset" } }, "category": { @@ -12611,18 +13954,12 @@ const docTemplate = `{ "content": { "type": "string" }, - "contentMd5": { - "type": "string" - }, "createdAt": { "type": "string" }, "createdBy": { "type": "string" }, - "currentRevision": { - "type": "integer" - }, "description": { "type": "string" }, @@ -12656,11 +13993,8 @@ const docTemplate = `{ "previewCount": { "type": "integer" }, - "reason": { - "type": "string" - }, "registry": { - "$ref": "#/definitions/models.CapabilityRegistry" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityRegistry" }, "registryId": { "type": "string" @@ -12668,8 +14002,11 @@ const docTemplate = `{ "repoId": { "type": "string" }, - "score": { - "type": "number" + "repoName": { + "type": "string" + }, + "repoVisibility": { + "type": "string" }, "securityStatus": { "type": "string" @@ -12690,9 +14027,6 @@ const docTemplate = `{ "status": { "type": "string" }, - 
"strategy": { - "type": "string" - }, "updatedAt": { "type": "string" }, @@ -12705,419 +14039,462 @@ const docTemplate = `{ "versions": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityVersion" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.CapabilityVersion" } } } }, - "services.ReorderDirectoriesRequest": { + "internal_handlers.userBasicInfoResponse": { + "type": "object", + "properties": { + "avatarUrl": { + "type": "string" + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "internal_project.CreateInvitationRequest": { "type": "object", "required": [ - "directoryIds" + "inviteeId" ], "properties": { - "directoryIds": { - "type": "array", - "items": { - "type": "string" - } + "inviteeId": { + "type": "string" + }, + "message": { + "type": "string" + }, + "role": { + "type": "string" } } }, - "services.SearchResult": { + "internal_project.CreateProjectRequest": { "type": "object", + "required": [ + "name" + ], "properties": { - "durationMs": { - "type": "integer" - }, - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/services.SearchResultItem" - } + "description": { + "type": "string" }, - "query": { + "enabledAt": { "type": "string" }, - "total": { - "type": "integer" + "name": { + "type": "string" } } }, - "services.SearchResultItem": { + "internal_project.InvitationResponse": { "type": "object", "properties": { - "artifacts": { + "invitation": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.ProjectInvitation" + } + } + }, + "internal_project.InvitationsResponse": { + "type": "object", + "properties": { + "invitations": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityArtifact" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.ProjectInvitation" } - }, - "assets": { + } + } + }, + "internal_project.MemberResponse": { + "type": "object", + "properties": { + 
"member": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.ProjectMember" + } + } + }, + "internal_project.MembersResponse": { + "type": "object", + "properties": { + "members": { "type": "array", "items": { - "$ref": "#/definitions/models.CapabilityAsset" + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.ProjectMember" } - }, - "category": { - "type": "string" - }, - "content": { + } + } + }, + "internal_project.ProjectBasicInfo": { + "type": "object", + "properties": { + "archivedAt": { "type": "string" }, - "contentMd5": { + "description": { "type": "string" }, - "createdAt": { + "enabledAt": { "type": "string" }, - "createdBy": { + "id": { "type": "string" }, - "currentRevision": { - "type": "integer" - }, - "description": { + "name": { "type": "string" - }, - "embeddingUpdatedAt": { + } + } + }, + "internal_project.ProjectBasicInfoResponse": { + "type": "object", + "properties": { + "project": { + "$ref": "#/definitions/internal_project.ProjectBasicInfo" + } + } + }, + "internal_project.ProjectResponse": { + "type": "object", + "properties": { + "project": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.Project" + } + } + }, + "internal_project.ProjectsResponse": { + "type": "object", + "properties": { + "projects": { + "type": "array", + "items": { + "$ref": "#/definitions/github_com_costrict_costrict-web_server_internal_models.Project" + } + } + } + }, + "internal_project.RespondInvitationRequest": { + "type": "object", + "properties": { + "accept": { + "type": "boolean" + } + } + }, + "internal_project.SetProjectPinRequest": { + "type": "object", + "properties": { + "pinned": { + "type": "boolean" + } + } + }, + "internal_project.UpdateMemberRoleRequest": { + "type": "object", + "required": [ + "role" + ], + "properties": { + "role": { "type": "string" - }, - "experienceScore": { - "type": "number" - }, - "favoriteCount": { - "type": "integer" - }, - "id": { 
+ } + } + }, + "internal_project.UpdateProjectArchiveTimeRequest": { + "type": "object", + "required": [ + "archivedAt" + ], + "properties": { + "archivedAt": { "type": "string" - }, - "installCount": { - "type": "integer" - }, - "itemType": { + } + } + }, + "internal_project.UpdateProjectRequest": { + "type": "object", + "properties": { + "description": { "type": "string" }, - "lastScanId": { + "enabledAt": { "type": "string" }, - "metadata": { - "type": "object" - }, "name": { "type": "string" + } + } + }, + "internal_team.SessionProgress": { + "type": "object", + "properties": { + "completedTasks": { + "type": "integer" }, - "previewCount": { + "failedTasks": { "type": "integer" }, - "registry": { - "$ref": "#/definitions/models.CapabilityRegistry" + "pendingTasks": { + "type": "integer" }, - "registryId": { - "type": "string" + "runningTasks": { + "type": "integer" }, - "repoId": { - "type": "string" + "teammates": { + "type": "array", + "items": { + "$ref": "#/definitions/internal_team.TeammateProgress" + } }, - "score": { - "type": "number" + "totalTasks": { + "type": "integer" + } + } + }, + "internal_team.TeamApprovalRequest": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" }, - "securityStatus": { + "description": { "type": "string" }, - "slug": { + "feedback": { "type": "string" }, - "sourcePath": { + "id": { "type": "string" }, - "sourceSha": { + "permissionUpdates": { + "type": "object" + }, + "requesterId": { "type": "string" }, - "sourceType": { - "description": "direct | archive", + "requesterName": { "type": "string" }, - "status": { + "resolvedAt": { "type": "string" }, - "updatedAt": { + "riskLevel": { "type": "string" }, - "updatedBy": { + "sessionId": { "type": "string" }, - "version": { + "status": { "type": "string" }, - "versions": { - "type": "array", - "items": { - "$ref": "#/definitions/models.CapabilityVersion" - } + "toolInput": { + "type": "object" + }, + "toolName": { + "type": "string" } } }, - 
"services.UpdateDirectoryRequest": { + "internal_team.TeamRepoAffinity": { "type": "object", "properties": { - "isDefault": { - "type": "boolean" + "createdAt": { + "type": "string" }, - "name": { - "type": "string", - "maxLength": 100 + "currentBranch": { + "type": "string" }, - "path": { - "type": "string", - "maxLength": 500 + "hasUncommittedChanges": { + "type": "boolean" }, - "settings": { - "type": "object", - "additionalProperties": true - } - } - }, - "services.UpdateWorkspaceRequest": { - "type": "object", - "properties": { - "description": { - "type": "string", - "maxLength": 500 + "id": { + "type": "string" }, - "deviceId": { + "lastSyncedAt": { "type": "string" }, - "name": { - "type": "string", - "maxLength": 100 + "memberId": { + "type": "string" }, - "settings": { - "type": "object", - "additionalProperties": true + "repoLocalPath": { + "type": "string" }, - "status": { - "type": "string", - "enum": [ - "active", - "inactive", - "archived" - ] - } - } - }, - "services.UsageActivityResponse": { - "type": "object", - "properties": { - "git_repo_url": { + "repoRemoteUrl": { "type": "string" }, - "range": { - "type": "object", - "properties": { - "from": { - "type": "string" - }, - "to": { - "type": "string" - } - } + "sessionId": { + "type": "string" }, - "users": { - "type": "array", - "items": { - "$ref": "#/definitions/services.UsageUserActivity" - } + "updatedAt": { + "type": "string" } } }, - "services.UsageDaily": { + "internal_team.TeamSession": { "type": "object", "properties": { - "date": { + "createdAt": { "type": "string" }, - "requests": { - "type": "integer" - } - } - }, - "services.UsageReportItem": { - "type": "object", - "required": [ - "date", - "git_repo_url", - "message_id", - "model_id", - "session_id", - "updated" - ], - "properties": { - "cache_read_tokens": { - "type": "integer" + "creatorId": { + "type": "string" }, - "cache_write_tokens": { + "fencingToken": { "type": "integer" }, - "cost": { - "type": "number" + "id": { + 
"type": "string" }, - "date": { + "leaderMachineId": { "type": "string" }, - "git_repo_url": { + "leaderUserId": { "type": "string" }, - "git_worktree": { + "metadata": { + "type": "object" + }, + "name": { "type": "string" }, - "input_tokens": { - "type": "integer" + "status": { + "type": "string" }, - "message_id": { + "updatedAt": { + "type": "string" + } + } + }, + "internal_team.TeamSessionMember": { + "type": "object", + "properties": { + "connectedAt": { "type": "string" }, - "model_id": { + "createdAt": { "type": "string" }, - "output_tokens": { - "type": "integer" + "id": { + "type": "string" }, - "provider_id": { + "lastHeartbeat": { "type": "string" }, - "reasoning_tokens": { - "type": "integer" + "machineId": { + "type": "string" }, - "request_id": { + "machineName": { "type": "string" }, - "request_time": { + "role": { "type": "string" }, - "rounds": { - "type": "integer" + "sessionId": { + "type": "string" }, - "session_id": { + "status": { "type": "string" }, - "updated": { + "updatedAt": { + "type": "string" + }, + "userId": { "type": "string" } } }, - "services.UsageReportRequest": { + "internal_team.TeamTask": { "type": "object", - "required": [ - "reports" - ], "properties": { - "client_version": { + "assignedMemberId": { "type": "string" }, - "device_id": { + "claimedAt": { "type": "string" }, - "reported_at": { + "completedAt": { "type": "string" }, - "reports": { + "createdAt": { + "type": "string" + }, + "dependencies": { "type": "array", - "maxItems": 500, - "minItems": 1, "items": { - "$ref": "#/definitions/services.UsageReportItem" + "type": "string" } - } - } - }, - "services.UsageReportResponse": { - "type": "object", - "properties": { - "accepted": { - "type": "integer" }, - "errors": { + "description": { + "type": "string" + }, + "errorMessage": { + "type": "string" + }, + "fileHints": { "type": "array", "items": { "type": "string" } }, - "skipped": { + "id": { + "type": "string" + }, + "maxRetries": { "type": "integer" - } - } - }, - 
"services.UsageUserActivity": { - "type": "object", - "properties": { - "daily": { + }, + "priority": { + "type": "integer" + }, + "repoAffinity": { "type": "array", "items": { - "$ref": "#/definitions/services.UsageDaily" + "type": "string" } }, - "total_requests": { + "result": { + "type": "object" + }, + "retryCount": { "type": "integer" }, - "user_id": { + "sessionId": { "type": "string" }, - "username": { + "startedAt": { + "type": "string" + }, + "status": { + "type": "string" + }, + "updatedAt": { "type": "string" } } }, - "services.WorkspaceWithDeviceStatus": { + "internal_team.TeammateProgress": { "type": "object", "properties": { - "createdAt": { - "type": "string" - }, - "description": { - "type": "string" - }, - "deviceId": { - "description": "绑定的设备ID", - "type": "string" - }, - "deviceStatus": { - "description": "online | offline | busy | \"\"", - "type": "string" - }, - "deviceUniqueId": { - "description": "Device.DeviceID,用于代理路由", - "type": "string" - }, - "directories": { - "type": "array", - "items": { - "$ref": "#/definitions/models.WorkspaceDirectory" - } - }, - "id": { - "type": "string" - }, - "isDefault": { - "description": "是否为默认工作空间", - "type": "boolean" + "completed": { + "type": "integer" }, - "name": { + "currentTaskId": { "type": "string" }, - "settings": { - "description": "工作空间设置", - "type": "object" + "failed": { + "type": "integer" }, - "status": { - "description": "active | inactive | archived", + "machineName": { "type": "string" }, - "updatedAt": { + "memberId": { "type": "string" }, - "userId": { - "type": "string" + "running": { + "type": "integer" } } } diff --git a/server/go.mod b/server/go.mod index d07b513..5a8352e 100644 --- a/server/go.mod +++ b/server/go.mod @@ -8,6 +8,7 @@ require ( github.com/go-git/go-git/v5 v5.11.0 github.com/golang-jwt/jwt/v4 v4.5.2 github.com/google/uuid v1.6.0 + github.com/gorilla/websocket v1.5.3 github.com/lib/pq v1.10.9 github.com/patrickmn/go-cache v2.1.0+incompatible 
github.com/pressly/goose/v3 v3.27.0 diff --git a/server/go.sum b/server/go.sum index 0e8734c..2393449 100644 --- a/server/go.sum +++ b/server/go.sum @@ -109,6 +109,8 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= diff --git a/server/internal/cloud/event_router.go b/server/internal/cloud/event_router.go index 26d6bec..619e880 100644 --- a/server/internal/cloud/event_router.go +++ b/server/internal/cloud/event_router.go @@ -96,6 +96,8 @@ func eventTypeToPath(eventType string, props map[string]any) string { if sessionID != "" { return "/api/session/" + sessionID + "/message" } + case EventTeamTaskDispatch: + return "/api/v1/teamworker/tasks/dispatch" } return "" } @@ -105,8 +107,8 @@ type discardResponseWriter struct { statusCode int } -func (w *discardResponseWriter) Header() http.Header { return w.header } -func (w *discardResponseWriter) WriteHeader(statusCode int) { w.statusCode = statusCode } +func (w *discardResponseWriter) Header() http.Header { return w.header } +func (w *discardResponseWriter) WriteHeader(statusCode int) { w.statusCode = statusCode } func (w *discardResponseWriter) Write(b []byte) (int, error) { return len(b), nil } func (r *EventRouter) startBatchFlush() { diff --git a/server/internal/cloud/types.go b/server/internal/cloud/types.go index 
3820f0a..16c3cbc 100644 --- a/server/internal/cloud/types.go +++ b/server/internal/cloud/types.go @@ -3,12 +3,12 @@ package cloud import "errors" type SSEConnection struct { - ID string - Type ConnType - UserID string - WorkspaceID string - Send chan Event - Done chan struct{} + ID string + Type ConnType + UserID string + WorkspaceID string + Send chan Event + Done chan struct{} LastActivity int64 } @@ -31,19 +31,20 @@ type ManagerStats struct { } const ( - EventCloudConnected = "cloud.connected" - EventDeviceConnected = "device.connected" - EventHeartbeat = "heartbeat" - EventSessionStatus = "session.status" - EventSessionCreated = "session.created" - EventSessionUpdated = "session.updated" - EventMessagePartUpdated = "message.part.updated" - EventMessagePartDelta = "message.part.delta" - EventDeviceStatus = "device.status" - EventSessionAbort = "session.abort" - EventSessionMessage = "session.message" - EventBatch = "batch" - EventInterventionRequired = "intervention.required" + EventCloudConnected = "cloud.connected" + EventDeviceConnected = "device.connected" + EventHeartbeat = "heartbeat" + EventSessionStatus = "session.status" + EventSessionCreated = "session.created" + EventSessionUpdated = "session.updated" + EventMessagePartUpdated = "message.part.updated" + EventMessagePartDelta = "message.part.delta" + EventDeviceStatus = "device.status" + EventSessionAbort = "session.abort" + EventSessionMessage = "session.message" + EventTeamTaskDispatch = "team.task.dispatch" + EventBatch = "batch" + EventInterventionRequired = "intervention.required" ) const ( diff --git a/server/internal/llm/tools.go b/server/internal/llm/tools.go index 9abbefa..829f086 100644 --- a/server/internal/llm/tools.go +++ b/server/internal/llm/tools.go @@ -60,8 +60,8 @@ type BehaviorPattern struct { SuggestedAction string `json:"suggestedAction"` } -// extractJSON extracts JSON from LLM response that may contain markdown code blocks -func extractJSON(response string) string { +// 
ExtractJSON extracts JSON from LLM response that may contain markdown code blocks +func ExtractJSON(response string) string { response = strings.TrimSpace(response) // Try to extract JSON from markdown code blocks @@ -85,7 +85,7 @@ func extractJSON(response string) string { // ParseGeneratedSkill parses LLM response into GeneratedSkill func ParseGeneratedSkill(response string) (*GeneratedSkill, error) { - jsonStr := extractJSON(response) + jsonStr := ExtractJSON(response) var skill GeneratedSkill if err := json.Unmarshal([]byte(jsonStr), &skill); err != nil { return nil, fmt.Errorf("failed to parse generated skill: %w", err) @@ -95,7 +95,7 @@ func ParseGeneratedSkill(response string) (*GeneratedSkill, error) { // ParseSkillAnalysis parses LLM response into SkillAnalysis func ParseSkillAnalysis(response string) (*SkillAnalysis, error) { - jsonStr := extractJSON(response) + jsonStr := ExtractJSON(response) var analysis SkillAnalysis if err := json.Unmarshal([]byte(jsonStr), &analysis); err != nil { return nil, fmt.Errorf("failed to parse skill analysis: %w", err) @@ -105,7 +105,7 @@ func ParseSkillAnalysis(response string) (*SkillAnalysis, error) { // ParseQueryExpansion parses LLM response into QueryExpansion func ParseQueryExpansion(response string) (*QueryExpansion, error) { - jsonStr := extractJSON(response) + jsonStr := ExtractJSON(response) var expansion QueryExpansion if err := json.Unmarshal([]byte(jsonStr), &expansion); err != nil { return nil, fmt.Errorf("failed to parse query expansion: %w", err) diff --git a/server/internal/team/decompose.go b/server/internal/team/decompose.go new file mode 100644 index 0000000..60aebb5 --- /dev/null +++ b/server/internal/team/decompose.go @@ -0,0 +1,104 @@ +package team + +import ( + "fmt" + + "github.com/google/uuid" + "github.com/lib/pq" +) + +// DecomposeRequest is the request body for the decompose endpoint. 
+type DecomposeRequest struct { + Prompt string `json:"prompt" binding:"required"` + Context map[string]any `json:"context,omitempty"` + FencingToken int64 `json:"fencingToken,omitempty"` + DryRun bool `json:"dryRun,omitempty"` +} + +// DecomposeResultItem is a single task returned by the teammate's LLM. +type DecomposeResultItem struct { + TaskID string `json:"taskId"` + Description string `json:"description"` + RepoAffinity []string `json:"repoAffinity,omitempty"` + FileHints []string `json:"fileHints,omitempty"` + Dependencies []string `json:"dependencies,omitempty"` + Priority int `json:"priority,omitempty"` +} + +// toTeamTask converts a DecomposeResultItem into a TeamTask for persistence. +func (item DecomposeResultItem) toTeamTask(sessionID string) TeamTask { + t := TeamTask{ + ID: uuid.New().String(), + SessionID: sessionID, + Description: item.Description, + Status: TaskStatusPending, + Priority: item.Priority, + MaxRetries: 3, + } + if t.Priority == 0 { + t.Priority = 5 + } + if len(item.RepoAffinity) > 0 { + t.RepoAffinity = pq.StringArray(item.RepoAffinity) + } + if len(item.FileHints) > 0 { + t.FileHints = pq.StringArray(item.FileHints) + } + if len(item.Dependencies) > 0 { + t.Dependencies = pq.StringArray(item.Dependencies) + } + return t +} + +// buildFallbackTasks creates a single-task plan when decomposition fails. +func buildFallbackTasks(prompt, sessionID string) []TeamTask { + return []TeamTask{ + { + ID: uuid.New().String(), + SessionID: sessionID, + Description: prompt, + Status: TaskStatusPending, + Priority: 5, + MaxRetries: 3, + }, + } +} + +// pickDecomposeTarget selects a decomposition target. +// Strategy: +// 1) Prefer an online non-leader teammate. +// 2) If none is available, fall back to the online leader machine itself. +// Returns (memberID, machineID, error). 
+func pickDecomposeTarget(hub *Hub, store *Store, sessionID string) (string, string, error) { + leaderMachineID := hub.GetLeaderMachineID(sessionID) + members, err := store.ListMembers(sessionID) + if err != nil || len(members) == 0 { + return "", "", fmt.Errorf("no teammates available") + } + + var leaderMemberID string + + for _, m := range members { + if m.Status != MemberStatusOnline { + continue + } + // Verify the selected machine itself has an active WS connection. + // SessionConnCount(sessionID) only tells whether *someone* is connected, + // which may still select an offline/stale member and cause 60s timeouts. + if !hub.IsMachineOnline(sessionID, m.MachineID) { + continue + } + if leaderMachineID != "" && m.MachineID == leaderMachineID { + leaderMemberID = m.ID + continue + } + return m.ID, m.MachineID, nil + } + + // No online teammate available; allow leader self-decomposition. + if leaderMachineID != "" && hub.IsMachineOnline(sessionID, leaderMachineID) { + return leaderMemberID, leaderMachineID, nil + } + + return "", "", fmt.Errorf("no online teammate or leader available for decomposition") +} diff --git a/server/internal/team/handlers.go b/server/internal/team/handlers.go new file mode 100644 index 0000000..db6cf48 --- /dev/null +++ b/server/internal/team/handlers.go @@ -0,0 +1,2169 @@ +package team + +import ( + "context" + "encoding/json" + "net/http" + "time" + + "github.com/costrict/costrict-web/server/internal/logger" + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/gorilla/websocket" +) + +var upgrader = websocket.Upgrader{ + ReadBufferSize: DefaultWSReadBufferSize, + WriteBufferSize: DefaultWSWriteBufferSize, + CheckOrigin: func(r *http.Request) bool { + return true // origin check handled by upstream middleware + }, +} + +// Handler holds the dependencies for all team HTTP/WS endpoints. 
+type Handler struct { + store *Store + hub *Hub + pushAssignedTaskFn func(ctx context.Context, sessionID string, machineID string, userID string, task TeamTask) error +} + +func NewHandler(store *Store, hub *Hub) *Handler { + return &Handler{store: store, hub: hub} +} + +func (h *Handler) SetAssignedTaskPusher(fn func(ctx context.Context, sessionID string, machineID string, userID string, task TeamTask) error) { + h.pushAssignedTaskFn = fn +} + +// ─── Session ─────────────────────────────────────────────────────────────── + +// CreateSession godoc +// @Summary Create team session +// @Tags team +// @Accept json +// @Produce json +// @Param body body object{name=string} true "Session data" +// @Success 201 {object} TeamSession +// @Router /team/sessions [post] +func (h *Handler) CreateSession(c *gin.Context) { + userID := c.GetString("userId") + if userID == "" { + c.JSON(http.StatusUnauthorized, gin.H{"error": "authentication required"}) + return + } + + var req struct { + Name string `json:"name" binding:"required"` + } + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "name is required"}) + return + } + + sess := &TeamSession{ + ID: uuid.New().String(), + Name: req.Name, + CreatorID: userID, + Status: SessionStatusActive, + } + if err := h.store.CreateSession(sess); err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create session"}) + return + } + c.JSON(http.StatusCreated, sess) +} + +// GetSession godoc +// @Summary Get team session +// @Tags team +// @Produce json +// @Param id path string true "Session ID" +// @Success 200 {object} TeamSession +// @Router /team/sessions/:id [get] +func (h *Handler) GetSession(c *gin.Context) { + sess, err := h.store.GetSession(c.Param("id")) + if err != nil || sess == nil { + c.JSON(http.StatusNotFound, gin.H{"error": "session not found"}) + return + } + c.JSON(http.StatusOK, sess) +} + +// ListSessions godoc +// @Summary List sessions for the 
current user +// @Tags team +// @Produce json +// @Success 200 {object} object{sessions=[]TeamSession} +// @Router /team/sessions [get] +func (h *Handler) ListSessions(c *gin.Context) { + userID := c.GetString("userId") + if userID == "" { + c.JSON(http.StatusUnauthorized, gin.H{"error": "authentication required"}) + return + } + sessions, err := h.store.ListSessionsByCreator(userID) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list sessions"}) + return + } + c.JSON(http.StatusOK, gin.H{"sessions": sessions}) +} + +// UpdateSession godoc +// @Summary Update team session status +// @Tags team +// @Accept json +// @Produce json +// @Param id path string true "Session ID" +// @Param body body object{status=string} true "Status" +// @Success 200 {object} TeamSession +// @Router /team/sessions/:id [patch] +// requireSessionLeader checks that the caller is the session creator or the current +// leader. Returns the session on success, writes error response and returns nil on failure. 
+func (h *Handler) requireSessionLeader(c *gin.Context) *TeamSession { + sessionID := c.Param("id") + sess, err := h.store.GetSession(sessionID) + if err != nil || sess == nil { + c.JSON(http.StatusNotFound, gin.H{"error": "session not found"}) + return nil + } + userID := c.GetString("userId") + leaderMachineID := h.hub.GetLeaderMachineID(sessionID) + if leaderMachineID == "" { + leaderMachineID = sess.LeaderMachineID + } + if sess.CreatorID != userID && leaderMachineID == "" { + c.JSON(http.StatusForbidden, gin.H{"error": "only the session creator or leader can perform this action"}) + return nil + } + return sess +} + +func (h *Handler) UpdateSession(c *gin.Context) { + sess := h.requireSessionLeader(c) + if sess == nil { + return + } + var req struct { + Status string `json:"status"` + Name string `json:"name"` + } + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + updates := map[string]any{} + if req.Status != "" { + updates["status"] = req.Status + } + if req.Name != "" { + updates["name"] = req.Name + } + if len(updates) == 0 { + c.JSON(http.StatusBadRequest, gin.H{"error": "nothing to update"}) + return + } + + if err := h.store.UpdateSession(sess.ID, updates); err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to update session"}) + return + } + + updated, _ := h.store.GetSession(sess.ID) + c.JSON(http.StatusOK, updated) +} + +// DeleteSession godoc +// @Summary Close / delete team session +// @Tags team +// @Param id path string true "Session ID" +// @Success 200 {object} object{message=string} +// @Router /team/sessions/:id [delete] +func (h *Handler) DeleteSession(c *gin.Context) { + if sess := h.requireSessionLeader(c); sess == nil { + return + } + sessionID := c.Param("id") + if err := h.store.DeleteSession(sessionID); err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to delete session"}) + return + } + 
c.JSON(http.StatusOK, gin.H{"message": "session deleted"}) +} + +// ensureSessionMember upserts a machine membership for a session. +// It revives soft-deleted rows and also supports legacy schemas where machine_id +// was globally unique across sessions. +func (h *Handler) ensureSessionMember( + sessionID string, + userID string, + machineID string, + machineName string, +) (*TeamSessionMember, error) { + // Fast path: already joined in this session. + existing, err := h.store.GetMemberByMachine(sessionID, machineID) + if err != nil { + return nil, err + } + if existing != nil { + now := time.Now() + if err := h.store.UpdateMember(existing.ID, map[string]any{ + "user_id": userID, + "machine_name": machineName, + "status": MemberStatusOnline, + "last_heartbeat": now, + }); err != nil { + return nil, err + } + return h.store.GetMember(existing.ID) + } + + now := time.Now() + revive := func(memberID string) (*TeamSessionMember, error) { + if err := h.store.db.Unscoped(). + Model(&TeamSessionMember{}). + Where("id = ?", memberID). + Updates(map[string]any{ + "session_id": sessionID, + "user_id": userID, + "machine_id": machineID, + "machine_name": machineName, + "role": MemberRoleTeammate, + "status": MemberStatusOnline, + "connected_at": now, + "last_heartbeat": now, + "deleted_at": nil, + }).Error; err != nil { + return nil, err + } + return h.store.GetMember(memberID) + } + + // If a soft-deleted row exists for the same session+machine, revive it. 
+ softDeleted, err := h.store.GetMemberByMachineUnscoped(sessionID, machineID) + if err != nil { + return nil, err + } + if softDeleted != nil { + return revive(softDeleted.ID) + } + + member := &TeamSessionMember{ + ID: uuid.New().String(), + SessionID: sessionID, + UserID: userID, + MachineID: machineID, + MachineName: machineName, + Role: MemberRoleTeammate, + Status: MemberStatusOnline, + ConnectedAt: now, + LastHeartbeat: now, + } + createErr := h.store.CreateMember(member) + if createErr == nil { + return member, nil + } + + // Legacy fallback: global machine uniqueness (or stale row) blocked insert. + legacy, err := h.store.GetMemberByMachineAnySessionUnscoped(machineID) + if err != nil { + return nil, createErr + } + if legacy != nil { + return revive(legacy.ID) + } + return nil, createErr +} + +// assignPendingTasksIfPossible re-runs scheduling for pending tasks and emits +// fresh task.assigned events when members come online. +func (h *Handler) assignPendingTasksIfPossible(sessionID string) { + pending, err := h.store.ListPendingTasks(sessionID) + if err != nil || len(pending) == 0 { + return + } + + schedCtx, err := h.buildSchedulingContext(sessionID) + if err != nil { + return + } + + scheduled := ScheduleTasks(*schedCtx, pending) + var assigned []TeamTask + for _, task := range scheduled { + if task.AssignedMemberID == nil || task.Status != TaskStatusAssigned { + continue + } + if err := h.store.UpdateTask(task.ID, map[string]any{ + "assigned_member_id": *task.AssignedMemberID, + "status": TaskStatusAssigned, + }); err != nil { + continue + } + assigned = append(assigned, task) + } + + if len(assigned) > 0 { + h.notifyAssignedMachines(sessionID, assigned) + } +} + +// ─── Members ─────────────────────────────────────────────────────────────── + +// JoinSession godoc +// @Summary Join team session +// @Tags team +// @Accept json +// @Produce json +// @Param id path string true "Session ID" +// @Param body body 
object{machineId=string,machineName=string} true "Machine info"
+// @Success 201 {object} TeamSessionMember
+// @Router /team/sessions/:id/members [post]
+func (h *Handler) JoinSession(c *gin.Context) {
+	userID := c.GetString("userId")
+	if userID == "" {
+		c.JSON(http.StatusUnauthorized, gin.H{"error": "authentication required"})
+		return
+	}
+
+	sessionID := c.Param("id")
+	sess, err := h.store.GetSession(sessionID)
+	if err != nil || sess == nil {
+		c.JSON(http.StatusNotFound, gin.H{"error": "session not found"})
+		return
+	}
+
+	var req struct {
+		MachineID   string `json:"machineId" binding:"required"`
+		MachineName string `json:"machineName"`
+	}
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "machineId is required"})
+		return
+	}
+
+	// Capacity is measured by live WS connections, not membership rows, so a
+	// re-joining machine with a stale row does not consume an extra slot.
+	if h.hub.SessionConnCount(sessionID) >= DefaultMaxConnectionsPerSession {
+		c.JSON(http.StatusTooManyRequests, gin.H{"error": "session is full"})
+		return
+	}
+
+	member, err := h.ensureSessionMember(sessionID, userID, req.MachineID, req.MachineName)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to join session"})
+		return
+	}
+	// A new member may unblock pending tasks — re-run scheduling immediately.
+	h.assignPendingTasksIfPossible(sessionID)
+	c.JSON(http.StatusCreated, member)
+}
+
+// ListMembers godoc
+// @Summary List session members
+// @Tags team
+// @Produce json
+// @Param id path string true "Session ID"
+// @Success 200 {object} object{members=[]TeamSessionMember}
+// @Router /team/sessions/:id/members [get]
+func (h *Handler) ListMembers(c *gin.Context) {
+	members, err := h.store.ListMembers(c.Param("id"))
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list members"})
+		return
+	}
+	c.JSON(http.StatusOK, gin.H{"members": members})
+}
+
+// LeaveSession godoc
+// @Summary Leave team session
+// @Tags team
+// @Param id path string true "Session ID"
+// @Param mid path string true "Member ID"
+// @Success 200 {object} object{message=string}
+// @Router
/team/sessions/:id/members/:mid [delete]
+// NOTE(review): this handler deletes the member row by :mid alone — the :id
+// path param is ignored and no creator/leader/self check is performed, so any
+// authenticated caller can remove any member. Confirm whether that is intended.
+func (h *Handler) LeaveSession(c *gin.Context) {
+	if err := h.store.DeleteMember(c.Param("mid")); err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to leave session"})
+		return
+	}
+	c.JSON(http.StatusOK, gin.H{"message": "left session"})
+}
+
+// ─── Tasks ─────────────────────────────────────────────────────────────────
+
+// SubmitTaskPlan godoc
+// @Summary Submit task plan (Leader)
+// @Tags team
+// @Accept json
+// @Produce json
+// @Param id path string true "Session ID"
+// @Param body body object{tasks=[]TeamTask,fencingToken=integer} true "Task plan"
+// @Success 201 {object} object{tasks=[]TeamTask}
+// @Router /team/sessions/:id/tasks [post]
+func (h *Handler) SubmitTaskPlan(c *gin.Context) {
+	sessionID := c.Param("id")
+
+	var req struct {
+		Tasks        []TeamTask `json:"tasks" binding:"required"`
+		FencingToken int64      `json:"fencingToken"`
+	}
+	if err := c.ShouldBindJSON(&req); err != nil || len(req.Tasks) == 0 {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "tasks array is required"})
+		return
+	}
+
+	// NOTE(review): unlike DecomposeTask/Explore (which skip validation when
+	// the token is 0), this validates unconditionally — an omitted token is
+	// checked as 0. Confirm ValidateFencingToken accepts 0 for sessions with
+	// no elected leader, otherwise plans cannot be submitted pre-election.
+	if !h.hub.ValidateFencingToken(sessionID, req.FencingToken) {
+		c.JSON(http.StatusConflict, gin.H{"error": "stale leader: fencing token rejected"})
+		return
+	}
+
+	for i := range req.Tasks {
+		// Respect a client-supplied ID (needed for dependency DAGs where tasks
+		// in the same batch reference each other by ID). Only generate a new UUID
+		// when the client did not provide one.
+		if req.Tasks[i].ID == "" {
+			req.Tasks[i].ID = uuid.New().String()
+		}
+		req.Tasks[i].SessionID = sessionID
+		req.Tasks[i].Status = TaskStatusPending
+		if req.Tasks[i].Priority == 0 {
+			req.Tasks[i].Priority = 5
+		}
+		if req.Tasks[i].MaxRetries == 0 {
+			req.Tasks[i].MaxRetries = 3
+		}
+	}
+
+	// Auto-assign tasks via P1-P5 repo affinity scheduling
+	schedCtx, schedErr := h.buildSchedulingContext(sessionID)
+	if schedErr == nil {
+		req.Tasks = ScheduleTasks(*schedCtx, req.Tasks)
+	}
+
+	if err := h.store.CreateTasks(req.Tasks); err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create tasks"})
+		return
+	}
+
+	// Notify assigned machines
+	for _, task := range req.Tasks {
+		if task.AssignedMemberID == nil {
+			continue
+		}
+		member, _ := h.store.GetMember(*task.AssignedMemberID)
+		if member == nil {
+			continue
+		}
+		h.dispatchAssignedTaskToMachine(sessionID, member, task)
+	}
+
+	c.JSON(http.StatusCreated, gin.H{"tasks": req.Tasks})
+}
+
+// DecomposeTask godoc
+// @Summary Decompose a prompt into sub-tasks using LLM (Leader)
+// @Tags team
+// @Accept json
+// @Produce json
+// @Param id path string true "Session ID"
+// @Param body body DecomposeRequest true "Prompt to decompose"
+// @Success 201 {object} object{tasks=[]TeamTask}
+// @Failure 503 {object} object{error=string}
+// @Router /team/sessions/:id/decompose [post]
+func (h *Handler) DecomposeTask(c *gin.Context) {
+	sessionID := c.Param("id")
+	sess, err := h.store.GetSession(sessionID)
+	if err != nil || sess == nil {
+		c.JSON(http.StatusNotFound, gin.H{"error": "session not found"})
+		return
+	}
+
+	var req DecomposeRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "prompt is required"})
+		return
+	}
+
+	// Validate fencing token — only the current leader may decompose
+	// (token 0 deliberately skips the check; presumably for pre-election or
+	// legacy clients — TODO confirm).
+	if req.FencingToken != 0 {
+		if !h.hub.ValidateFencingToken(sessionID, req.FencingToken) {
+			c.JSON(http.StatusConflict, gin.H{"error": "stale leader: fencing token rejected"})
+			return
+		}
+	}
+
+	// Shared degraded path: build a single fallback task, schedule it, persist
+	// (unless DryRun), then broadcast the plan and notify assignees.
+	createAndBroadcastFallbackTasks := func() ([]TeamTask, error) {
+		tasks := buildFallbackTasks(req.Prompt, sessionID)
+		schedCtx, schedErr := h.buildSchedulingContext(sessionID)
+		if schedErr == nil {
+			tasks = ScheduleTasks(*schedCtx, tasks)
+		}
+		if req.DryRun {
+			return tasks, nil
+		}
+		if storeErr := h.store.CreateTasks(tasks); storeErr != nil {
+			return nil, storeErr
+		}
+		h.hub.Broadcast(sessionID, newEvent(EventTaskPlanSubmit, sessionID, map[string]any{"tasks": tasks}))
+		h.notifyAssignedMachines(sessionID, tasks)
+		return tasks, nil
+	}
+
+	// Pick an online teammate to delegate decomposition to
+	_, targetMachineID, err := pickDecomposeTarget(h.hub, h.store, sessionID)
+	if err != nil {
+		// No teammate available — fallback to a single task
+		tasks, storeErr := createAndBroadcastFallbackTasks()
+		if storeErr != nil {
+			c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create tasks"})
+			return
+		}
+		c.JSON(http.StatusCreated, gin.H{"tasks": tasks})
+		return
+	}
+	leaderMachineID := h.hub.GetLeaderMachineID(sessionID)
+	if leaderMachineID == "" {
+		leaderMachineID = sess.LeaderMachineID
+	}
+	// If decomposition target falls back to the leader machine itself, avoid
+	// waiting for a decompose.result loop that may not exist in single-device mode.
+	if leaderMachineID != "" && targetMachineID == leaderMachineID {
+		tasks, storeErr := createAndBroadcastFallbackTasks()
+		if storeErr != nil {
+			c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create tasks"})
+			return
+		}
+		c.JSON(http.StatusCreated, gin.H{
+			"tasks":    tasks,
+			"degraded": true,
+			"reason":   "single_leader_fallback",
+			"message":  "no online teammate available; used fallback single-task decomposition",
+		})
+		return
+	}
+
+	requestID := uuid.New().String()
+
+	// Register a channel to receive the decompose result
+	resultCh := h.hub.RegisterDecompose(requestID)
+	defer h.hub.CancelDecompose(requestID)
+
+	// Send decompose.request to the target teammate via WS
+	evt := newEvent(EventDecomposeRequest, sessionID, map[string]any{
+		"requestId": requestID,
+		"prompt":    req.Prompt,
+		"context":   req.Context,
+	})
+	h.hub.SendToMachine(sessionID, targetMachineID, evt)
+
+	// Wait up to 60 s for the decompose.result
+	ctx, cancel := context.WithTimeout(c.Request.Context(), 60*time.Second)
+	defer cancel()
+
+	select {
+	case result := <-resultCh:
+		// Parse the result items into TeamTask records
+		itemsRaw, ok := result.Payload["tasks"].([]any)
+		if !ok || len(itemsRaw) == 0 {
+			tasks, storeErr := createAndBroadcastFallbackTasks()
+			if storeErr != nil {
+				c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create tasks"})
+				return
+			}
+			c.JSON(http.StatusCreated, gin.H{"tasks": tasks})
+			return
+		}
+
+		var tasks []TeamTask
+		for _, raw := range itemsRaw {
+			// Round-trip each item through JSON to coerce map[string]any into
+			// the typed DecomposeResultItem; malformed items are skipped.
+			data, _ := json.Marshal(raw)
+			var item DecomposeResultItem
+			if json.Unmarshal(data, &item) != nil || item.Description == "" {
+				continue
+			}
+			tasks = append(tasks, item.toTeamTask(sessionID))
+		}
+		if len(tasks) == 0 {
+			tasks = buildFallbackTasks(req.Prompt, sessionID)
+		}
+
+		// Auto-assign tasks via P1-P5 repo affinity scheduling
+		schedCtx, schedErr := h.buildSchedulingContext(sessionID)
+		if schedErr == nil {
+			tasks = ScheduleTasks(*schedCtx, tasks)
+		}
+
+		if req.DryRun {
+			c.JSON(http.StatusCreated, gin.H{"tasks": tasks, "dryRun": true})
+			return
+		}
+
+		if storeErr := h.store.CreateTasks(tasks); storeErr != nil {
+			c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create tasks"})
+			return
+		}
+
+		// Broadcast task plan to all session members
+		h.hub.Broadcast(sessionID, newEvent(EventTaskPlanSubmit, sessionID, map[string]any{"tasks": tasks}))
+
+		// Notify assigned machines
+		for _, task := range tasks {
+			if task.AssignedMemberID == nil {
+				continue
+			}
+			member, _ := h.store.GetMember(*task.AssignedMemberID)
+			if member == nil {
+				continue
+			}
+			h.dispatchAssignedTaskToMachine(sessionID, member, task)
+		}
+
+		c.JSON(http.StatusCreated, gin.H{"tasks": tasks})
+
+	case <-ctx.Done():
+		// Timeout — fallback to single task, but still return success with a degraded flag.
+		tasks, storeErr := createAndBroadcastFallbackTasks()
+		if storeErr != nil {
+			c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create tasks"})
+			return
+		}
+		c.JSON(http.StatusCreated, gin.H{
+			"tasks":    tasks,
+			"degraded": true,
+			"reason":   "decompose_timeout",
+			"message":  "decompose request timed out (teammate did not respond within 60s); used fallback single-task decomposition",
+		})
+	}
+}
+
+// ListTasks godoc
+// @Summary List session tasks
+// @Tags team
+// @Produce json
+// @Param id path string true "Session ID"
+// @Success 200 {object} object{tasks=[]TeamTask}
+// @Router /team/sessions/:id/tasks [get]
+func (h *Handler) ListTasks(c *gin.Context) {
+	tasks, err := h.store.ListTasks(c.Param("id"))
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list tasks"})
+		return
+	}
+	c.JSON(http.StatusOK, gin.H{"tasks": tasks})
+}
+
+// TerminateTask godoc
+// @Summary Terminate a task (Leader)
+// @Tags team
+// @Accept json
+// @Produce json
+// @Param id path string true "Session ID"
+// @Param taskId path string true "Task ID"
+// @Param body body
object{reason=string,fencingToken=integer} false "Termination options"
+// @Success 200 {object} TeamTask
+// @Router /team/sessions/:id/tasks/:taskId/terminate [post]
+func (h *Handler) TerminateTask(c *gin.Context) {
+	sessionID := c.Param("id")
+	taskID := c.Param("taskId")
+
+	sess, err := h.store.GetSession(sessionID)
+	if err != nil || sess == nil {
+		c.JSON(http.StatusNotFound, gin.H{"error": "session not found"})
+		return
+	}
+
+	var req struct {
+		Reason       string `json:"reason"`
+		FencingToken int64  `json:"fencingToken"`
+	}
+	if err := c.ShouldBindJSON(&req); err != nil {
+		// allow empty body
+		req = struct {
+			Reason       string `json:"reason"`
+			FencingToken int64  `json:"fencingToken"`
+		}{}
+	}
+
+	// A fencing token is mandatory only when the session has (or had) a leader;
+	// leaderless sessions may terminate without one.
+	leaderMachineID := h.hub.GetLeaderMachineID(sessionID)
+	if leaderMachineID == "" {
+		leaderMachineID = sess.LeaderMachineID
+	}
+	if leaderMachineID != "" && req.FencingToken == 0 {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "fencingToken is required"})
+		return
+	}
+	if req.FencingToken != 0 && !h.hub.ValidateFencingToken(sessionID, req.FencingToken) {
+		c.JSON(http.StatusConflict, gin.H{"error": "stale leader: fencing token rejected"})
+		return
+	}
+
+	task, err := h.store.GetTask(taskID)
+	if err != nil || task == nil {
+		c.JSON(http.StatusNotFound, gin.H{"error": "task not found"})
+		return
+	}
+	if task.SessionID != sessionID {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "task does not belong to session"})
+		return
+	}
+
+	reason := req.Reason
+	if reason == "" {
+		reason = "terminated by leader"
+	}
+
+	if err := h.store.UpdateTask(taskID, map[string]any{
+		"status":        TaskStatusInterrupted,
+		"error_message": reason,
+	}); err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to terminate task"})
+		return
+	}
+
+	updated, _ := h.store.GetTask(taskID)
+	if updated != nil {
+		interruptedPayload := map[string]any{
+			"taskId": taskID,
+			"reason": reason,
+		}
+		// Tell the executing machine directly to stop, then inform the whole
+		// session that the task was interrupted.
+		if updated.AssignedMemberID != nil {
+			if member, _ := h.store.GetMember(*updated.AssignedMemberID); member != nil {
+				interruptedPayload["machineId"] = member.MachineID
+				h.hub.SendToMachine(sessionID, member.MachineID, newEvent(EventTaskTerminate, sessionID, map[string]any{
+					"taskId": taskID,
+					"reason": reason,
+				}))
+			}
+		}
+		h.hub.Broadcast(sessionID, newEvent(EventTaskInterrupted, sessionID, interruptedPayload))
+	}
+
+	c.JSON(http.StatusOK, updated)
+}
+
+// GetTask godoc
+// @Summary Get task
+// @Tags team
+// @Produce json
+// @Param taskId path string true "Task ID"
+// @Success 200 {object} TeamTask
+// @Router /team/tasks/:taskId [get]
+func (h *Handler) GetTask(c *gin.Context) {
+	task, err := h.store.GetTask(c.Param("taskId"))
+	if err != nil || task == nil {
+		c.JSON(http.StatusNotFound, gin.H{"error": "task not found"})
+		return
+	}
+	c.JSON(http.StatusOK, task)
+}
+
+// UpdateTask godoc
+// @Summary Update task status/result (Teammate)
+// @Tags team
+// @Accept json
+// @Produce json
+// @Param taskId path string true "Task ID"
+// @Param body body object{status=string,result=object,errorMessage=string} true "Task update"
+// @Success 200 {object} TeamTask
+// @Router /team/tasks/:taskId [patch]
+// NOTE(review): no authentication or assignee check — any caller can update any
+// task by ID; confirm this is guarded by middleware elsewhere.
+func (h *Handler) UpdateTask(c *gin.Context) {
+	taskID := c.Param("taskId")
+
+	var req struct {
+		Status       string         `json:"status"`
+		Result       map[string]any `json:"result"`
+		ErrorMessage string         `json:"errorMessage"`
+	}
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
+		return
+	}
+
+	// Stamp lifecycle timestamps server-side based on the reported status.
+	updates := map[string]any{}
+	now := time.Now()
+	if req.Status != "" {
+		updates["status"] = req.Status
+		switch req.Status {
+		case TaskStatusRunning:
+			updates["started_at"] = now
+		case TaskStatusCompleted:
+			updates["completed_at"] = now
+		case TaskStatusFailed:
+			updates["error_message"] = req.ErrorMessage
+		}
+	}
+	if req.Result != nil {
+		data, _ := json.Marshal(req.Result)
+		updates["result"] = data
+	}
+
+	if err := h.store.UpdateTask(taskID, updates); err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to update task"})
+		return
+	}
+
+	task, _ := h.store.GetTask(taskID)
+
+	// Broadcast status change to the session
+	// NOTE(review): this emits EventSessionUpdated for a task-level change —
+	// confirm clients expect task status under that event type.
+	if task != nil {
+		evt := newEvent(EventSessionUpdated, task.SessionID, map[string]any{
+			"taskId": taskID,
+			"status": req.Status,
+		})
+		h.hub.Broadcast(task.SessionID, evt)
+
+		// Unlock any dependent tasks whose all dependencies are now completed
+		if req.Status == TaskStatusCompleted {
+			h.notifyUnlockedTasks(task.SessionID, taskID)
+		}
+	}
+
+	c.JSON(http.StatusOK, task)
+}
+
+// ─── Approvals ─────────────────────────────────────────────────────────────
+
+// ListApprovals godoc
+// @Summary List pending approvals
+// @Tags team
+// @Produce json
+// @Param id path string true "Session ID"
+// @Success 200 {object} object{approvals=[]TeamApprovalRequest}
+// @Router /team/sessions/:id/approvals [get]
+func (h *Handler) ListApprovals(c *gin.Context) {
+	approvals, err := h.store.ListPendingApprovals(c.Param("id"))
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list approvals"})
+		return
+	}
+	c.JSON(http.StatusOK, gin.H{"approvals": approvals})
+}
+
+// RespondApproval godoc
+// @Summary Respond to approval request (Leader)
+// @Tags team
+// @Accept json
+// @Produce json
+// @Param approvalId path string true "Approval ID"
+// @Param body body object{status=string,feedback=string} true "Response"
+// @Success 200 {object} TeamApprovalRequest
+// @Router /team/approvals/:approvalId [patch]
+func (h *Handler) RespondApproval(c *gin.Context) {
+	approvalID := c.Param("approvalId")
+	// Only the leader can respond to approvals
+	approval, err := h.store.GetApproval(approvalID)
+	if err != nil || approval == nil {
+		c.JSON(http.StatusNotFound, gin.H{"error": "approval not found"})
+		return
+	}
+	// Caller's machine: query param first, then auth-middleware context.
+	// NOTE(review): when no live leader lock exists (leaderMachineID == ""),
+	// the check below passes for any caller — confirm that is intended.
+	leaderMachineID := h.hub.GetLeaderMachineID(approval.SessionID)
+	machineID := c.Query("machineId")
+	if machineID == "" {
+		machineID = c.GetString("machineId")
+	}
+	if leaderMachineID != "" && machineID != leaderMachineID {
+		c.JSON(http.StatusForbidden, gin.H{"error": "only the leader can respond to approvals"})
+		return
+	}
+
+	var req struct {
+		Status   string `json:"status" binding:"required"` // approved | rejected
+		Feedback string `json:"feedback"`
+	}
+	// NOTE(review): Status is never validated against "approved"/"rejected" —
+	// any non-empty string is persisted as the approval status.
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "status is required"})
+		return
+	}
+
+	now := time.Now()
+	updates := map[string]any{
+		"status":      req.Status,
+		"feedback":    req.Feedback,
+		"resolved_at": now,
+	}
+	if err := h.store.UpdateApproval(approvalID, updates); err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to update approval"})
+		return
+	}
+
+	approval, _ = h.store.GetApproval(approvalID)
+	if approval != nil {
+		// Notify the requesting member
+		member, _ := h.store.GetMember(approval.RequesterID)
+		if member != nil {
+			evt := newEvent(EventApprovalResponse, approval.SessionID, map[string]any{
+				"approvalId": approvalID,
+				"status":     req.Status,
+				"feedback":   req.Feedback,
+			})
+			h.hub.SendToMachine(approval.SessionID, member.MachineID, evt)
+		}
+	}
+
+	c.JSON(http.StatusOK, approval)
+}
+
+// ─── Repo Affinity ─────────────────────────────────────────────────────────
+
+// RegisterRepo godoc
+// @Summary Register / update local repository info (Teammate)
+// @Tags team
+// @Accept json
+// @Produce json
+// @Param id path string true "Session ID"
+// @Param body body TeamRepoAffinity true "Repo info"
+// @Success 200 {object} TeamRepoAffinity
+// @Router /team/sessions/:id/repos [post]
+func (h *Handler) RegisterRepo(c *gin.Context) {
+	sessionID := c.Param("id")
+	var req TeamRepoAffinity
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
+		return
+	}
+	// Server owns the identity fields; client-supplied ID/session are replaced.
+	req.SessionID = sessionID
+	req.ID = uuid.New().String()
+	if req.LastSyncedAt.IsZero() {
+		req.LastSyncedAt = time.Now()
+	}
+
+	if err := h.store.UpsertRepoAffinity(&req); err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to register repo"})
+		return
+	}
+	c.JSON(http.StatusOK, req)
+}
+
+// QueryRepos godoc
+// @Summary Query repo affinity registry
+// @Tags team
+// @Produce json
+// @Param id path string true "Session ID"
+// @Param remoteUrl query string false "Filter by repo remote URL"
+// @Param memberId query string false "Filter by member ID"
+// @Success 200 {object} object{repos=[]TeamRepoAffinity}
+// @Router /team/sessions/:id/repos [get]
+func (h *Handler) QueryRepos(c *gin.Context) {
+	sessionID := c.Param("id")
+	remoteURL := c.Query("remoteUrl")
+	memberID := c.Query("memberId")
+
+	// Exactly one filter is honored; remoteUrl wins when both are supplied.
+	var repos []TeamRepoAffinity
+	var err error
+	switch {
+	case remoteURL != "":
+		repos, err = h.store.ListReposByURL(sessionID, remoteURL)
+	case memberID != "":
+		repos, err = h.store.ListReposByMember(sessionID, memberID)
+	default:
+		c.JSON(http.StatusBadRequest, gin.H{"error": "remoteUrl or memberId query param required"})
+		return
+	}
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to query repos"})
+		return
+	}
+	c.JSON(http.StatusOK, gin.H{"repos": repos})
+}
+
+// ─── Progress ──────────────────────────────────────────────────────────────
+
+// GetProgress godoc
+// @Summary Get session progress snapshot
+// @Tags team
+// @Produce json
+// @Param id path string true "Session ID"
+// @Success 200 {object} SessionProgress
+// @Router /team/sessions/:id/progress [get]
+func (h *Handler) GetProgress(c *gin.Context) {
+	progress, err := h.store.GetProgress(c.Param("id"))
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get progress"})
+		return
+	}
+	c.JSON(http.StatusOK, progress)
+}
+
+// ─── Leader election ───────────────────────────────────────────────────────
+
+// ElectLeader godoc
+// @Summary Attempt leader election
+// @Tags team
+// @Accept json
+// @Produce json
+// @Param id path string true
"Session ID"
+// @Param body body object{machineId=string} true "Candidate"
+// @Success 200 {object} object{elected=bool,fencingToken=integer,leaderId=string}
+// @Router /team/sessions/:id/leader/elect [post]
+func (h *Handler) ElectLeader(c *gin.Context) {
+	sessionID := c.Param("id")
+	var req struct {
+		MachineID            string   `json:"machineId" binding:"required"`
+		Repos                []string `json:"repos,omitempty"`
+		HeartbeatSuccessRate float64  `json:"heartbeatSuccessRate,omitempty"`
+		CPUIdlePercent       float64  `json:"cpuIdlePercent,omitempty"`
+		MemoryFreeMB         float64  `json:"memoryFreeMB,omitempty"`
+		RTTMs                float64  `json:"rttMs,omitempty"`
+	}
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "machineId is required"})
+		return
+	}
+
+	// Persist capability data regardless of election outcome
+	m, _ := h.store.GetMemberByMachine(sessionID, req.MachineID)
+	if m != nil {
+		caps := LeaderCapability{
+			MachineID:            req.MachineID,
+			RepoURLs:             req.Repos,
+			HeartbeatSuccessRate: req.HeartbeatSuccessRate,
+			CPUIdlePercent:       req.CPUIdlePercent,
+			MemoryFreeMB:         req.MemoryFreeMB,
+			RTTMs:                req.RTTMs,
+		}
+		h.store.UpdateMemberCapabilities(m.ID, caps) //nolint:errcheck
+	}
+
+	// Election is decided by the hub's lock; the score below is informational.
+	token, elected := h.hub.TryAcquireLeader(sessionID, req.MachineID)
+
+	// Compute leader score for the candidate
+	var score *LeaderScore
+	if elected {
+		// Fetch session target repos for scoring context
+		targetRepos, _ := h.store.GetSessionTargetRepos(sessionID)
+		s := ScoreLeaderCandidate(LeaderCapability{
+			MachineID:            req.MachineID,
+			RepoURLs:             req.Repos,
+			HeartbeatSuccessRate: req.HeartbeatSuccessRate,
+			CPUIdlePercent:       req.CPUIdlePercent,
+			MemoryFreeMB:         req.MemoryFreeMB,
+			RTTMs:                req.RTTMs,
+		}, targetRepos)
+		score = &s
+
+		// Update member role and persist leader info to DB
+		h.store.UpdateSession(sessionID, map[string]any{ //nolint:errcheck
+			"leader_machine_id": req.MachineID,
+			"fencing_token":     token,
+		})
+		if m != nil {
+			h.store.UpdateMember(m.ID, map[string]any{"role": MemberRoleLeader}) //nolint:errcheck
+		}
+		broadcastPayload := map[string]any{
+			"leaderId":     req.MachineID,
+			"fencingToken": token,
+			"score":        score,
+		}
+		h.hub.Broadcast(sessionID, newEvent(EventLeaderElected, sessionID, broadcastPayload))
+
+		// Reconcile orphaned tasks from previous leader crash
+		go h.reconcileTasksOnLeaderChange(sessionID)
+
+		// Send session snapshot to new leader
+		snapshot := h.buildSessionSnapshot(sessionID)
+		h.hub.SendToMachine(sessionID, req.MachineID, newEvent("leader.snapshot", sessionID, snapshot))
+	}
+
+	resp := gin.H{
+		"elected":      elected,
+		"fencingToken": token,
+		"leaderId":     h.hub.GetLeaderMachineID(sessionID),
+	}
+	if score != nil {
+		resp["score"] = score
+	}
+	c.JSON(http.StatusOK, resp)
+}
+
+// LeaderHeartbeat godoc
+// @Summary Leader lock renewal heartbeat
+// @Tags team
+// @Accept json
+// @Produce json
+// @Param id path string true "Session ID"
+// @Param body body object{machineId=string} true "Leader identity"
+// @Success 200 {object} object{renewed=bool}
+// @Router /team/sessions/:id/leader/heartbeat [post]
+func (h *Handler) LeaderHeartbeat(c *gin.Context) {
+	sessionID := c.Param("id")
+	var req struct {
+		MachineID            string   `json:"machineId" binding:"required"`
+		Repos                []string `json:"repos,omitempty"`
+		HeartbeatSuccessRate float64  `json:"heartbeatSuccessRate,omitempty"`
+		CPUIdlePercent       float64  `json:"cpuIdlePercent,omitempty"`
+		MemoryFreeMB         float64  `json:"memoryFreeMB,omitempty"`
+		RTTMs                float64  `json:"rttMs,omitempty"`
+	}
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "machineId is required"})
+		return
+	}
+
+	// Persist capability updates from the leader
+	m, _ := h.store.GetMemberByMachine(sessionID, req.MachineID)
+	if m != nil {
+		caps := LeaderCapability{
+			MachineID:            req.MachineID,
+			RepoURLs:             req.Repos,
+			HeartbeatSuccessRate: req.HeartbeatSuccessRate,
+			CPUIdlePercent:       req.CPUIdlePercent,
+			MemoryFreeMB:         req.MemoryFreeMB,
+			RTTMs:                req.RTTMs,
+		}
+		h.store.UpdateMemberCapabilities(m.ID, caps) //nolint:errcheck
+	}
+
+	renewed := h.hub.RenewLeader(sessionID, req.MachineID)
+	if !renewed {
+		// Lock expired: broadcast leader expiry so teammates re-elect
+		h.hub.Broadcast(sessionID, newEvent(EventLeaderExpired, sessionID, map[string]any{
+			"expiredLeaderId": req.MachineID,
+		}))
+	}
+	c.JSON(http.StatusOK, gin.H{"renewed": renewed})
+}
+
+// GetLeader godoc
+// @Summary Get current leader info
+// @Tags team
+// @Produce json
+// @Param id path string true "Session ID"
+// @Success 200 {object} object{leaderId=string}
+// @Router /team/sessions/:id/leader [get]
+func (h *Handler) GetLeader(c *gin.Context) {
+	c.JSON(http.StatusOK, gin.H{
+		"leaderId": h.hub.GetLeaderMachineID(c.Param("id")),
+	})
+}
+
+// ─── Remote Explore ────────────────────────────────────────────────────────
+
+// Explore godoc
+// @Summary Synchronous remote code explore (Leader → Teammate)
+// @Description Leader sends explore queries targeting a specific Teammate machine.
+//
+// The cloud server forwards the request via WebSocket, waits up to 30 s
+// for the result, and returns it synchronously.
+//
+// @Tags team
+// @Accept json
+// @Produce json
+// @Param id path string true "Session ID"
+// @Param body body object{targetMachineId=string,queries=[]object} true "Explore request"
+// @Success 200 {object} object{result=object}
+// @Failure 504 {object} object{error=string}
+// @Router /team/sessions/:id/explore [post]
+func (h *Handler) Explore(c *gin.Context) {
+	sessionID := c.Param("id")
+
+	var req struct {
+		TargetMachineID string `json:"targetMachineId" binding:"required"`
+		Queries         []any  `json:"queries"`
+		FencingToken    int64  `json:"fencingToken,omitempty"`
+	}
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "targetMachineId is required"})
+		return
+	}
+
+	// Only the current leader may issue explore requests
+	// NOTE(review): omitting the token (0) skips this check entirely, so the
+	// leader-only restriction is advisory — confirm whether that is intended.
+	if req.FencingToken != 0 {
+		if !h.hub.ValidateFencingToken(sessionID, req.FencingToken) {
+			c.JSON(http.StatusConflict, gin.H{"error": "stale leader: fencing token rejected"})
+			return
+		}
+	}
+
+	requestID := uuid.New().String()
+
+	// Register a channel to receive the explore result
+	resultCh := h.hub.RegisterExplore(requestID)
+	defer h.hub.CancelExplore(requestID)
+
+	// Forward the explore.request to the target machine via WS (or backlog if offline)
+	evt := newEvent(EventExploreRequest, sessionID, map[string]any{
+		"requestId":       requestID,
+		"targetMachineId": req.TargetMachineID,
+		"fromMachineId":   h.hub.GetLeaderMachineID(sessionID),
+		"queries":         req.Queries,
+	})
+	h.hub.SendToMachine(sessionID, req.TargetMachineID, evt)
+
+	// Wait up to 30 s for the explore.result
+	ctx, cancel := context.WithTimeout(c.Request.Context(), 30*time.Second)
+	defer cancel()
+
+	select {
+	case result := <-resultCh:
+		c.JSON(http.StatusOK, gin.H{"result": result.Payload})
+	case <-ctx.Done():
+		c.JSON(http.StatusGatewayTimeout, gin.H{"error": "explore request timed out (target machine did not respond within 30s)"})
+	}
+}
+
+// ─── WebSocket ─────────────────────────────────────────────────────────────
+
+// ServeWS handles the WebSocket upgrade and event loop.
+// WS /ws/sessions/:id?token=&machineId=&userId=&lastEventId=
+func (h *Handler) ServeWS(c *gin.Context) {
+	sessionID := c.Param("id")
+	machineID := c.Query("machineId")
+	userID := c.Query("userId")
+	if userID == "" {
+		userID = c.GetString("userId") // set by auth middleware if JWT present
+	}
+
+	if machineID == "" {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "machineId query param required"})
+		return
+	}
+
+	// Verify session exists
+	sess, err := h.store.GetSession(sessionID)
+	if err != nil || sess == nil {
+		c.JSON(http.StatusNotFound, gin.H{"error": "session not found"})
+		return
+	}
+
+	conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
+	if err != nil {
+		// Upgrade already wrote the HTTP error response; nothing more to do.
+		return
+	}
+
+	wsConn := &WSConnection{
+		ID:           uuid.New().String(),
+		UserID:       userID,
+		MachineID:    machineID,
+		SessionID:    sessionID,
+		Conn:         conn,
+		Send:         make(chan []byte, DefaultSendChannelCapacity),
+		Done:         make(chan struct{}),
+		LastActivity: time.Now().UnixMilli(),
+	}
+
+	h.hub.Register(wsConn)
+	h.hub.MarkPresence(sessionID, machineID)
+
+	// Update member status to online
+	if m, _ := h.store.GetMemberByMachine(sessionID, machineID); m != nil {
+		h.store.UpdateMember(m.ID, map[string]any{ //nolint:errcheck
+			"status":         MemberStatusOnline,
+			"last_heartbeat": time.Now(),
+		})
+	}
+
+	// Drain any queued backlog for this machine
+	// NOTE(review): this send is unguarded and runs BEFORE the write pump is
+	// started — if the backlog exceeds DefaultSendChannelCapacity it blocks
+	// forever. The replay path below uses select/default; consider the same here.
+	backlog := h.hub.DrainBacklog(sessionID, machineID)
+	for _, evt := range backlog {
+		if data, err := jsonMarshal(evt); err == nil {
+			wsConn.Send <- data
+		}
+	}
+
+	// Replay events since lastEventId (for reconnection catch-up)
+	lastEventID := c.Query("lastEventId")
+	if lastEventID != "" {
+		replayed := h.hub.ReplayEvents(sessionID, lastEventID)
+		for _, evt := range replayed {
+			if data, err := jsonMarshal(evt); err == nil {
+				// Drop replayed events if the channel is full rather than block.
+				select {
+				case wsConn.Send <- data:
+				default:
+				}
+			}
+		}
+	}
+
+	// Notify session peers that this machine came online
+	h.hub.Broadcast(sessionID, newEvent(EventTeammateStatus, sessionID, map[string]any{
+		"machineId": machineID,
+		"status":    MemberStatusOnline,
+	}))
+
+	go h.wsWritePump(wsConn)
+	h.wsReadPump(wsConn) // blocks until connection closes
+}
+
+// wsWritePump serialises outbound messages from the Send channel to the wire.
+func (h *Handler) wsWritePump(conn *WSConnection) {
+	ticker := time.NewTicker(WSPingIntervalSec * time.Second)
+	defer func() {
+		ticker.Stop()
+		conn.Conn.Close()
+	}()
+
+	// Refresh the write deadline before every outbound frame.
+	writeDeadline := func() {
+		conn.Conn.SetWriteDeadline(time.Now().Add(WSWriteWaitSec * time.Second)) //nolint:errcheck
+	}
+
+	for {
+		select {
+		case msg, ok := <-conn.Send:
+			writeDeadline()
+			if !ok {
+				// Send channel closed — tell the peer we are going away.
+				conn.Conn.WriteMessage(websocket.CloseMessage, []byte{}) //nolint:errcheck
+				return
+			}
+			if err := conn.Conn.WriteMessage(websocket.TextMessage, msg); err != nil {
+				return
+			}
+
+		case <-ticker.C:
+			// Periodic ping keeps the connection alive and arms the pong handler.
+			writeDeadline()
+			if err := conn.Conn.WriteMessage(websocket.PingMessage, nil); err != nil {
+				return
+			}
+
+		case <-conn.Done:
+			return
+		}
+	}
+}
+
+// wsReadPump reads inbound events from the client and dispatches them.
+func (h *Handler) wsReadPump(conn *WSConnection) { + defer func() { + h.hub.Unregister(conn) + close(conn.Done) + conn.Conn.Close() + + // Mark member offline + if m, _ := h.store.GetMemberByMachine(conn.SessionID, conn.MachineID); m != nil { + h.store.UpdateMember(m.ID, map[string]any{"status": MemberStatusOffline}) //nolint:errcheck + } + + // Interrupt tasks currently running on this machine + h.interruptTasksForMember(conn.SessionID, conn.MachineID) + + // Release leader lock if this machine was the leader + h.hub.ReleaseLeader(conn.SessionID, conn.MachineID) + + // Broadcast offline status + h.hub.Broadcast(conn.SessionID, newEvent(EventTeammateStatus, conn.SessionID, map[string]any{ + "machineId": conn.MachineID, + "status": MemberStatusOffline, + })) + }() + + conn.Conn.SetReadDeadline(time.Now().Add(WSPongWaitSec * time.Second)) //nolint:errcheck + conn.Conn.SetPongHandler(func(string) error { + conn.Conn.SetReadDeadline(time.Now().Add(WSPongWaitSec * time.Second)) //nolint:errcheck + conn.LastActivity = time.Now().UnixMilli() + h.hub.MarkPresence(conn.SessionID, conn.MachineID) + return nil + }) + + for { + _, raw, err := conn.Conn.ReadMessage() + if err != nil { + return + } + conn.LastActivity = time.Now().UnixMilli() + + var evt CloudEvent + if err := json.Unmarshal(raw, &evt); err != nil { + continue + } + h.dispatchClientEvent(conn, evt) + } +} + +// dispatchClientEvent handles inbound CloudEvents from a connected machine. +func (h *Handler) dispatchClientEvent(conn *WSConnection, evt CloudEvent) { + switch evt.Type { + + case EventSessionCreate: + // Client creates a session over the WS connection. + // The session UUID comes from the WS URL (:id). If the session doesn't + // exist yet the server creates it and the connecting machine becomes leader. 
+ name, _ := evt.Payload["name"].(string) + if name == "" { + name = "Session " + conn.SessionID[:8] + } + existing, _ := h.store.GetSession(conn.SessionID) + if existing == nil { + sess := &TeamSession{ + ID: conn.SessionID, + Name: name, + CreatorID: conn.UserID, + Status: SessionStatusActive, + LeaderMachineID: conn.MachineID, + } + h.store.CreateSession(sess) //nolint:errcheck + } + // Attempt leader election for the creator + token, elected := h.hub.TryAcquireLeader(conn.SessionID, conn.MachineID) + if elected { + h.store.UpdateSession(conn.SessionID, map[string]any{ //nolint:errcheck + "leader_machine_id": conn.MachineID, + "fencing_token": token, + }) + h.hub.Broadcast(conn.SessionID, newEvent(EventLeaderElected, conn.SessionID, map[string]any{ + "leaderId": conn.MachineID, + "fencingToken": token, + })) + } + + case EventSessionJoin: + // Upsert the member record for this machine (idempotent reconnect support). + machineName, _ := evt.Payload["machineName"].(string) + h.ensureSessionMember(conn.SessionID, conn.UserID, conn.MachineID, machineName) //nolint:errcheck + h.assignPendingTasksIfPossible(conn.SessionID) + h.hub.Broadcast(conn.SessionID, newEvent(EventTeammateStatus, conn.SessionID, map[string]any{ + "machineId": conn.MachineID, + "status": MemberStatusOnline, + })) + + case EventTaskPlanSubmit: + // Leader submits a task plan over WS. Mirrors the REST SubmitTaskPlan logic. 
+ fencingTokenF, _ := evt.Payload["fencingToken"].(float64) + if !h.hub.ValidateFencingToken(conn.SessionID, int64(fencingTokenF)) { + h.hub.Send(conn.ID, newEvent(EventError, conn.SessionID, map[string]any{ + "message": "stale leader: fencing token rejected", + })) + return + } + tasksRaw, ok := evt.Payload["tasks"].([]any) + if !ok || len(tasksRaw) == 0 { + return + } + var tasks []TeamTask + for _, tr := range tasksRaw { + taskData, _ := json.Marshal(tr) + var t TeamTask + if json.Unmarshal(taskData, &t) == nil { + if t.ID == "" { + t.ID = uuid.New().String() + } + t.SessionID = conn.SessionID + t.Status = TaskStatusPending + if t.Priority == 0 { + t.Priority = 5 + } + if t.MaxRetries == 0 { + t.MaxRetries = 3 + } + tasks = append(tasks, t) + } + } + if len(tasks) == 0 { + return + } + // Auto-assign tasks via P1-P5 repo affinity scheduling + schedCtx, schedErr := h.buildSchedulingContext(conn.SessionID) + if schedErr == nil { + tasks = ScheduleTasks(*schedCtx, tasks) + } + + if err := h.store.CreateTasks(tasks); err != nil { + return + } + for _, task := range tasks { + if task.AssignedMemberID == nil { + continue + } + member, _ := h.store.GetMember(*task.AssignedMemberID) + if member == nil { + continue + } + h.dispatchAssignedTaskToMachine(conn.SessionID, member, task) + } + + case EventLeaderHeartbeat: + // Parse optional capability updates from heartbeat event + var hbcaps LeaderCapability + hbcaps.MachineID = conn.MachineID + if repos, ok := evt.Payload["repos"].([]any); ok { + for _, r := range repos { + if s, ok := r.(string); ok { + hbcaps.RepoURLs = append(hbcaps.RepoURLs, s) + } + } + } + if v, ok := evt.Payload["heartbeatSuccessRate"].(float64); ok { + hbcaps.HeartbeatSuccessRate = v + } + if v, ok := evt.Payload["cpuIdlePercent"].(float64); ok { + hbcaps.CPUIdlePercent = v + } + if v, ok := evt.Payload["memoryFreeMB"].(float64); ok { + hbcaps.MemoryFreeMB = v + } + if v, ok := evt.Payload["rttMs"].(float64); ok { + hbcaps.RTTMs = v + } + if 
len(hbcaps.RepoURLs) > 0 || hbcaps.HeartbeatSuccessRate > 0 || hbcaps.CPUIdlePercent > 0 || hbcaps.MemoryFreeMB > 0 || hbcaps.RTTMs > 0 { + m, _ := h.store.GetMemberByMachine(conn.SessionID, conn.MachineID) + if m != nil { + h.store.UpdateMemberCapabilities(m.ID, hbcaps) //nolint:errcheck + } + } + h.hub.RenewLeader(conn.SessionID, conn.MachineID) + h.hub.MarkPresence(conn.SessionID, conn.MachineID) + + case EventTaskProgress: + taskID, _ := evt.Payload["taskId"].(string) + if taskID != "" { + h.store.UpdateTask(taskID, map[string]any{"status": TaskStatusRunning}) //nolint:errcheck + } + // Relay progress to all session members (so leader's dashboard updates) + h.hub.Broadcast(conn.SessionID, evt) + + case EventTaskComplete: + taskID, _ := evt.Payload["taskId"].(string) + if taskID != "" { + now := time.Now() + result, _ := json.Marshal(evt.Payload["result"]) + h.store.UpdateTask(taskID, map[string]any{ //nolint:errcheck + "status": TaskStatusCompleted, + "completed_at": now, + "result": result, + }) + h.notifyUnlockedTasks(conn.SessionID, taskID) + } + h.hub.Broadcast(conn.SessionID, evt) + + case EventTaskFail: + taskID, _ := evt.Payload["taskId"].(string) + errMsg, _ := evt.Payload["errorMessage"].(string) + if taskID != "" { + retried, err := h.store.RetryTask(taskID) + if err == nil && retried != nil { + // Re-dispatch to the same assigned member + if retried.AssignedMemberID != nil { + member, _ := h.store.GetMember(*retried.AssignedMemberID) + if member != nil { + h.dispatchAssignedTaskToMachine(conn.SessionID, member, *retried) + } + } + } else { + h.store.UpdateTask(taskID, map[string]any{ //nolint:errcheck + "status": TaskStatusFailed, + "error_message": errMsg, + }) + } + } + h.hub.Broadcast(conn.SessionID, evt) + + case EventTaskClaim: + taskID, _ := evt.Payload["taskId"].(string) + if taskID != "" { + member, _ := h.store.GetMemberByMachine(conn.SessionID, conn.MachineID) + if member != nil { + h.store.ClaimTask(taskID, member.ID) //nolint:errcheck + } 
+ } + + case EventApprovalRequest: + // Resolve the requesting member — RequesterID must be a TeamSessionMember UUID. + requester, _ := h.store.GetMemberByMachine(conn.SessionID, conn.MachineID) + if requester == nil { + return // machine not registered as a session member; ignore + } + // Persist approval and push to leader + approval := &TeamApprovalRequest{ + ID: uuid.New().String(), + SessionID: conn.SessionID, + RequesterID: requester.ID, + } + if toolName, ok := evt.Payload["toolName"].(string); ok { + approval.ToolName = toolName + } + if desc, ok := evt.Payload["description"].(string); ok { + approval.Description = desc + } + if risk, ok := evt.Payload["riskLevel"].(string); ok { + approval.RiskLevel = risk + } else { + approval.RiskLevel = "medium" + } + if inp, ok := evt.Payload["toolInput"]; ok { + data, _ := json.Marshal(inp) + approval.ToolInput = data + } + h.store.CreateApproval(approval) //nolint:errcheck + + // Forward to the session's leader machine + sess, _ := h.store.GetSession(conn.SessionID) + if sess != nil && sess.LeaderMachineID != "" { + fwd := newEvent(EventApprovalPush, conn.SessionID, map[string]any{ + "approval": approval, + }) + h.hub.SendToMachine(conn.SessionID, sess.LeaderMachineID, fwd) + } + + case EventApprovalRespond: + approvalID, _ := evt.Payload["approvalId"].(string) + status, _ := evt.Payload["status"].(string) + feedback, _ := evt.Payload["feedback"].(string) + if approvalID != "" && status != "" { + now := time.Now() + h.store.UpdateApproval(approvalID, map[string]any{ //nolint:errcheck + "status": status, + "feedback": feedback, + "resolved_at": now, + }) + approval, _ := h.store.GetApproval(approvalID) + if approval != nil { + member, _ := h.store.GetMember(approval.RequesterID) + if member != nil { + h.hub.SendToMachine(conn.SessionID, member.MachineID, + newEvent(EventApprovalResponse, conn.SessionID, map[string]any{ + "approvalId": approvalID, + "status": status, + "feedback": feedback, + })) + } + } + } + + case 
EventMessageSend: + to, _ := evt.Payload["to"].(string) + if to == "broadcast" { + h.hub.Broadcast(conn.SessionID, newEvent(EventMessageReceive, conn.SessionID, evt.Payload)) + } else if to != "" { + h.hub.SendToMachine(conn.SessionID, to, newEvent(EventMessageReceive, conn.SessionID, evt.Payload)) + } + + case EventRepoRegister: + repoURL, _ := evt.Payload["repoRemoteUrl"].(string) + if repoURL == "" { + return + } + member, _ := h.store.GetMemberByMachine(conn.SessionID, conn.MachineID) + if member == nil { + return + } + affinity := &TeamRepoAffinity{ + ID: uuid.New().String(), + SessionID: conn.SessionID, + MemberID: member.ID, + RepoRemoteURL: repoURL, + LastSyncedAt: time.Now(), + } + if path, ok := evt.Payload["repoLocalPath"].(string); ok { + affinity.RepoLocalPath = path + } + if branch, ok := evt.Payload["currentBranch"].(string); ok { + affinity.CurrentBranch = branch + } + if dirty, ok := evt.Payload["hasUncommittedChanges"].(bool); ok { + affinity.HasUncommittedChanges = dirty + } + h.store.UpsertRepoAffinity(affinity) //nolint:errcheck + + case EventExploreRequest: + // Route explore request to the target machine + targetMachineID, _ := evt.Payload["targetMachineId"].(string) + if targetMachineID != "" { + h.hub.SendToMachine(conn.SessionID, targetMachineID, evt) + } + + case EventExploreResult: + requestID, _ := evt.Payload["requestId"].(string) + // If a synchronous HTTP explore call is waiting, deliver to it. + if requestID != "" { + h.hub.DeliverExplore(requestID, evt) + } + // Also route back to the leader machine via WS (for streaming dashboards). + fromMachineID, _ := evt.Payload["fromMachineId"].(string) + if fromMachineID != "" { + h.hub.SendToMachine(conn.SessionID, fromMachineID, evt) + } + + case EventDecomposeResult: + requestID, _ := evt.Payload["requestId"].(string) + // If a synchronous HTTP decompose call is waiting, deliver to it. 
+ if requestID != "" { + h.hub.DeliverDecompose(requestID, evt) + } + + case EventLeaderElect: + // Parse capability data from WS event (backward compatible) + var caps LeaderCapability + caps.MachineID = conn.MachineID + if repos, ok := evt.Payload["repos"].([]any); ok { + for _, r := range repos { + if s, ok := r.(string); ok { + caps.RepoURLs = append(caps.RepoURLs, s) + } + } + } + if v, ok := evt.Payload["heartbeatSuccessRate"].(float64); ok { + caps.HeartbeatSuccessRate = v + } + if v, ok := evt.Payload["cpuIdlePercent"].(float64); ok { + caps.CPUIdlePercent = v + } + if v, ok := evt.Payload["memoryFreeMB"].(float64); ok { + caps.MemoryFreeMB = v + } + if v, ok := evt.Payload["rttMs"].(float64); ok { + caps.RTTMs = v + } + + // Persist capabilities regardless of election outcome + m, _ := h.store.GetMemberByMachine(conn.SessionID, conn.MachineID) + if m != nil { + h.store.UpdateMemberCapabilities(m.ID, caps) //nolint:errcheck + } + + token, elected := h.hub.TryAcquireLeader(conn.SessionID, conn.MachineID) + if elected { + targetRepos, _ := h.store.GetSessionTargetRepos(conn.SessionID) + score := ScoreLeaderCandidate(caps, targetRepos) + + h.store.UpdateSession(conn.SessionID, map[string]any{ //nolint:errcheck + "leader_machine_id": conn.MachineID, + "fencing_token": token, + }) + if m != nil { + h.store.UpdateMember(m.ID, map[string]any{"role": MemberRoleLeader}) //nolint:errcheck + } + h.hub.Broadcast(conn.SessionID, newEvent(EventLeaderElected, conn.SessionID, map[string]any{ + "leaderId": conn.MachineID, + "fencingToken": token, + "score": score, + })) + } + } +} + +// ─── Helpers ─────────────────────────────────────────────────────────────── + +// notifyUnlockedTasks unlocks dependent tasks whose all dependencies are now +// completed and pushes task.assigned events to their assigned machines. 
+func (h *Handler) notifyUnlockedTasks(sessionID, completedTaskID string) { + unlocked, err := h.store.UnlockDependentTasks(sessionID, completedTaskID) + if err != nil || len(unlocked) == 0 { + return + } + for _, t := range unlocked { + if t.AssignedMemberID == nil { + continue + } + member, _ := h.store.GetMember(*t.AssignedMemberID) + if member == nil { + continue + } + h.dispatchAssignedTaskToMachine(sessionID, member, t) + } +} + +// interruptTasksForMember finds all running tasks assigned to a member +// whose machineID matches, transitions them to "interrupted", and broadcasts +// the change so the leader and other teammates see it immediately. +func (h *Handler) interruptTasksForMember(sessionID, machineID string) { + member, _ := h.store.GetMemberByMachine(sessionID, machineID) + if member == nil { + return + } + tasks, err := h.store.ListTasks(sessionID) + if err != nil { + return + } + for _, t := range tasks { + if t.AssignedMemberID == nil || *t.AssignedMemberID != member.ID { + continue + } + if t.Status != TaskStatusRunning && t.Status != TaskStatusClaimed && t.Status != TaskStatusAssigned { + continue + } + h.store.UpdateTask(t.ID, map[string]any{ //nolint:errcheck + "status": TaskStatusInterrupted, + }) + h.hub.Broadcast(sessionID, newEvent(EventTaskInterrupted, sessionID, map[string]any{ + "taskId": t.ID, + "machineId": machineID, + })) + } +} + +// ─── Auto-Explore for Context ────────────────────────────────────────────── + +// autoExploreForContext runs lightweight file_tree explore queries against +// online teammates that hold repos referenced in this session. Results are +// aggregated and returned as context for task decomposition. 
+func (h *Handler) autoExploreForContext(sessionID, _ string) map[string]any { + affinities, err := h.store.ListAllRepoAffinities(sessionID) + if err != nil || len(affinities) == 0 { + return nil + } + + members, err := h.store.ListMembers(sessionID) + if err != nil || len(members) == 0 { + return nil + } + + // Build repoURL → first online teammate machineID mapping + repoToMachine := make(map[string]string) + for _, a := range affinities { + if _, exists := repoToMachine[a.RepoRemoteURL]; exists { + continue + } + for _, m := range members { + if m.MachineID != "" && h.hub.IsMachineOnline(sessionID, m.MachineID) { + repoToMachine[a.RepoRemoteURL] = m.MachineID + break + } + } + } + + if len(repoToMachine) == 0 { + return nil + } + + type exploreResult struct { + repoURL string + data map[string]any + } + resultCh := make(chan exploreResult, len(repoToMachine)) + + for repoURL, machineID := range repoToMachine { + go func(rURL, mID string) { + requestID := uuid.New().String() + ch := h.hub.RegisterExplore(requestID) + defer h.hub.CancelExplore(requestID) + + evt := newEvent(EventExploreRequest, sessionID, map[string]any{ + "requestId": requestID, + "targetMachineId": mID, + "fromMachineId": h.hub.GetLeaderMachineID(sessionID), + "queries": []map[string]any{ + {"type": "file_tree", "params": map[string]any{}}, + }, + }) + h.hub.SendToMachine(sessionID, mID, evt) + + select { + case result := <-ch: + resultCh <- exploreResult{repoURL: rURL, data: result.Payload} + case <-time.After(10 * time.Second): + resultCh <- exploreResult{repoURL: rURL, data: nil} + } + }(repoURL, machineID) + } + + results := make([]map[string]any, 0, len(repoToMachine)) + for i := 0; i < len(repoToMachine); i++ { + r := <-resultCh + if r.data != nil { + results = append(results, map[string]any{ + "repoRemoteUrl": r.repoURL, + "result": r.data, + }) + } + } + + if len(results) == 0 { + return nil + } + return map[string]any{"exploreResults": results} +} + +// ─── Orchestrate 
─────────────────────────────────────────────────────────── + +// OrchestrateTask godoc +// @Summary Orchestrate explore→decompose→schedule pipeline +// @Tags team +// @Accept json +// @Produce json +// @Param id path string true "Session ID" +// @Param body body object{prompt=string,fencingToken=integer} true "Orchestrate request" +// @Success 201 {object} object{tasks=[]TeamTask,dryRun=bool} +// @Router /team/sessions/:id/orchestrate [post] +func (h *Handler) OrchestrateTask(c *gin.Context) { + sessionID := c.Param("id") + sess, err := h.store.GetSession(sessionID) + if err != nil || sess == nil { + c.JSON(http.StatusNotFound, gin.H{"error": "session not found"}) + return + } + + var req struct { + Prompt string `json:"prompt" binding:"required"` + FencingToken int64 `json:"fencingToken"` + } + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "prompt is required"}) + return + } + + if req.FencingToken != 0 { + if !h.hub.ValidateFencingToken(sessionID, req.FencingToken) { + c.JSON(http.StatusConflict, gin.H{"error": "stale leader: fencing token rejected"}) + return + } + } + + // Phase 1: Auto-explore + h.hub.Broadcast(sessionID, newEvent(EventOrchestrateProgress, sessionID, map[string]any{ + "phase": "exploring", "message": "Exploring codebases...", + })) + exploreCtx := h.autoExploreForContext(sessionID, req.Prompt) + + // Phase 2: Decompose + h.hub.Broadcast(sessionID, newEvent(EventOrchestrateProgress, sessionID, map[string]any{ + "phase": "decomposing", "message": "Decomposing into tasks...", + })) + + + // Try to delegate decomposition to an online teammate + _, targetMachineID, pickErr := pickDecomposeTarget(h.hub, h.store, sessionID) + leaderMachineID := h.hub.GetLeaderMachineID(sessionID) + if leaderMachineID == "" { + leaderMachineID = sess.LeaderMachineID + } + + // If no teammate or only leader available, use fallback + if pickErr != nil || (leaderMachineID != "" && targetMachineID == leaderMachineID) { + 
tasks := buildFallbackTasks(req.Prompt, sessionID) + schedCtx, schedErr := h.buildSchedulingContext(sessionID) + if schedErr == nil { + tasks = ScheduleTasks(*schedCtx, tasks) + } + c.JSON(http.StatusCreated, gin.H{ + "tasks": tasks, + "dryRun": true, + "context": exploreCtx, + }) + return + } + + // Send decompose.request to teammate + requestID := uuid.New().String() + resultCh := h.hub.RegisterDecompose(requestID) + defer h.hub.CancelDecompose(requestID) + + evt := newEvent(EventDecomposeRequest, sessionID, map[string]any{ + "requestId": requestID, + "prompt": req.Prompt, + "context": exploreCtx, + }) + h.hub.SendToMachine(sessionID, targetMachineID, evt) + + ctx, cancel := context.WithTimeout(c.Request.Context(), 60*time.Second) + defer cancel() + + select { + case result := <-resultCh: + itemsRaw, ok := result.Payload["tasks"].([]any) + if !ok || len(itemsRaw) == 0 { + tasks := buildFallbackTasks(req.Prompt, sessionID) + schedCtx, _ := h.buildSchedulingContext(sessionID) + if schedCtx != nil { + tasks = ScheduleTasks(*schedCtx, tasks) + } + c.JSON(http.StatusCreated, gin.H{"tasks": tasks, "dryRun": true, "context": exploreCtx}) + return + } + + var tasks []TeamTask + for _, raw := range itemsRaw { + data, _ := json.Marshal(raw) + var item DecomposeResultItem + if json.Unmarshal(data, &item) != nil || item.Description == "" { + continue + } + tasks = append(tasks, item.toTeamTask(sessionID)) + } + if len(tasks) == 0 { + tasks = buildFallbackTasks(req.Prompt, sessionID) + } + + schedCtx, _ := h.buildSchedulingContext(sessionID) + if schedCtx != nil { + tasks = ScheduleTasks(*schedCtx, tasks) + } + + c.JSON(http.StatusCreated, gin.H{"tasks": tasks, "dryRun": true, "context": exploreCtx}) + + case <-ctx.Done(): + tasks := buildFallbackTasks(req.Prompt, sessionID) + schedCtx, _ := h.buildSchedulingContext(sessionID) + if schedCtx != nil { + tasks = ScheduleTasks(*schedCtx, tasks) + } + c.JSON(http.StatusCreated, gin.H{ + "tasks": tasks, + "dryRun": true, + 
"context": exploreCtx, + "degraded": true, + "reason": "decompose_timeout", + "message": "decompose request timed out; used fallback single-task decomposition", + }) + } +} + +// buildSchedulingContext assembles the data needed by ScheduleTasks from the +// current session state. It filters members to only those with active WS +// connections (Hub.IsMachineOnline is the source of truth). +func (h *Handler) buildSchedulingContext(sessionID string) (*SchedulingContext, error) { + allMembers, err := h.store.ListMembers(sessionID) + if err != nil { + return nil, err + } + // Keep only members whose machine has a live WS connection + var onlineMembers []TeamSessionMember + for _, m := range allMembers { + if h.hub.IsMachineOnline(sessionID, m.MachineID) { + onlineMembers = append(onlineMembers, m) + } + } + + affinities, err := h.store.ListAllRepoAffinities(sessionID) + if err != nil { + return nil, err + } + + running, err := h.store.GetRunningAssignedTasks(sessionID) + if err != nil { + return nil, err + } + + memberLoad, err := h.store.GetMemberLoadInfo(sessionID) + if err != nil { + return nil, err + } + + return &SchedulingContext{ + Members: onlineMembers, + RepoAffinities: affinities, + RunningTasks: running, + MemberLoad: memberLoad, + }, nil +} + +func newEvent(eventType, sessionID string, payload map[string]any) CloudEvent { + return CloudEvent{ + EventID: uuid.New().String(), + Type: eventType, + SessionID: sessionID, + Timestamp: time.Now().UnixMilli(), + Payload: payload, + } +} + +func jsonMarshal(v any) ([]byte, error) { + return json.Marshal(v) +} + +// notifyAssignedMachines sends task.assigned events to all machines that +// received an assignment in the current batch. 
+func (h *Handler) notifyAssignedMachines(sessionID string, tasks []TeamTask) { + for _, task := range tasks { + if task.AssignedMemberID == nil { + continue + } + member, _ := h.store.GetMember(*task.AssignedMemberID) + if member == nil { + continue + } + h.dispatchAssignedTaskToMachine(sessionID, member, task) + } +} + +func (h *Handler) dispatchAssignedTaskToMachine(sessionID string, member *TeamSessionMember, task TeamTask) { + if member == nil { + return + } + machineID := member.MachineID + + h.hub.SendToMachine(sessionID, machineID, + newEvent(EventTaskAssigned, sessionID, map[string]any{"task": task})) + + if h.pushAssignedTaskFn == nil || machineID == "" { + return + } + + // Push through the cloud gateway path asynchronously so HTTP/WS handlers + // never block on device reachability. + go func(pushedTask TeamTask, targetMachineID string, targetUserID string) { + if err := h.pushAssignedTaskFn(context.Background(), sessionID, targetMachineID, targetUserID, pushedTask); err != nil { + logger.Warn("[team] push assigned task failed session=%s task=%s machine=%s: %v", sessionID, pushedTask.ID, targetMachineID, err) + } + }(task, machineID, member.UserID) +} + +// ─── Leader Crash Recovery ──────────────────────────────────────────────── + +// reconcileTasksOnLeaderChange checks all in-flight tasks after a new leader +// is elected, resets tasks whose assigned machines are offline, and +// re-dispatches them. 
+func (h *Handler) reconcileTasksOnLeaderChange(sessionID string) { + tasks, err := h.store.ListTasks(sessionID) + if err != nil || len(tasks) == 0 { + return + } + + reset := false + for _, t := range tasks { + if t.Status != TaskStatusRunning && t.Status != TaskStatusClaimed && t.Status != TaskStatusAssigned { + continue + } + if t.AssignedMemberID == nil { + // No assignee — just reset to pending + h.store.UpdateTask(t.ID, map[string]any{ //nolint:errcheck + "status": TaskStatusPending, + "assigned_member_id": nil, + }) + h.hub.Broadcast(sessionID, newEvent(EventTaskInterrupted, sessionID, map[string]any{ + "taskId": t.ID, + "reason": "leader_change", + })) + reset = true + continue + } + + // Look up the assigned member's machine + member, _ := h.store.GetMember(*t.AssignedMemberID) + if member == nil || member.MachineID == "" { + continue + } + + if !h.hub.IsMachineOnline(sessionID, member.MachineID) { + h.store.UpdateTask(t.ID, map[string]any{ //nolint:errcheck + "status": TaskStatusPending, + "assigned_member_id": nil, + }) + h.hub.Broadcast(sessionID, newEvent(EventTaskInterrupted, sessionID, map[string]any{ + "taskId": t.ID, + "machineId": member.MachineID, + "reason": "leader_change_machine_offline", + })) + reset = true + } + } + + if reset { + h.assignPendingTasksIfPossible(sessionID) + } +} + +// buildSessionSnapshot assembles a full session state snapshot for a newly +// elected leader, including tasks, approvals, teammates, and repos. 
+func (h *Handler) buildSessionSnapshot(sessionID string) map[string]any {
+	snapshot := map[string]any{}
+
+	// Each section is best-effort: store errors are ignored and the key is
+	// simply omitted from the snapshot when the lookup returns nil.
+	tasks, _ := h.store.ListTasks(sessionID)
+	if tasks != nil {
+		snapshot["tasks"] = tasks
+	}
+
+	approvals, _ := h.store.ListPendingApprovals(sessionID)
+	if approvals != nil {
+		snapshot["approvals"] = approvals
+	}
+
+	members, _ := h.store.ListMembers(sessionID)
+	if members != nil {
+		snapshot["teammates"] = members
+	}
+
+	repos, _ := h.store.ListAllRepoAffinities(sessionID)
+	if repos != nil {
+		snapshot["repos"] = repos
+	}
+
+	snapshot["timestamp"] = time.Now().UnixMilli()
+	return snapshot
+}
diff --git a/server/internal/team/hub.go b/server/internal/team/hub.go
new file mode 100644
index 0000000..5655d0a
--- /dev/null
+++ b/server/internal/team/hub.go
@@ -0,0 +1,549 @@
+package team
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/redis/go-redis/v9"
+)
+
+// Hub manages all active WebSocket connections for team sessions.
+// It handles:
+//   - Connection registration / removal
+//   - Message routing (unicast, broadcast within a session)
+//   - Redis-backed leader lock (distributed election via SET NX + TTL)
+//   - Fencing token (monotonic INCR in Redis)
+//   - Offline message backlog (Redis list, TTL = DefaultEventBacklogTTLMin)
+//   - Synchronous explore channels (request/response pairing for remote explore)
+type Hub struct {
+	mu    sync.RWMutex
+	conns map[string]*WSConnection // connID → conn
+
+	// sessionConns: sessionID → set of connIDs
+	sessionConns map[string]map[string]struct{}
+
+	// machineConns: machineID → connID (one WS per machine per session)
+	machineConns map[string]string // "sessionID:machineID" → connID
+
+	redis *redis.Client // nil → operate without Redis (locks/backlog degrade to no-ops)
+
+	// exploreMu guards exploreChans
+	exploreMu    sync.Mutex
+	exploreChans map[string]chan CloudEvent // requestID → result channel
+
+	// decomposeMu guards decomposeChans
+	decomposeMu    sync.Mutex
+	decomposeChans map[string]chan CloudEvent // requestID → result channel
+
+	// leaderExpiryMu guards leaderExpiredSent
+	leaderExpiryMu    sync.Mutex
+	leaderExpiredSent map[string]bool // sessionID → already broadcast leader.expired
+
+	stopCh chan struct{} // closed on Close() to stop background goroutines
+}
+
+// NewHub constructs a Hub; when a Redis client is supplied it also starts the
+// background leader-expiry watcher goroutine (stopped via Close).
+func NewHub(rc *redis.Client) *Hub {
+	h := &Hub{
+		conns:             make(map[string]*WSConnection),
+		sessionConns:      make(map[string]map[string]struct{}),
+		machineConns:      make(map[string]string),
+		redis:             rc,
+		exploreChans:      make(map[string]chan CloudEvent),
+		decomposeChans:    make(map[string]chan CloudEvent),
+		leaderExpiredSent: make(map[string]bool),
+		stopCh:            make(chan struct{}),
+	}
+	if rc != nil {
+		go h.watchLeaderExpiry()
+	}
+	return h
+}
+
+// Close stops background goroutines. Call on server shutdown.
+// Idempotent: a second Close is a no-op rather than a double-close panic.
+func (h *Hub) Close() {
+	select {
+	case <-h.stopCh:
+		// already closed
+	default:
+		close(h.stopCh)
+	}
+}
+
+// ─── Connection lifecycle ──────────────────────────────────────────────────
+
+// Register adds the connection to all three lookup maps; a later connection
+// for the same session/machine pair overwrites the machineConns entry.
+func (h *Hub) Register(conn *WSConnection) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	h.conns[conn.ID] = conn
+
+	if h.sessionConns[conn.SessionID] == nil {
+		h.sessionConns[conn.SessionID] = make(map[string]struct{})
+	}
+	h.sessionConns[conn.SessionID][conn.ID] = struct{}{}
+
+	key := machineKey(conn.SessionID, conn.MachineID)
+	h.machineConns[key] = conn.ID
+}
+
+// Unregister removes the connection; the machineConns entry is only cleared
+// if it still points at this connection (a reconnect may have replaced it).
+func (h *Hub) Unregister(conn *WSConnection) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	delete(h.conns, conn.ID)
+
+	if subs, ok := h.sessionConns[conn.SessionID]; ok {
+		delete(subs, conn.ID)
+		if len(subs) == 0 {
+			delete(h.sessionConns, conn.SessionID)
+		}
+	}
+
+	key := machineKey(conn.SessionID, conn.MachineID)
+	if h.machineConns[key] == conn.ID {
+		delete(h.machineConns, key)
+	}
+}
+
+// ─── Routing ───────────────────────────────────────────────────────────────
+
+// Send delivers an event to a single connection by connID.
+func (h *Hub) Send(connID string, evt CloudEvent) { + h.mu.RLock() + conn, ok := h.conns[connID] + h.mu.RUnlock() + if !ok { + return + } + h.deliver(conn, evt) +} + +// SendToMachine routes an event to the machine's active WS connection. +// If offline, the event is pushed to the Redis backlog. +func (h *Hub) SendToMachine(sessionID, machineID string, evt CloudEvent) { + h.mu.RLock() + connID, ok := h.machineConns[machineKey(sessionID, machineID)] + var conn *WSConnection + if ok { + conn = h.conns[connID] + } + h.mu.RUnlock() + + if conn != nil { + h.deliver(conn, evt) + return + } + // Machine offline — persist to backlog + h.appendBacklog(sessionID, machineID, evt) +} + +// Broadcast sends an event to every connection in the session. +func (h *Hub) Broadcast(sessionID string, evt CloudEvent) { + h.mu.RLock() + subs := h.sessionConns[sessionID] + targets := make([]*WSConnection, 0, len(subs)) + for cid := range subs { + if c, ok := h.conns[cid]; ok { + targets = append(targets, c) + } + } + h.mu.RUnlock() + + for _, c := range targets { + h.deliver(c, evt) + } +} + +// DrainBacklog pushes all queued events to a reconnected machine. +func (h *Hub) DrainBacklog(sessionID, machineID string) []CloudEvent { + if h.redis == nil { + return nil + } + ctx := context.Background() + key := fmt.Sprintf(redisKeyBacklog, sessionID, machineID) + + data, err := h.redis.LRange(ctx, key, 0, -1).Result() + if err != nil || len(data) == 0 { + return nil + } + h.redis.Del(ctx, key) + + events := make([]CloudEvent, 0, len(data)) + for _, raw := range data { + var evt CloudEvent + if json.Unmarshal([]byte(raw), &evt) == nil { + events = append(events, evt) + } + } + return events +} + +// ─── Leader election (Redis) ─────────────────────────────────────────────── + +// TryAcquireLeader atomically tries to set the leader lock for the session. +// Returns (fencingToken, true) on success, (0, false) if another leader holds it. 
+func (h *Hub) TryAcquireLeader(sessionID, machineID string) (int64, bool) { + if h.redis == nil { + // No Redis: first caller always wins (single-node mode) + token, _ := h.incrFencingToken(sessionID) + h.ResetLeaderExpiredSent(sessionID) + return token, true + } + ctx := context.Background() + lockKey := fmt.Sprintf(redisKeyLeaderLock, sessionID) + ttl := time.Duration(DefaultLeaderLockTTLSec) * time.Second + + res, err := h.redis.SetArgs(ctx, lockKey, machineID, redis.SetArgs{TTL: ttl, Mode: "NX"}).Result() + if err != nil || res != "OK" { + return 0, false + } + token, err := h.incrFencingToken(sessionID) + if err != nil { + return 0, false + } + h.ResetLeaderExpiredSent(sessionID) + return token, true +} + +// RenewLeader refreshes the leader lock TTL. Returns false if the lock is gone. +func (h *Hub) RenewLeader(sessionID, machineID string) bool { + if h.redis == nil { + return true + } + ctx := context.Background() + lockKey := fmt.Sprintf(redisKeyLeaderLock, sessionID) + + current, err := h.redis.Get(ctx, lockKey).Result() + if err != nil || current != machineID { + return false + } + ttl := time.Duration(DefaultLeaderLockTTLSec) * time.Second + h.redis.Expire(ctx, lockKey, ttl) + return true +} + +// ReleaseLeader deletes the leader lock if this machine still owns it. +func (h *Hub) ReleaseLeader(sessionID, machineID string) { + if h.redis == nil { + return + } + ctx := context.Background() + lockKey := fmt.Sprintf(redisKeyLeaderLock, sessionID) + + current, err := h.redis.Get(ctx, lockKey).Result() + if err == nil && current == machineID { + h.redis.Del(ctx, lockKey) + } +} + +// GetLeaderMachineID returns the current leader's machineID, or "" if none. 
func (h *Hub) GetLeaderMachineID(sessionID string) string {
	if h.redis == nil {
		return "" // single-node mode: no lock is tracked
	}
	ctx := context.Background()
	lockKey := fmt.Sprintf(redisKeyLeaderLock, sessionID)
	val, err := h.redis.Get(ctx, lockKey).Result()
	if err != nil {
		return "" // key missing (no leader) or Redis error
	}
	return val
}

// ValidateFencingToken checks that the provided token matches the current
// session token (rejects stale leader writes). A token equal to the stored
// value belongs to the current leader; smaller tokens are stale.
func (h *Hub) ValidateFencingToken(sessionID string, token int64) bool {
	if h.redis == nil {
		return true // single-node mode: nothing to fence against
	}
	ctx := context.Background()
	key := fmt.Sprintf(redisKeyFencingToken, sessionID)
	current, err := h.redis.Get(ctx, key).Int64()
	if err != nil {
		return true // no token stored yet; allow
	}
	return token >= current
}

// IsMachineOnline returns true if the machine has an active WS connection
// in the session. More reliable than DB status which may lag.
func (h *Hub) IsMachineOnline(sessionID, machineID string) bool {
	h.mu.RLock()
	_, ok := h.machineConns[machineKey(sessionID, machineID)]
	h.mu.RUnlock()
	return ok
}

// ─── Presence ──────────────────────────────────────────────────────────────

// MarkPresence records a machine's last-seen timestamp in Redis.
// The entry expires after three leader-lock TTLs of silence.
func (h *Hub) MarkPresence(sessionID, machineID string) {
	if h.redis == nil {
		return // best-effort; presence is unavailable without Redis
	}
	ctx := context.Background()
	key := fmt.Sprintf(redisKeyPresence, sessionID, machineID)
	h.redis.Set(ctx, key, time.Now().UnixMilli(), time.Duration(DefaultLeaderLockTTLSec*3)*time.Second)
}

// SessionConnCount returns the number of live WS connections for a session.
+func (h *Hub) SessionConnCount(sessionID string) int { + h.mu.RLock() + defer h.mu.RUnlock() + return len(h.sessionConns[sessionID]) +} + +// ─── Internal helpers ────────────────────────────────────────────────────── + +func (h *Hub) deliver(conn *WSConnection, evt CloudEvent) { + data, err := json.Marshal(evt) + if err != nil { + return + } + select { + case conn.Send <- data: + default: + // Drop if channel full; caller may handle reconnect + } + + // Append to the session event log for lastEventId replay + h.appendEventLog(conn.SessionID, evt) +} + +func (h *Hub) appendBacklog(sessionID, machineID string, evt CloudEvent) { + if h.redis == nil { + return + } + ctx := context.Background() + key := fmt.Sprintf(redisKeyBacklog, sessionID, machineID) + data, err := json.Marshal(evt) + if err != nil { + return + } + pipe := h.redis.Pipeline() + pipe.RPush(ctx, key, data) + pipe.Expire(ctx, key, time.Duration(DefaultEventBacklogTTLMin)*time.Minute) + pipe.Exec(ctx) //nolint:errcheck +} + +func (h *Hub) incrFencingToken(sessionID string) (int64, error) { + if h.redis == nil { + return time.Now().UnixMilli(), nil + } + ctx := context.Background() + key := fmt.Sprintf(redisKeyFencingToken, sessionID) + return h.redis.Incr(ctx, key).Result() +} + +// appendEventLog stores the event in a Redis list for lastEventId-based replay. +// The list is capped to DefaultEventLogMaxLen entries using LTRIM. +func (h *Hub) appendEventLog(sessionID string, evt CloudEvent) { + if h.redis == nil { + return + } + ctx := context.Background() + key := fmt.Sprintf(redisKeyEventLog, sessionID) + data, err := json.Marshal(evt) + if err != nil { + return + } + pipe := h.redis.Pipeline() + pipe.RPush(ctx, key, data) + pipe.LTrim(ctx, key, 0, int64(DefaultEventLogMaxLen-1)) + pipe.Expire(ctx, key, time.Duration(DefaultEventBacklogTTLMin)*time.Minute) + pipe.Exec(ctx) //nolint:errcheck +} + +// ReplayEvents returns all events after the given lastEventId from the session +// event log. 
Returns nil if no Redis or no events to replay. +func (h *Hub) ReplayEvents(sessionID, lastEventID string) []CloudEvent { + if h.redis == nil || lastEventID == "" { + return nil + } + ctx := context.Background() + key := fmt.Sprintf(redisKeyEventLog, sessionID) + + data, err := h.redis.LRange(ctx, key, 0, -1).Result() + if err != nil || len(data) == 0 { + return nil + } + + // Find the position of lastEventID and return everything after it + startIdx := -1 + for i, raw := range data { + var evt CloudEvent + if json.Unmarshal([]byte(raw), &evt) == nil && evt.EventID == lastEventID { + startIdx = i + break + } + } + + if startIdx < 0 { + // Event ID not found in log — replay everything (conservative) + var events []CloudEvent + for _, raw := range data { + var evt CloudEvent + if json.Unmarshal([]byte(raw), &evt) == nil { + events = append(events, evt) + } + } + return events + } + + // Return events after the found position + var events []CloudEvent + for _, raw := range data[startIdx+1:] { + var evt CloudEvent + if json.Unmarshal([]byte(raw), &evt) == nil { + events = append(events, evt) + } + } + return events +} + +// ─── Leader expiry watcher ────────────────────────────────────────────── + +// watchLeaderExpiry subscribes to Redis keyspace notifications for leader +// lock key expiry events. When a leader lock expires, it broadcasts +// leader.expired to the session so clients can trigger re-election. 
func (h *Hub) watchLeaderExpiry() {
	ctx := context.Background()

	// Enable expiry notifications on the connection.
	// NOTE(review): CONFIG SET replaces the whole notify-keyspace-events
	// value — if the deployment relies on other notification classes this
	// clobbers them; confirm against the Redis server configuration.
	h.redis.ConfigSet(ctx, "notify-keyspace-events", "Ex")

	// NOTE(review): the channel name hardcodes logical DB 0
	// ("__keyevent@0__"); verify the client is not configured with a
	// different DB index, or expiry events will never arrive.
	sub := h.redis.PSubscribe(ctx, "__keyevent@0__:expired")
	defer sub.Close()

	ch := sub.Channel()
	for {
		select {
		case <-h.stopCh:
			return
		case msg, ok := <-ch:
			if !ok {
				return // subscription closed underneath us
			}
			// Key pattern: team:session:<id>:leader_lock — any other expired
			// key (e.g. presence) yields "" and is ignored.
			sessionID := extractSessionFromLockKey(msg.Payload)
			if sessionID == "" {
				continue
			}
			// Deduplicate: only broadcast once per expiry until a new leader is elected
			h.leaderExpiryMu.Lock()
			if h.leaderExpiredSent[sessionID] {
				h.leaderExpiryMu.Unlock()
				continue
			}
			h.leaderExpiredSent[sessionID] = true
			h.leaderExpiryMu.Unlock()

			h.Broadcast(sessionID, CloudEvent{
				EventID:   fmt.Sprintf("le-exp-%d", time.Now().UnixMilli()),
				Type:      EventLeaderExpired,
				SessionID: sessionID,
				Timestamp: time.Now().UnixMilli(),
				Payload:   map[string]any{"reason": "ttl_expired"},
			})
		}
	}
}

// ResetLeaderExpiredSent clears the dedup flag when a new leader is elected,
// so future expirations can be broadcast.
func (h *Hub) ResetLeaderExpiredSent(sessionID string) {
	h.leaderExpiryMu.Lock()
	delete(h.leaderExpiredSent, sessionID)
	h.leaderExpiryMu.Unlock()
}

// extractSessionFromLockKey parses "team:session:<id>:leader_lock" → "<id>".
+func extractSessionFromLockKey(key string) string { + // Expected: team:session:UUID:leader_lock + const prefix = "team:session:" + const suffix = ":leader_lock" + if len(key) <= len(prefix)+len(suffix) { + return "" + } + if key[:len(prefix)] != prefix || key[len(key)-len(suffix):] != suffix { + return "" + } + return key[len(prefix) : len(key)-len(suffix)] +} + +func machineKey(sessionID, machineID string) string { + return sessionID + ":" + machineID +} + +// ─── Synchronous explore channels ───────────────────────────────────────── +// These are used to pair an explore.request HTTP call with its explore.result +// WebSocket response from the target machine. + +// RegisterExplore creates a buffered channel for an outgoing explore request. +// The caller is responsible for calling CancelExplore when done. +func (h *Hub) RegisterExplore(requestID string) chan CloudEvent { + ch := make(chan CloudEvent, 1) + h.exploreMu.Lock() + h.exploreChans[requestID] = ch + h.exploreMu.Unlock() + return ch +} + +// DeliverExplore sends the result event to a waiting RegisterExplore channel. +// It is a no-op if no channel is registered for the requestID. +func (h *Hub) DeliverExplore(requestID string, evt CloudEvent) { + h.exploreMu.Lock() + ch, ok := h.exploreChans[requestID] + h.exploreMu.Unlock() + if !ok { + return + } + select { + case ch <- evt: + default: + } +} + +// CancelExplore removes the channel for the given requestID. +func (h *Hub) CancelExplore(requestID string) { + h.exploreMu.Lock() + delete(h.exploreChans, requestID) + h.exploreMu.Unlock() +} + +// ─── Synchronous decompose channels ────────────────────────────────────── +// Used to pair a decompose HTTP call with its decompose.result WebSocket +// response from the target teammate machine. + +// RegisterDecompose creates a buffered channel for an outgoing decompose request. 
func (h *Hub) RegisterDecompose(requestID string) chan CloudEvent {
	ch := make(chan CloudEvent, 1) // buffered so delivery never blocks
	h.decomposeMu.Lock()
	h.decomposeChans[requestID] = ch
	h.decomposeMu.Unlock()
	return ch
}

// DeliverDecompose sends the result event to a waiting RegisterDecompose channel.
// No-op if no channel is registered for the requestID; a duplicate result is
// dropped by the non-blocking send.
func (h *Hub) DeliverDecompose(requestID string, evt CloudEvent) {
	h.decomposeMu.Lock()
	ch, ok := h.decomposeChans[requestID]
	h.decomposeMu.Unlock()
	if !ok {
		return
	}
	select {
	case ch <- evt:
	default:
	}
}

// CancelDecompose removes the channel for the given requestID.
// The caller that Registered the channel must invoke this when done.
func (h *Hub) CancelDecompose(requestID string) {
	h.decomposeMu.Lock()
	delete(h.decomposeChans, requestID)
	h.decomposeMu.Unlock()
}
diff --git a/server/internal/team/leader_score.go b/server/internal/team/leader_score.go
new file mode 100644
index 0000000..fcf8754
--- /dev/null
+++ b/server/internal/team/leader_score.go
@@ -0,0 +1,105 @@
package team

// ─── Leader candidate scoring ─────────────────────────────────────────────

// LeaderCapability represents a candidate's self-reported capabilities
// sent during leader election.
type LeaderCapability struct {
	MachineID            string
	RepoURLs             []string // remote URLs of repos present on this machine
	HeartbeatSuccessRate float64  // 0.0–1.0
	CPUIdlePercent       float64  // 0–100
	MemoryFreeMB         float64
	RTTMs                float64 // round-trip latency in ms
}

// LeaderScore is the computed scoring breakdown for a leader candidate.
type LeaderScore struct {
	MachineID         string  `json:"machineId"`
	TotalScore        float64 `json:"totalScore"`
	RepoCoverageScore float64 `json:"repoCoverageScore"`
	HeartbeatScore    float64 `json:"heartbeatScore"`
	PerformanceScore  float64 `json:"performanceScore"`
	LatencyScore      float64 `json:"latencyScore"`
}

// Weight constants for leader scoring. They sum to 1.0 so TotalScore stays
// in the 0–1 range.
const (
	LeaderWeightRepo      = 0.4
	LeaderWeightHeartbeat = 0.3
	LeaderWeightPerf      = 0.2
	LeaderWeightLatency   = 0.1
)

// ScoreLeaderCandidate evaluates a leader candidate and returns the weighted
// score breakdown.
sessionTargetRepos is the union of all repo URLs referenced +// by tasks in the session; if empty the repo score defaults to neutral (0.5). +func ScoreLeaderCandidate(cap LeaderCapability, sessionTargetRepos []string) LeaderScore { + repoScore := computeRepoCoverageScore(cap.RepoURLs, sessionTargetRepos) + heartbeatScore := cap.HeartbeatSuccessRate + if heartbeatScore == 0 { + heartbeatScore = 0.5 // neutral default when not provided + } + perfScore := computePerformanceScore(cap.CPUIdlePercent, cap.MemoryFreeMB) + latencyScore := computeLatencyScore(cap.RTTMs) + + total := repoScore*LeaderWeightRepo + + heartbeatScore*LeaderWeightHeartbeat + + perfScore*LeaderWeightPerf + + latencyScore*LeaderWeightLatency + + return LeaderScore{ + MachineID: cap.MachineID, + TotalScore: total, + RepoCoverageScore: repoScore, + HeartbeatScore: heartbeatScore, + PerformanceScore: perfScore, + LatencyScore: latencyScore, + } +} + +// computeRepoCoverageScore returns the fraction of session target repos that +// the candidate has locally. Returns 0.5 (neutral) when there are no target repos. +func computeRepoCoverageScore(candidateRepos, targetRepos []string) float64 { + if len(targetRepos) == 0 { + return 0.5 // no repo requirement → neutral + } + candidateSet := make(map[string]struct{}, len(candidateRepos)) + for _, url := range candidateRepos { + candidateSet[url] = struct{}{} + } + matched := 0 + for _, url := range targetRepos { + if _, ok := candidateSet[url]; ok { + matched++ + } + } + return float64(matched) / float64(len(targetRepos)) +} + +// computePerformanceScore combines CPU idle % and free memory into a 0–1 score. +// cpuIdle is 0–100, memFreeMB is absolute. The memory component caps at 8 GB. 
func computePerformanceScore(cpuIdle, memFreeMB float64) float64 {
	// Each component is clamped to [0,1]; the result is their average.
	cpuComponent := cpuIdle / 100.0
	if cpuComponent > 1.0 {
		cpuComponent = 1.0
	}
	memComponent := memFreeMB / 8192.0 // 8 GB caps the memory contribution
	if memComponent > 1.0 {
		memComponent = 1.0
	}
	return (cpuComponent + memComponent) / 2.0
}

// computeLatencyScore converts RTT in ms to a 0–1 score where lower latency
// is better. Uses max(0, 1 - rtt/500) so 0 ms → 1.0 and 500+ ms → 0.0.
func computeLatencyScore(rttMs float64) float64 {
	score := 1.0 - rttMs/500.0
	if score < 0 {
		return 0
	}
	// Defensive upper clamp (only reachable with a negative RTT input).
	if score > 1 {
		return 1
	}
	return score
}
diff --git a/server/internal/team/models.go b/server/internal/team/models.go
new file mode 100644
index 0000000..6d20c34
--- /dev/null
+++ b/server/internal/team/models.go
@@ -0,0 +1,98 @@
package team

import (
	"time"

	"github.com/lib/pq"
	"gorm.io/datatypes"
	"gorm.io/gorm"
)

// TeamSession represents a collaborative team session.
// Soft-deleted via gorm.DeletedAt; FencingToken guards stale-leader writes.
type TeamSession struct {
	ID              string         `gorm:"primaryKey;type:uuid;default:gen_random_uuid()" json:"id"`
	Name            string         `gorm:"not null" json:"name"`
	CreatorID       string         `gorm:"not null;index" json:"creatorId"`
	Status          string         `gorm:"not null;default:'active'" json:"status"`
	LeaderMachineID string         `gorm:"index" json:"leaderMachineId,omitempty"`
	LeaderUserID    string         `gorm:"index" json:"leaderUserId,omitempty"`
	FencingToken    int64          `gorm:"not null;default:0" json:"fencingToken"`
	Metadata        datatypes.JSON `gorm:"type:jsonb;default:'{}'" json:"metadata" swaggertype:"object"`
	CreatedAt       time.Time      `json:"createdAt"`
	UpdatedAt       time.Time      `json:"updatedAt"`
	DeletedAt       gorm.DeletedAt `gorm:"index" json:"-"`
}

// TeamSessionMember represents a member (machine) in a team session.
// One row per (session, machine): enforced by idx_team_member_session_machine.
// The Cpu/Memory/RTT/Heartbeat fields mirror the values used for leader
// scoring (see LeaderCapability).
type TeamSessionMember struct {
	ID                   string         `gorm:"primaryKey;type:uuid;default:gen_random_uuid()" json:"id"`
	SessionID            string         `gorm:"type:uuid;not null;index;uniqueIndex:idx_team_member_session_machine" json:"sessionId"`
	UserID               string         `gorm:"not null;index" json:"userId"`
	MachineID            string         `gorm:"not null;uniqueIndex:idx_team_member_session_machine" json:"machineId"`
	MachineName          string         `json:"machineName,omitempty"`
	Role                 string         `gorm:"not null;default:'teammate'" json:"role"`
	Status               string         `gorm:"not null;default:'online'" json:"status"`
	ConnectedAt          time.Time      `json:"connectedAt"`
	LastHeartbeat        time.Time      `json:"lastHeartbeat"`
	CpuIdlePercent       float64        `gorm:"default:0" json:"cpuIdlePercent,omitempty"`
	MemoryFreeMB         float64        `gorm:"default:0" json:"memoryFreeMB,omitempty"`
	RTTMs                float64        `gorm:"default:0" json:"rttMs,omitempty"`
	HeartbeatSuccessRate float64        `gorm:"default:0" json:"heartbeatSuccessRate,omitempty"`
	ReportedRepoURLs     pq.StringArray `gorm:"type:text[]" json:"reportedRepoUrls,omitempty"`
	CreatedAt            time.Time      `json:"createdAt"`
	UpdatedAt            time.Time      `json:"updatedAt"`
	DeletedAt            gorm.DeletedAt `gorm:"index" json:"-"`
}

// TeamTask represents a task within a team session.
// RepoAffinity/FileHints feed the P1-P5 scheduler; Dependencies holds task
// IDs that must complete first (DAG edges). Timestamps trace the lifecycle:
// created → claimed → started → completed.
type TeamTask struct {
	ID               string         `gorm:"primaryKey;type:uuid;default:gen_random_uuid()" json:"id"`
	SessionID        string         `gorm:"type:uuid;not null;index" json:"sessionId"`
	Description      string         `gorm:"type:text;not null" json:"description"`
	RepoAffinity     pq.StringArray `gorm:"type:text[]" json:"repoAffinity,omitempty"`
	FileHints        pq.StringArray `gorm:"type:text[]" json:"fileHints,omitempty"`
	Dependencies     pq.StringArray `gorm:"type:text[]" json:"dependencies,omitempty"`
	AssignedMemberID *string        `gorm:"type:uuid;index" json:"assignedMemberId,omitempty"`
	Status           string         `gorm:"not null;default:'pending';index" json:"status"`
	Priority         int            `gorm:"not null;default:5" json:"priority"`
	Result           datatypes.JSON `gorm:"type:jsonb" json:"result" swaggertype:"object"`
	RetryCount       int            `gorm:"not null;default:0" json:"retryCount"`
	MaxRetries       int            `gorm:"not null;default:3" json:"maxRetries"`
	ErrorMessage     string         `gorm:"type:text" json:"errorMessage,omitempty"`
	CreatedAt        time.Time      `json:"createdAt"`
	ClaimedAt        *time.Time     `json:"claimedAt,omitempty"`
	StartedAt        *time.Time     `json:"startedAt,omitempty"`
	CompletedAt      *time.Time     `json:"completedAt,omitempty"`
	UpdatedAt        time.Time      `json:"updatedAt"`
}

// TeamApprovalRequest represents a permission approval request (Phase 2 schema).
// Pending until a leader resolves it (Status + ResolvedAt); Feedback and
// PermissionUpdates carry the leader's response back to the requester.
type TeamApprovalRequest struct {
	ID                string         `gorm:"primaryKey;type:uuid;default:gen_random_uuid()" json:"id"`
	SessionID         string         `gorm:"type:uuid;not null;index" json:"sessionId"`
	RequesterID       string         `gorm:"type:uuid;not null;index" json:"requesterId"`
	RequesterName     string         `json:"requesterName,omitempty"`
	ToolName          string         `gorm:"not null" json:"toolName"`
	ToolInput         datatypes.JSON `gorm:"type:jsonb" json:"toolInput" swaggertype:"object"`
	Description       string         `gorm:"type:text" json:"description,omitempty"`
	RiskLevel         string         `gorm:"not null;default:'medium'" json:"riskLevel"`
	Status            string         `gorm:"not null;default:'pending';index" json:"status"`
	Feedback          string         `json:"feedback,omitempty"`
	PermissionUpdates datatypes.JSON `gorm:"type:jsonb" json:"permissionUpdates" swaggertype:"object"`
	CreatedAt         time.Time      `json:"createdAt"`
	ResolvedAt        *time.Time     `json:"resolvedAt,omitempty"`
}

// TeamRepoAffinity represents a teammate's local repository mapping (Phase 2 schema).
// One row per (session, member, remote URL): enforced by idx_team_repo_unique.
type TeamRepoAffinity struct {
	ID                    string    `gorm:"primaryKey;type:uuid;default:gen_random_uuid()" json:"id"`
	SessionID             string    `gorm:"type:uuid;not null;index;uniqueIndex:idx_team_repo_unique" json:"sessionId"`
	MemberID              string    `gorm:"type:uuid;not null;index;uniqueIndex:idx_team_repo_unique" json:"memberId"`
	RepoRemoteURL         string    `gorm:"not null;uniqueIndex:idx_team_repo_unique" json:"repoRemoteUrl"`
	RepoLocalPath         string    `json:"repoLocalPath,omitempty"`
	CurrentBranch         string    `json:"currentBranch,omitempty"`
	HasUncommittedChanges bool      `gorm:"not null;default:false" json:"hasUncommittedChanges"`
	LastSyncedAt          time.Time `json:"lastSyncedAt"`
	CreatedAt             time.Time `json:"createdAt"`
	UpdatedAt             time.Time `json:"updatedAt"`
}
\ No newline at end of file
diff --git a/server/internal/team/module.go b/server/internal/team/module.go
new file mode 100644
index 0000000..802aead
--- /dev/null
+++ b/server/internal/team/module.go
@@ -0,0 +1,82 @@
package team

import (
	"github.com/gin-gonic/gin"
	"github.com/redis/go-redis/v9"
	"gorm.io/gorm"
)

// Module is the entry point for the Cloud Team feature.
// Initialise it with New() and call RegisterRoutes() to wire it in.
type Module struct {
	Store   *Store
	Hub     *Hub
	Handler *Handler
}

// New wires up the feature's three layers: persistence (Store), realtime
// routing (Hub), and HTTP/WS handlers. rc may be nil — see NewHub for the
// degraded single-node behaviour.
func New(db *gorm.DB, rc *redis.Client) *Module {
	store := NewStore(db)
	hub := NewHub(rc)
	handler := NewHandler(store, hub)
	return &Module{
		Store:   store,
		Hub:     hub,
		Handler: handler,
	}
}

// RegisterRoutes mounts all Cloud Team REST and WebSocket endpoints.
//
// REST base:  /api/team
// WebSocket:  /ws/sessions/:id
func (m *Module) RegisterRoutes(apiGroup, wsGroup *gin.RouterGroup) {
	team := apiGroup.Group("/team")
	{
		sessions := team.Group("/sessions")
		{
			// Session CRUD
			sessions.POST("", m.Handler.CreateSession)
			sessions.GET("", m.Handler.ListSessions)
			sessions.GET("/:id", m.Handler.GetSession)
			sessions.PATCH("/:id", m.Handler.UpdateSession)
			sessions.DELETE("/:id", m.Handler.DeleteSession)

			// Membership
			sessions.POST("/:id/members", m.Handler.JoinSession)
			sessions.GET("/:id/members", m.Handler.ListMembers)
			sessions.DELETE("/:id/members/:mid", m.Handler.LeaveSession)

			// Task planning & orchestration
			sessions.POST("/:id/tasks", m.Handler.SubmitTaskPlan)
			sessions.GET("/:id/tasks", m.Handler.ListTasks)
			sessions.POST("/:id/tasks/:taskId/terminate", m.Handler.TerminateTask)
			sessions.POST("/:id/decompose", m.Handler.DecomposeTask)
			sessions.POST("/:id/orchestrate", m.Handler.OrchestrateTask)

			sessions.GET("/:id/approvals", m.Handler.ListApprovals)

			// Repo affinity registry
			sessions.POST("/:id/repos", m.Handler.RegisterRepo)
			sessions.GET("/:id/repos", m.Handler.QueryRepos)

			sessions.GET("/:id/progress", m.Handler.GetProgress)

			sessions.POST("/:id/explore", m.Handler.Explore)

			// Leader election & heartbeat
			sessions.POST("/:id/leader/elect", m.Handler.ElectLeader)
			sessions.POST("/:id/leader/heartbeat", m.Handler.LeaderHeartbeat)
			sessions.GET("/:id/leader", m.Handler.GetLeader)
		}

		tasks := team.Group("/tasks")
		{
			tasks.GET("/:taskId", m.Handler.GetTask)
			tasks.PATCH("/:taskId", m.Handler.UpdateTask)
		}

		approvals := team.Group("/approvals")
		{
			approvals.PATCH("/:approvalId", m.Handler.RespondApproval)
		}
	}

	// WebSocket endpoint – auth is handled per-connection via ?token= query param
	// or by the upstream middleware that already set c.GetString("userId").
	wsGroup.GET("/sessions/:id", m.Handler.ServeWS)
}
diff --git a/server/internal/team/scheduler.go b/server/internal/team/scheduler.go
new file mode 100644
index 0000000..4230b53
--- /dev/null
+++ b/server/internal/team/scheduler.go
@@ -0,0 +1,328 @@
package team

import (
	"math"
	"sort"
)

// ─── Priority tier constants ────────────────────────────────────────────
// Lower number = better candidate; see ScheduleTasks / scoreCandidates.

const (
	P1 = 1 // Has ALL target repos + no uncommitted changes + idle
	P2 = 2 // Has ALL target repos + has uncommitted changes + idle (or same-repo serialization)
	P3 = 3 // Has SOME target repos (missing need clone) + idle
	P4 = 4 // Has NO target repos (all need clone) + idle
	P5 = 5 // Any idle teammate (fallback for tasks with no repoAffinity)
)

// ─── Scheduling types ───────────────────────────────────────────────────

// MemberLoadInfo is a summary of a member's current work state.
type MemberLoadInfo struct {
	RunningCount   int
	AssignedCount  int
	CompletedCount int64
	FailedCount    int64
	ActiveRepoURLs map[string]bool // repos with currently running/assigned tasks
}

// SchedulingContext holds all data needed for one scheduling pass.
type SchedulingContext struct {
	Members        []TeamSessionMember
	RepoAffinities []TeamRepoAffinity
	RunningTasks   []TeamTask
	MemberLoad     map[string]MemberLoadInfo // memberID → load summary
}

// CandidateScore holds the computed priority and sub-scores for a member.
type CandidateScore struct {
	MemberID       string
	MachineID      string
	PriorityTier   int     // P1=1 … P5=5
	RepoCoverage   float64 // fraction of target repos this member has
	QueueLength    int     // current assigned/running task count
	SuccessRate    float64 // completed / (completed + failed), 0.5 default
	DirtyRepo      bool    // has uncommitted changes on target repos
	IsIdle         bool    // not currently running any task
	CompositeScore float64 // weighted secondary sort score
}

// ─── Core scheduling function ───────────────────────────────────────────

// ScheduleTasks assigns each unassigned task to the best candidate member
// using the P1-P5 repo affinity scheduling algorithm. Tasks that already
// have an AssignedMemberID are left untouched (respecting explicit assignment).
// The tasks slice is mutated in place and returned for convenience.
func ScheduleTasks(ctx SchedulingContext, tasks []TeamTask) []TeamTask {
	if len(ctx.Members) == 0 || len(tasks) == 0 {
		return tasks
	}

	// 1. Pre-compute per-member data structures
	memberRepos := buildMemberRepoMap(ctx.RepoAffinities)
	memberLoad := ctx.MemberLoad
	if memberLoad == nil {
		memberLoad = make(map[string]MemberLoadInfo)
	}
	runningByMemberRepo := buildRunningTaskRepoMap(ctx.RunningTasks, memberRepos)

	// 2. Schedule tasks with repo affinity
	for i, task := range tasks {
		if task.AssignedMemberID != nil && *task.AssignedMemberID != "" {
			continue // client explicitly assigned; respect it
		}
		if len(task.RepoAffinity) == 0 {
			continue // handled in step 3 (P5 fallback)
		}

		candidates := scoreCandidates(ctx, task, memberRepos, memberLoad, runningByMemberRepo)
		if len(candidates) == 0 {
			continue
		}

		// Sort: primary by PriorityTier ASC, secondary by CompositeScore DESC
		sort.Slice(candidates, func(a, b int) bool {
			if candidates[a].PriorityTier != candidates[b].PriorityTier {
				return candidates[a].PriorityTier < candidates[b].PriorityTier
			}
			return candidates[a].CompositeScore > candidates[b].CompositeScore
		})

		assignedID := candidates[0].MemberID
		tasks[i].AssignedMemberID = &assignedID
		tasks[i].Status = TaskStatusAssigned

		// Update in-memory load so subsequent tasks see the updated queue
		load := memberLoad[assignedID]
		load.AssignedCount++
		for _, repoURL := range task.RepoAffinity {
			if load.ActiveRepoURLs == nil {
				load.ActiveRepoURLs = make(map[string]bool)
			}
			load.ActiveRepoURLs[repoURL] = true
		}
		memberLoad[assignedID] = load

		// Update runningByMemberRepo for serialization
		for _, repoURL := range task.RepoAffinity {
			if runningByMemberRepo[assignedID] == nil {
				runningByMemberRepo[assignedID] = make(map[string]bool)
			}
			runningByMemberRepo[assignedID][repoURL] = true
		}
	}

	// 3. Handle tasks with no repoAffinity (P5 fallback)
	// NOTE(review): this condition also sweeps repo-affinity tasks that found
	// NO candidate in step 2 (still pending), assigning them while ignoring
	// affinity entirely — confirm that is the intended fallback behaviour.
	for i, task := range tasks {
		if (task.AssignedMemberID == nil || *task.AssignedMemberID == "") &&
			task.Status == TaskStatusPending {
			assignToLeastLoadedIdleMember(ctx, &tasks[i], memberLoad)
		}
	}

	return tasks
}

// ─── Candidate scoring ──────────────────────────────────────────────────

// scoreCandidates evaluates all online members against a task and returns
// their P1-P5 tier scores.
+func scoreCandidates( + ctx SchedulingContext, + task TeamTask, + memberRepos map[string]map[string]TeamRepoAffinity, // memberID → repoURL → affinity + memberLoad map[string]MemberLoadInfo, + runningByMemberRepo map[string]map[string]bool, // memberID → set of repoURLs +) []CandidateScore { + var candidates []CandidateScore + + for _, member := range ctx.Members { + load := memberLoad[member.ID] + isIdle := load.RunningCount == 0 && load.AssignedCount == 0 + + repos := memberRepos[member.ID] // repoURL → affinity entry + hasAll, hasSome, dirtyOnTarget := computeRepoCoverage(task.RepoAffinity, repos) + + tier := computePriorityTier(hasAll, hasSome, dirtyOnTarget, isIdle) + if tier == 0 { + continue // not eligible + } + + // Same-repo serialization: if this member is already running a task on + // one of the target repos, boost their tier to serialize writes. + sameRepoRunning := hasRunningSameRepoTask(task.RepoAffinity, runningByMemberRepo[member.ID]) + effectiveTier := tier + if sameRepoRunning && tier >= P3 { + effectiveTier = P2 // serialize: prefer the member already working on this repo + } + + coverage := computeCoverageFraction(task.RepoAffinity, repos) + successRate := computeSuccessRate(load.CompletedCount, load.FailedCount) + queueLen := load.RunningCount + load.AssignedCount + + // Composite secondary score (higher is better) + compositeScore := + (1.0-float64(queueLen)/10.0)*0.4 + + successRate*0.4 + + coverage*0.2 + + candidates = append(candidates, CandidateScore{ + MemberID: member.ID, + MachineID: member.MachineID, + PriorityTier: effectiveTier, + RepoCoverage: coverage, + QueueLength: queueLen, + SuccessRate: successRate, + DirtyRepo: dirtyOnTarget, + IsIdle: isIdle, + CompositeScore: compositeScore, + }) + } + return candidates +} + +// ─── Tier computation ─────────────────────────────────────────────────── + +// computePriorityTier determines the P1-P5 tier for a member. +// Returns 0 if the member is not eligible (not idle). 
+func computePriorityTier(hasAll, hasSome, dirtyOnTarget, isIdle bool) int { + if !isIdle { + // Only idle teammates are eligible for new assignment. + // (Non-idle members can still be boosted via same-repo serialization + // if they have running tasks on the target repo — handled in scoreCandidates.) + return 0 + } + if hasAll && !dirtyOnTarget { + return P1 + } + if hasAll && dirtyOnTarget { + return P2 + } + if hasSome && !hasAll { + return P3 + } + // hasSome == false means hasNone + return P4 +} + +// ─── Helper functions ─────────────────────────────────────────────────── + +// computeRepoCoverage checks if the member has all, some, or none of the target repos. +// Also returns whether the member has uncommitted changes on any target repo. +func computeRepoCoverage(targetRepos []string, memberRepos map[string]TeamRepoAffinity) (hasAll, hasSome, dirtyOnTarget bool) { + if len(targetRepos) == 0 { + return true, true, false // no affinity requirement → trivially satisfied + } + + matched := 0 + for _, url := range targetRepos { + affinity, ok := memberRepos[url] + if ok { + matched++ + if affinity.HasUncommittedChanges { + dirtyOnTarget = true + } + } + } + + hasSome = matched > 0 + hasAll = matched == len(targetRepos) + return +} + +// computeCoverageFraction returns 0.0-1.0 for what fraction of target repos +// the member has. +func computeCoverageFraction(targetRepos []string, memberRepos map[string]TeamRepoAffinity) float64 { + if len(targetRepos) == 0 { + return 1.0 + } + matched := 0 + for _, url := range targetRepos { + if _, ok := memberRepos[url]; ok { + matched++ + } + } + return float64(matched) / float64(len(targetRepos)) +} + +// computeSuccessRate returns completed/(completed+failed), defaulting to 0.5 +// when no data is available. 
+func computeSuccessRate(completed, failed int64) float64 { + total := completed + failed + if total == 0 { + return 0.5 // neutral default + } + return float64(completed) / float64(total) +} + +// hasRunningSameRepoTask checks if the member is already running a task +// touching one of the target repo URLs. +func hasRunningSameRepoTask(targetRepos []string, runningRepos map[string]bool) bool { + for _, url := range targetRepos { + if runningRepos[url] { + return true + } + } + return false +} + +// buildMemberRepoMap builds memberID → repoURL → TeamRepoAffinity from flat list. +func buildMemberRepoMap(affinities []TeamRepoAffinity) map[string]map[string]TeamRepoAffinity { + result := make(map[string]map[string]TeamRepoAffinity) + for _, a := range affinities { + if result[a.MemberID] == nil { + result[a.MemberID] = make(map[string]TeamRepoAffinity) + } + result[a.MemberID][a.RepoRemoteURL] = a + } + return result +} + +// buildRunningTaskRepoMap builds memberID → set of repoURLs from currently +// running/assigned tasks. +func buildRunningTaskRepoMap(running []TeamTask, memberRepos map[string]map[string]TeamRepoAffinity) map[string]map[string]bool { + result := make(map[string]map[string]bool) + for _, t := range running { + if t.AssignedMemberID == nil { + continue + } + memberID := *t.AssignedMemberID + if result[memberID] == nil { + result[memberID] = make(map[string]bool) + } + for _, url := range t.RepoAffinity { + result[memberID][url] = true + } + } + return result +} + +// assignToLeastLoadedIdleMember assigns a task with no repo affinity to the +// idle member with the shortest queue. 
+func assignToLeastLoadedIdleMember(ctx SchedulingContext, task *TeamTask, memberLoad map[string]MemberLoadInfo) { + var bestMemberID string + bestQueueLen := math.MaxInt + + for _, member := range ctx.Members { + load := memberLoad[member.ID] + queueLen := load.RunningCount + load.AssignedCount + + // Only consider members with relatively low load + // (allow up to 2 concurrent tasks per member for no-affinity tasks) + if queueLen >= 2 { + continue + } + + if queueLen < bestQueueLen { + bestQueueLen = queueLen + bestMemberID = member.ID + } + } + + if bestMemberID != "" { + task.AssignedMemberID = &bestMemberID + task.Status = TaskStatusAssigned + load := memberLoad[bestMemberID] + load.AssignedCount++ + memberLoad[bestMemberID] = load + } +} diff --git a/server/internal/team/scheduler_test.go b/server/internal/team/scheduler_test.go new file mode 100644 index 0000000..62c18b1 --- /dev/null +++ b/server/internal/team/scheduler_test.go @@ -0,0 +1,429 @@ +package team + +import ( + "testing" +) + +// ─── P1-P5 Tier computation ─────────────────────────────────────────────── + +func TestComputePriorityTier(t *testing.T) { + tests := []struct { + name string + hasAll bool + hasSome bool + dirtyOnTarget bool + isIdle bool + want int + }{ + {"P1: has all, no dirty, idle", true, true, false, true, P1}, + {"P2: has all, dirty, idle", true, true, true, true, P2}, + {"P3: has some, idle", false, true, false, true, P3}, + {"P4: has none, idle", false, false, false, true, P4}, + {"Not eligible: not idle", true, true, false, false, 0}, + {"Not idle even with all repos and dirty", true, true, true, false, 0}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := computePriorityTier(tt.hasAll, tt.hasSome, tt.dirtyOnTarget, tt.isIdle) + if got != tt.want { + t.Errorf("computePriorityTier() = %d, want %d", got, tt.want) + } + }) + } +} + +// ─── Repo coverage ─────────────────────────────────────────────────────── + +func TestComputeRepoCoverage(t 
*testing.T) { + memberRepos := map[string]TeamRepoAffinity{ + "https://github.com/org/repo-a": {RepoRemoteURL: "https://github.com/org/repo-a", HasUncommittedChanges: false}, + "https://github.com/org/repo-b": {RepoRemoteURL: "https://github.com/org/repo-b", HasUncommittedChanges: true}, + } + + tests := []struct { + name string + targetRepos []string + wantAll bool + wantSome bool + wantDirty bool + }{ + {"has all repos, no dirty", []string{"https://github.com/org/repo-a", "https://github.com/org/repo-b"}, true, true, true}, + {"has all repos, no dirty on specific", []string{"https://github.com/org/repo-a"}, true, true, false}, + {"has some repos", []string{"https://github.com/org/repo-a", "https://github.com/org/repo-c"}, false, true, false}, + {"has no repos", []string{"https://github.com/org/repo-x"}, false, false, false}, + {"empty target repos", []string{}, true, true, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + hasAll, hasSome, dirty := computeRepoCoverage(tt.targetRepos, memberRepos) + if hasAll != tt.wantAll || hasSome != tt.wantSome || dirty != tt.wantDirty { + t.Errorf("computeRepoCoverage() = (%v, %v, %v), want (%v, %v, %v)", + hasAll, hasSome, dirty, tt.wantAll, tt.wantSome, tt.wantDirty) + } + }) + } +} + +func TestComputeCoverageFraction(t *testing.T) { + memberRepos := map[string]TeamRepoAffinity{ + "https://github.com/org/repo-a": {}, + "https://github.com/org/repo-b": {}, + } + + tests := []struct { + name string + targetRepos []string + want float64 + }{ + {"all repos", []string{"https://github.com/org/repo-a", "https://github.com/org/repo-b"}, 1.0}, + {"half repos", []string{"https://github.com/org/repo-a", "https://github.com/org/repo-c"}, 0.5}, + {"no repos", []string{"https://github.com/org/repo-x"}, 0.0}, + {"empty target", []string{}, 1.0}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := computeCoverageFraction(tt.targetRepos, memberRepos) + if got != tt.want { + 
t.Errorf("computeCoverageFraction() = %f, want %f", got, tt.want) + } + }) + } +} + +// ─── Success rate ──────────────────────────────────────────────────────── + +func TestComputeSuccessRate(t *testing.T) { + tests := []struct { + name string + completed int64 + failed int64 + want float64 + }{ + {"all success", 10, 0, 1.0}, + {"half and half", 5, 5, 0.5}, + {"all failed", 0, 10, 0.0}, + {"no data — neutral default", 0, 0, 0.5}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := computeSuccessRate(tt.completed, tt.failed) + if got != tt.want { + t.Errorf("computeSuccessRate() = %f, want %f", got, tt.want) + } + }) + } +} + +// ─── Same-repo serialization ────────────────────────────────────────────── + +func TestHasRunningSameRepoTask(t *testing.T) { + runningRepos := map[string]bool{ + "https://github.com/org/repo-a": true, + } + + tests := []struct { + name string + targetRepos []string + want bool + }{ + {"same repo running", []string{"https://github.com/org/repo-a"}, true}, + {"different repo", []string{"https://github.com/org/repo-b"}, false}, + {"mixed — one matches", []string{"https://github.com/org/repo-b", "https://github.com/org/repo-a"}, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := hasRunningSameRepoTask(tt.targetRepos, runningRepos) + if got != tt.want { + t.Errorf("hasRunningSameRepoTask() = %v, want %v", got, tt.want) + } + }) + } +} + +// ─── Full scheduling integration ────────────────────────────────────────── + +func TestScheduleTasks_P1Preferred(t *testing.T) { + // Member A has all target repos and no dirty state → P1 + memberA := TeamSessionMember{ID: "m-a", MachineID: "machine-a"} + memberB := TeamSessionMember{ID: "m-b", MachineID: "machine-b"} + + ctx := SchedulingContext{ + Members: []TeamSessionMember{memberA, memberB}, + RepoAffinities: []TeamRepoAffinity{ + {MemberID: "m-a", RepoRemoteURL: "https://github.com/org/repo-x"}, + }, + RunningTasks: []TeamTask{}, + 
MemberLoad: map[string]MemberLoadInfo{}, + } + + tasks := []TeamTask{ + {ID: "task-1", Description: "do stuff", RepoAffinity: []string{"https://github.com/org/repo-x"}}, + } + + result := ScheduleTasks(ctx, tasks) + if result[0].AssignedMemberID == nil || *result[0].AssignedMemberID != "m-a" { + t.Errorf("expected task assigned to m-a (P1), got %v", result[0].AssignedMemberID) + } + if result[0].Status != TaskStatusAssigned { + t.Errorf("expected status assigned, got %s", result[0].Status) + } +} + +func TestScheduleTasks_ExplicitAssignmentRespected(t *testing.T) { + // If the client explicitly sets AssignedMemberID, the scheduler must not change it + explicitID := "m-explicit" + tasks := []TeamTask{ + {ID: "task-1", Description: "do stuff", AssignedMemberID: &explicitID, RepoAffinity: []string{"https://github.com/org/repo-x"}}, + } + + ctx := SchedulingContext{ + Members: []TeamSessionMember{{ID: "m-other", MachineID: "machine-other"}}, + RepoAffinities: []TeamRepoAffinity{}, + RunningTasks: []TeamTask{}, + MemberLoad: map[string]MemberLoadInfo{}, + } + + result := ScheduleTasks(ctx, tasks) + if result[0].AssignedMemberID == nil || *result[0].AssignedMemberID != explicitID { + t.Errorf("explicit assignment should be preserved, got %v", result[0].AssignedMemberID) + } +} + +func TestScheduleTasks_NoAffinityFallback(t *testing.T) { + // Tasks with no repoAffinity should be assigned to least-loaded idle member + memberA := TeamSessionMember{ID: "m-a", MachineID: "machine-a"} + memberB := TeamSessionMember{ID: "m-b", MachineID: "machine-b"} + + ctx := SchedulingContext{ + Members: []TeamSessionMember{memberA, memberB}, + RepoAffinities: []TeamRepoAffinity{}, + RunningTasks: []TeamTask{}, + MemberLoad: map[string]MemberLoadInfo{ + "m-a": {RunningCount: 1, AssignedCount: 0}, + "m-b": {RunningCount: 0, AssignedCount: 0}, + }, + } + + tasks := []TeamTask{ + {ID: "task-1", Description: "no repo affinity", Status: TaskStatusPending, RepoAffinity: nil}, + } + + result := 
ScheduleTasks(ctx, tasks) + if result[0].AssignedMemberID == nil { + t.Errorf("expected task assigned to m-b (idle), got nil. Status=%s, RepoAffinity=%v", result[0].Status, result[0].RepoAffinity) + } else if *result[0].AssignedMemberID != "m-b" { + t.Errorf("expected task assigned to m-b (idle), got %s", *result[0].AssignedMemberID) + } +} + +func TestScheduleTasks_SameRepoSerialization(t *testing.T) { + // Member A already has a running task on repo-x. + // Member B is idle and has repo-x. + // P1 logic should prefer idle member B, but same-repo serialization + // should boost non-idle member A when it's the only option or at P3+ tier. + memberA := TeamSessionMember{ID: "m-a", MachineID: "machine-a"} + memberB := TeamSessionMember{ID: "m-b", MachineID: "machine-b"} + + ctx := SchedulingContext{ + Members: []TeamSessionMember{memberA, memberB}, + RepoAffinities: []TeamRepoAffinity{ + {MemberID: "m-a", RepoRemoteURL: "https://github.com/org/repo-x"}, + {MemberID: "m-b", RepoRemoteURL: "https://github.com/org/repo-x"}, + }, + RunningTasks: []TeamTask{ + {ID: "existing", AssignedMemberID: strPtr("m-a"), RepoAffinity: []string{"https://github.com/org/repo-x"}}, + }, + MemberLoad: map[string]MemberLoadInfo{ + "m-a": {RunningCount: 1, AssignedCount: 0}, + "m-b": {RunningCount: 0, AssignedCount: 0}, + }, + } + + tasks := []TeamTask{ + {ID: "task-1", Description: "same repo", RepoAffinity: []string{"https://github.com/org/repo-x"}}, + } + + result := ScheduleTasks(ctx, tasks) + // Member B is P1 (idle, has all repos, no dirty), Member A is not idle → 0 tier + // So B should be assigned + if result[0].AssignedMemberID == nil || *result[0].AssignedMemberID != "m-b" { + t.Errorf("expected task assigned to m-b (P1 idle), got %v", result[0].AssignedMemberID) + } +} + +func TestScheduleTasks_MultipleTasks(t *testing.T) { + // Two tasks, one member idle — first task gets assigned, second task + // sees updated load and should not be assigned if the member is no longer idle + 
memberA := TeamSessionMember{ID: "m-a", MachineID: "machine-a"} + + ctx := SchedulingContext{ + Members: []TeamSessionMember{memberA}, + RepoAffinities: []TeamRepoAffinity{}, + RunningTasks: []TeamTask{}, + MemberLoad: map[string]MemberLoadInfo{}, + } + + tasks := []TeamTask{ + {ID: "task-1", Description: "first", RepoAffinity: []string{"https://github.com/org/repo-x"}}, + {ID: "task-2", Description: "second", RepoAffinity: []string{"https://github.com/org/repo-y"}}, + } + + result := ScheduleTasks(ctx, tasks) + // First task: member A is idle, no repo affinity match → P4 (has none), gets assigned + if result[0].AssignedMemberID == nil { + t.Errorf("task-1 should be assigned, got nil") + } + // Second task: member A now has AssignedCount=1, RunningCount=0 → still eligible at P4 + // because the scheduler only checks idle via RunningCount==0 && AssignedCount==0 + // After first assignment, AssignedCount=1, so IsIdle = false → not eligible + if result[1].AssignedMemberID != nil { + // This depends on the exact idle check; our scheduler checks RunningCount+AssignedCount==0 + // After task-1 assigns, member has AssignedCount=1 → not idle → tier 0 + // So task-2 should not be assigned via P1-P4, falls to P5 fallback + // P5 allows up to 2 concurrent tasks per member + t.Logf("task-2 assigned to %s (P5 fallback)", *result[1].AssignedMemberID) + } +} + +func TestBuildMemberRepoMap(t *testing.T) { + affinities := []TeamRepoAffinity{ + {MemberID: "m-a", RepoRemoteURL: "https://github.com/org/repo-1"}, + {MemberID: "m-a", RepoRemoteURL: "https://github.com/org/repo-2"}, + {MemberID: "m-b", RepoRemoteURL: "https://github.com/org/repo-1"}, + } + + result := buildMemberRepoMap(affinities) + if len(result) != 2 { + t.Errorf("expected 2 members, got %d", len(result)) + } + if len(result["m-a"]) != 2 { + t.Errorf("expected m-a to have 2 repos, got %d", len(result["m-a"])) + } + if len(result["m-b"]) != 1 { + t.Errorf("expected m-b to have 1 repo, got %d", len(result["m-b"])) + } 
+} + +func TestBuildRunningTaskRepoMap(t *testing.T) { + memberRepos := map[string]map[string]TeamRepoAffinity{} + tasks := []TeamTask{ + {ID: "t1", AssignedMemberID: strPtr("m-a"), RepoAffinity: []string{"https://github.com/org/repo-1"}}, + {ID: "t2", AssignedMemberID: strPtr("m-a"), RepoAffinity: []string{"https://github.com/org/repo-2"}}, + {ID: "t3", AssignedMemberID: nil, RepoAffinity: []string{"https://github.com/org/repo-3"}}, // no member + } + + result := buildRunningTaskRepoMap(tasks, memberRepos) + if len(result) != 1 { + t.Errorf("expected 1 member with running tasks, got %d", len(result)) + } + if !result["m-a"]["https://github.com/org/repo-1"] || !result["m-a"]["https://github.com/org/repo-2"] { + t.Errorf("m-a should have repo-1 and repo-2 as running repos") + } +} + +// ─── Leader score ──────────────────────────────────────────────────────── + +func TestScoreLeaderCandidate(t *testing.T) { + cap := LeaderCapability{ + MachineID: "machine-1", + RepoURLs: []string{"https://github.com/org/repo-a", "https://github.com/org/repo-b"}, + HeartbeatSuccessRate: 0.9, + CPUIdlePercent: 60, + MemoryFreeMB: 4096, + RTTMs: 50, + } + targetRepos := []string{"https://github.com/org/repo-a", "https://github.com/org/repo-b", "https://github.com/org/repo-c"} + + score := ScoreLeaderCandidate(cap, targetRepos) + + // Repo coverage: 2/3 → 0.667 + if score.RepoCoverageScore < 0.66 || score.RepoCoverageScore > 0.67 { + t.Errorf("RepoCoverageScore = %f, want ~0.667", score.RepoCoverageScore) + } + // Heartbeat: 0.9 + if score.HeartbeatScore != 0.9 { + t.Errorf("HeartbeatScore = %f, want 0.9", score.HeartbeatScore) + } + // Perf: (60/100 + 4096/8192) / 2 = (0.6 + 0.5) / 2 = 0.55 + if score.PerformanceScore != 0.55 { + t.Errorf("PerformanceScore = %f, want 0.55", score.PerformanceScore) + } + // Latency: max(0, 1 - 50/500) = 0.9 + if score.LatencyScore != 0.9 { + t.Errorf("LatencyScore = %f, want 0.9", score.LatencyScore) + } + // Total: 0.667*0.4 + 0.9*0.3 + 0.55*0.2 + 
0.9*0.1 ≈ 0.737 + if score.TotalScore < 0.73 || score.TotalScore > 0.75 { + t.Errorf("TotalScore = %f, want ~0.737", score.TotalScore) + } +} + +func TestScoreLeaderCandidate_NoTargetRepos(t *testing.T) { + cap := LeaderCapability{ + MachineID: "machine-1", + RepoURLs: []string{"https://github.com/org/repo-a"}, + HeartbeatSuccessRate: 0, + CPUIdlePercent: 0, + MemoryFreeMB: 0, + RTTMs: 0, + } + + score := ScoreLeaderCandidate(cap, nil) + + // No target repos → repo score = 0.5 (neutral) + if score.RepoCoverageScore != 0.5 { + t.Errorf("RepoCoverageScore = %f, want 0.5", score.RepoCoverageScore) + } + // No heartbeat provided → default 0.5 + if score.HeartbeatScore != 0.5 { + t.Errorf("HeartbeatScore = %f, want 0.5", score.HeartbeatScore) + } +} + +func TestComputeLatencyScore(t *testing.T) { + tests := []struct { + name string + rttMs float64 + want float64 + }{ + {"zero RTT", 0, 1.0}, + {"250ms", 250, 0.5}, + {"500ms → 0", 500, 0.0}, + {"600ms → clamped to 0", 600, 0.0}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := computeLatencyScore(tt.rttMs) + if got != tt.want { + t.Errorf("computeLatencyScore(%f) = %f, want %f", tt.rttMs, got, tt.want) + } + }) + } +} + +func TestComputePerformanceScore(t *testing.T) { + tests := []struct { + name string + cpuIdle float64 + memFreeMB float64 + want float64 + }{ + {"full idle, full mem", 100, 8192, 1.0}, + {"half idle, half mem", 50, 4096, 0.5}, + {"zero everything", 0, 0, 0.0}, + {"cpu cap, mem overflow", 150, 16000, 1.0}, // both capped at 1.0 + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := computePerformanceScore(tt.cpuIdle, tt.memFreeMB) + if got != tt.want { + t.Errorf("computePerformanceScore(%f, %f) = %f, want %f", + tt.cpuIdle, tt.memFreeMB, got, tt.want) + } + }) + } +} + +// ─── Helpers ───────────────────────────────────────────────────────────── + +func strPtr(s string) *string { return &s } diff --git a/server/internal/team/store.go 
b/server/internal/team/store.go new file mode 100644 index 0000000..32784da --- /dev/null +++ b/server/internal/team/store.go @@ -0,0 +1,561 @@ +package team + +import ( + "errors" + "strings" + "time" + + "gorm.io/gorm" +) + +// Store handles all database operations for the team module. +type Store struct { + db *gorm.DB +} + +func NewStore(db *gorm.DB) *Store { + return &Store{db: db} +} + +// ─── Session ─────────────────────────────────────────────────────────────── + +func (s *Store) CreateSession(sess *TeamSession) error { + return s.db.Create(sess).Error +} + +func (s *Store) GetSession(id string) (*TeamSession, error) { + var sess TeamSession + err := s.db.First(&sess, "id = ?", id).Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, nil + } + return &sess, err +} + +func (s *Store) UpdateSession(id string, updates map[string]any) error { + return s.db.Model(&TeamSession{}).Where("id = ?", id).Updates(updates).Error +} + +func (s *Store) DeleteSession(id string) error { + return s.db.Delete(&TeamSession{}, "id = ?", id).Error +} + +func (s *Store) ListSessionsByCreator(creatorID string) ([]TeamSession, error) { + var sessions []TeamSession + err := s.db.Where("creator_id = ?", creatorID).Order("created_at DESC").Find(&sessions).Error + return sessions, err +} + +// ─── Member ──────────────────────────────────────────────────────────────── + +func (s *Store) CreateMember(m *TeamSessionMember) error { + // Backward-compatible insert: older databases may not yet have newer + // capability columns on team_session_members. 
+ return s.db.Select( + "ID", + "SessionID", + "UserID", + "MachineID", + "MachineName", + "Role", + "Status", + "ConnectedAt", + "LastHeartbeat", + ).Create(m).Error +} + +func (s *Store) GetMember(id string) (*TeamSessionMember, error) { + var m TeamSessionMember + err := s.db.First(&m, "id = ?", id).Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, nil + } + return &m, err +} + +func (s *Store) GetMemberByMachine(sessionID, machineID string) (*TeamSessionMember, error) { + var m TeamSessionMember + err := s.db.Where("session_id = ? AND machine_id = ?", sessionID, machineID).First(&m).Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, nil + } + return &m, err +} + +// GetMemberByMachineUnscoped returns a member by session+machine, including soft-deleted rows. +func (s *Store) GetMemberByMachineUnscoped(sessionID, machineID string) (*TeamSessionMember, error) { + var m TeamSessionMember + err := s.db.Unscoped().Where("session_id = ? AND machine_id = ?", sessionID, machineID).First(&m).Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, nil + } + return &m, err +} + +// GetMemberByMachineAnySessionUnscoped returns a member by machine across any session, +// including soft-deleted rows. Used for legacy schemas that enforced global machine uniqueness. 
+func (s *Store) GetMemberByMachineAnySessionUnscoped(machineID string) (*TeamSessionMember, error) { + var m TeamSessionMember + err := s.db.Unscoped().Where("machine_id = ?", machineID).Order("updated_at DESC").First(&m).Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, nil + } + return &m, err +} + +func (s *Store) ListMembers(sessionID string) ([]TeamSessionMember, error) { + var members []TeamSessionMember + err := s.db.Where("session_id = ?", sessionID).Order("created_at ASC").Find(&members).Error + return members, err +} + +func (s *Store) UpdateMember(id string, updates map[string]any) error { + return s.db.Model(&TeamSessionMember{}).Where("id = ?", id).Updates(updates).Error +} + +func (s *Store) UpdateMemberHeartbeat(id string) error { + return s.db.Model(&TeamSessionMember{}).Where("id = ?", id). + Update("last_heartbeat", time.Now()).Error +} + +func (s *Store) DeleteMember(id string) error { + return s.db.Delete(&TeamSessionMember{}, "id = ?", id).Error +} + +// ─── Task ────────────────────────────────────────────────────────────────── + +func (s *Store) CreateTask(t *TeamTask) error { + return s.db.Create(t).Error +} + +func (s *Store) CreateTasks(tasks []TeamTask) error { + return s.db.Create(&tasks).Error +} + +func (s *Store) GetTask(id string) (*TeamTask, error) { + var t TeamTask + err := s.db.First(&t, "id = ?", id).Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, nil + } + return &t, err +} + +func (s *Store) ListTasks(sessionID string) ([]TeamTask, error) { + var tasks []TeamTask + err := s.db.Where("session_id = ?", sessionID).Order("priority DESC, created_at ASC").Find(&tasks).Error + return tasks, err +} + +func (s *Store) ListPendingTasks(sessionID string) ([]TeamTask, error) { + var tasks []TeamTask + err := s.db.Where("session_id = ? AND status = ?", sessionID, TaskStatusPending). 
+ Order("priority DESC, created_at ASC").Find(&tasks).Error + return tasks, err +} + +func (s *Store) UpdateTask(id string, updates map[string]any) error { + return s.db.Model(&TeamTask{}).Where("id = ?", id).Updates(updates).Error +} + +// RetryTask increments retry_count and resets the task to pending/assigned so it +// can be dispatched again. Returns the updated task, or nil if the task has +// exhausted its retries or does not exist. +func (s *Store) RetryTask(taskID string) (*TeamTask, error) { + var task TeamTask + if err := s.db.First(&task, "id = ?", taskID).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, nil + } + return nil, err + } + if task.RetryCount >= task.MaxRetries { + return nil, nil + } + updates := map[string]any{ + "retry_count": task.RetryCount + 1, + "status": TaskStatusAssigned, + "error_message": "", + "claimed_at": nil, + "started_at": nil, + } + if err := s.db.Model(&TeamTask{}).Where("id = ?", taskID).Updates(updates).Error; err != nil { + return nil, err + } + task.RetryCount++ + task.Status = TaskStatusAssigned + return &task, nil +} + +// ClaimTask atomically transitions a task from pending/assigned → claimed. +func (s *Store) ClaimTask(taskID, memberID string) error { + now := time.Now() + result := s.db.Model(&TeamTask{}). + Where("id = ? AND status IN ?", taskID, []string{TaskStatusPending, TaskStatusAssigned}). + Updates(map[string]any{ + "assigned_member_id": memberID, + "status": TaskStatusClaimed, + "claimed_at": now, + }) + if result.Error != nil { + return result.Error + } + if result.RowsAffected == 0 { + return errors.New("task not available for claiming") + } + return nil +} + +// UnlockDependentTasks checks all tasks in the session that list completedTaskID +// in their Dependencies array. For each, if ALL its dependencies are now completed, +// the task is promoted to 'pending' and returned so the caller can notify the +// assigned machine. 
+func (s *Store) UnlockDependentTasks(sessionID, completedTaskID string) ([]TeamTask, error) { + // Step 1: tasks that mention completedTaskID in their dependencies array + var candidates []TeamTask + if err := s.db. + Where("session_id = ? AND ? = ANY(dependencies)", sessionID, completedTaskID). + Find(&candidates).Error; err != nil { + return nil, err + } + if len(candidates) == 0 { + return nil, nil + } + + // Step 2: for each candidate, verify ALL dependencies are completed + var unlocked []TeamTask + for _, task := range candidates { + if task.Status != TaskStatusPending && task.Status != TaskStatusAssigned { + continue // only unlock tasks that are still waiting + } + if len(task.Dependencies) == 0 { + continue + } + + var blockedCount int64 + if err := s.db.Model(&TeamTask{}). + Where("id = ANY(?) AND status != ?", task.Dependencies, TaskStatusCompleted). + Count(&blockedCount).Error; err != nil { + return nil, err + } + if blockedCount > 0 { + continue // still has incomplete dependencies + } + + // All dependencies done — promote to pending + if err := s.db.Model(&TeamTask{}). + Where("id = ?", task.ID). + Update("status", TaskStatusPending).Error; err != nil { + return nil, err + } + task.Status = TaskStatusPending + unlocked = append(unlocked, task) + } + return unlocked, nil +} + +// ─── Approval ────────────────────────────────────────────────────────────── + +func (s *Store) CreateApproval(a *TeamApprovalRequest) error { + return s.db.Create(a).Error +} + +func (s *Store) GetApproval(id string) (*TeamApprovalRequest, error) { + var a TeamApprovalRequest + err := s.db.First(&a, "id = ?", id).Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, nil + } + return &a, err +} + +func (s *Store) ListPendingApprovals(sessionID string) ([]TeamApprovalRequest, error) { + var approvals []TeamApprovalRequest + err := s.db.Where("session_id = ? AND status = ?", sessionID, "pending"). 
+ Order("created_at ASC").Find(&approvals).Error + return approvals, err +} + +func (s *Store) UpdateApproval(id string, updates map[string]any) error { + return s.db.Model(&TeamApprovalRequest{}).Where("id = ?", id).Updates(updates).Error +} + +// ─── Repo Affinity ───────────────────────────────────────────────────────── + +func (s *Store) UpsertRepoAffinity(r *TeamRepoAffinity) error { + return s.db. + Where("session_id = ? AND member_id = ? AND repo_remote_url = ?", + r.SessionID, r.MemberID, r.RepoRemoteURL). + Assign(TeamRepoAffinity{ + RepoLocalPath: r.RepoLocalPath, + CurrentBranch: r.CurrentBranch, + HasUncommittedChanges: r.HasUncommittedChanges, + LastSyncedAt: r.LastSyncedAt, + }). + FirstOrCreate(r).Error +} + +func (s *Store) ListReposByURL(sessionID, repoRemoteURL string) ([]TeamRepoAffinity, error) { + var affinities []TeamRepoAffinity + err := s.db.Where("session_id = ? AND repo_remote_url = ?", sessionID, repoRemoteURL). + Order("last_synced_at DESC").Find(&affinities).Error + return affinities, err +} + +func (s *Store) ListReposByMember(sessionID, memberID string) ([]TeamRepoAffinity, error) { + var affinities []TeamRepoAffinity + err := s.db.Where("session_id = ? AND member_id = ?", sessionID, memberID).Find(&affinities).Error + return affinities, err +} + +// ─── Progress ────────────────────────────────────────────────────────────── + +// TeammateProgress holds per-machine task counters for the progress view. +type TeammateProgress struct { + MemberID string `json:"memberId"` + MachineName string `json:"machineName"` + CurrentTaskID *string `json:"currentTaskId,omitempty"` + Completed int64 `json:"completed"` + Failed int64 `json:"failed"` + Running int64 `json:"running"` +} + +// SessionProgress aggregates task counts for a session, with per-member detail. 
+type SessionProgress struct { + TotalTasks int64 `json:"totalTasks"` + CompletedTasks int64 `json:"completedTasks"` + FailedTasks int64 `json:"failedTasks"` + RunningTasks int64 `json:"runningTasks"` + PendingTasks int64 `json:"pendingTasks"` + Teammates []TeammateProgress `json:"teammates"` +} + +func (s *Store) GetProgress(sessionID string) (*SessionProgress, error) { + // ── Aggregate counts ────────────────────────────────────────────────── + type statusCount struct { + Status string + Count int64 + } + var rows []statusCount + if err := s.db.Model(&TeamTask{}). + Select("status, count(*) as count"). + Where("session_id = ?", sessionID). + Group("status"). + Scan(&rows).Error; err != nil { + return nil, err + } + p := &SessionProgress{ + Teammates: []TeammateProgress{}, + } + for _, r := range rows { + p.TotalTasks += r.Count + switch r.Status { + case TaskStatusCompleted: + p.CompletedTasks = r.Count + case TaskStatusFailed: + p.FailedTasks = r.Count + case TaskStatusRunning: + p.RunningTasks = r.Count + case TaskStatusPending, TaskStatusAssigned: + p.PendingTasks += r.Count + } + } + + // ── Per-teammate breakdown ───────────────────────────────────────────── + type memberStatusCount struct { + MemberID string + Status string + Count int64 + } + var memberRows []memberStatusCount + if err := s.db.Model(&TeamTask{}). + Select("assigned_member_id as member_id, status, count(*) as count"). + Where("session_id = ? AND assigned_member_id IS NOT NULL", sessionID). + Group("assigned_member_id, status"). 
+ Scan(&memberRows).Error; err != nil { + return nil, err + } + + // Build per-member map + type tmpProgress struct { + Completed int64 + Failed int64 + Running int64 + } + memberMap := make(map[string]*tmpProgress) + for _, r := range memberRows { + if memberMap[r.MemberID] == nil { + memberMap[r.MemberID] = &tmpProgress{} + } + switch r.Status { + case TaskStatusCompleted: + memberMap[r.MemberID].Completed = r.Count + case TaskStatusFailed: + memberMap[r.MemberID].Failed = r.Count + case TaskStatusRunning: + memberMap[r.MemberID].Running = r.Count + } + } + + // Fetch member names and running task IDs + if len(memberMap) > 0 { + memberIDs := make([]string, 0, len(memberMap)) + for id := range memberMap { + memberIDs = append(memberIDs, id) + } + var members []TeamSessionMember + s.db.Where("id IN ?", memberIDs).Find(&members) + memberNames := make(map[string]string, len(members)) + for _, m := range members { + memberNames[m.ID] = m.MachineName + } + + // Running task ID per member + var runningTasks []TeamTask + s.db.Select("id, assigned_member_id"). + Where("session_id = ? AND status = ? AND assigned_member_id IS NOT NULL", + sessionID, TaskStatusRunning). + Find(&runningTasks) + runningTaskMap := make(map[string]string) + for _, t := range runningTasks { + if t.AssignedMemberID != nil { + runningTaskMap[*t.AssignedMemberID] = t.ID + } + } + + for memberID, tmp := range memberMap { + tp := TeammateProgress{ + MemberID: memberID, + MachineName: memberNames[memberID], + Completed: tmp.Completed, + Failed: tmp.Failed, + Running: tmp.Running, + } + if taskID, ok := runningTaskMap[memberID]; ok { + tp.CurrentTaskID = &taskID + } + p.Teammates = append(p.Teammates, tp) + } + } + + return p, nil +} + +// ─── Scheduling support ────────────────────────────────────────────────── + +// ListAllRepoAffinities returns every repo affinity entry for a session. 
+func (s *Store) ListAllRepoAffinities(sessionID string) ([]TeamRepoAffinity, error) { + var affinities []TeamRepoAffinity + err := s.db.Where("session_id = ?", sessionID).Find(&affinities).Error + return affinities, err +} + +// GetMemberLoadInfo returns per-member load summary for scheduling. +func (s *Store) GetMemberLoadInfo(sessionID string) (map[string]MemberLoadInfo, error) { + type memberStatusCount struct { + MemberID string + Status string + Count int64 + } + var rows []memberStatusCount + if err := s.db.Model(&TeamTask{}). + Select("assigned_member_id as member_id, status, count(*) as count"). + Where("session_id = ? AND assigned_member_id IS NOT NULL", sessionID). + Group("assigned_member_id, status"). + Scan(&rows).Error; err != nil { + return nil, err + } + + result := make(map[string]MemberLoadInfo) + for _, r := range rows { + load := result[r.MemberID] + switch r.Status { + case TaskStatusRunning, TaskStatusClaimed: + load.RunningCount += int(r.Count) + case TaskStatusAssigned: + load.AssignedCount += int(r.Count) + case TaskStatusCompleted: + load.CompletedCount += r.Count + case TaskStatusFailed: + load.FailedCount += r.Count + } + result[r.MemberID] = load + } + + // Build ActiveRepoURLs per member from running/assigned tasks + var activeTasks []TeamTask + s.db.Select("id, assigned_member_id, repo_affinity"). + Where("session_id = ? AND status IN ? AND assigned_member_id IS NOT NULL", + sessionID, []string{TaskStatusRunning, TaskStatusAssigned, TaskStatusClaimed}). + Find(&activeTasks) + for _, t := range activeTasks { + if t.AssignedMemberID == nil { + continue + } + load := result[*t.AssignedMemberID] + if load.ActiveRepoURLs == nil { + load.ActiveRepoURLs = make(map[string]bool) + } + for _, url := range t.RepoAffinity { + load.ActiveRepoURLs[url] = true + } + result[*t.AssignedMemberID] = load + } + + return result, nil +} + +// GetRunningAssignedTasks returns all tasks that are currently running, +// assigned, or claimed. 
+func (s *Store) GetRunningAssignedTasks(sessionID string) ([]TeamTask, error) { + var tasks []TeamTask + err := s.db.Where("session_id = ? AND status IN ?", + sessionID, []string{TaskStatusRunning, TaskStatusAssigned, TaskStatusClaimed}). + Find(&tasks).Error + return tasks, err +} + +// UpdateMemberCapabilities persists a leader candidate's capability data. +func (s *Store) UpdateMemberCapabilities(memberID string, caps LeaderCapability) error { + updates := map[string]any{ + "cpu_idle_percent": caps.CPUIdlePercent, + "memory_free_mb": caps.MemoryFreeMB, + "rtt_ms": caps.RTTMs, + "heartbeat_success_rate": caps.HeartbeatSuccessRate, + } + if len(caps.RepoURLs) > 0 { + updates["reported_repo_urls"] = caps.RepoURLs + } + err := s.db.Model(&TeamSessionMember{}).Where("id = ?", memberID).Updates(updates).Error + if err == nil { + return nil + } + // Compatibility fallback for databases that haven't applied the migration yet. + // Missing capability columns should not break join/election flow. + if strings.Contains(err.Error(), "SQLSTATE 42703") || strings.Contains(err.Error(), "does not exist") { + return nil + } + return err +} + +// GetSessionTargetRepos returns the distinct set of repo URLs referenced by +// all tasks in the session (used for leader scoring context). +func (s *Store) GetSessionTargetRepos(sessionID string) ([]string, error) { + var tasks []TeamTask + if err := s.db.Select("repo_affinity"). + Where("session_id = ? AND repo_affinity IS NOT NULL", sessionID). 
+		Find(&tasks).Error; err != nil {
+		return nil, err
+	}
+	// Deduplicate URLs across tasks with a set.
+	seen := make(map[string]struct{})
+	for _, t := range tasks {
+		for _, url := range t.RepoAffinity {
+			seen[url] = struct{}{}
+		}
+	}
+	// Map iteration order is unspecified, so the result ordering is
+	// non-deterministic; callers must not rely on it.
+	result := make([]string, 0, len(seen))
+	for url := range seen {
+		result = append(result, url)
+	}
+	return result, nil
+}
diff --git a/server/internal/team/types.go b/server/internal/team/types.go
new file mode 100644
index 0000000..f0f0ba6
--- /dev/null
+++ b/server/internal/team/types.go
@@ -0,0 +1,118 @@
+package team
+
+import "github.com/gorilla/websocket"
+
+// CloudEvent is the unified event envelope for all team communication.
+type CloudEvent struct {
+	EventID   string         `json:"eventId"`   // unique id per event (used for replay/dedup — NOTE(review): confirm at producers)
+	Type      string         `json:"type"`      // one of the Event* constants below
+	SessionID string         `json:"sessionId"` // owning team session
+	Timestamp int64          `json:"timestamp"` // NOTE(review): unit (seconds vs millis) not evident here — confirm
+	Payload   map[string]any `json:"payload,omitempty"` // type-specific body; omitted when empty
+}
+
+// WSConnection represents an active WebSocket connection from a client.
+type WSConnection struct {
+	ID           string
+	UserID       string
+	MachineID    string
+	SessionID    string
+	Conn         *websocket.Conn
+	Send         chan []byte   // outbound frames; capacity presumably DefaultSendChannelCapacity — confirm in hub
+	Done         chan struct{} // closed to signal teardown — TODO(review): confirm in connection handler
+	LastActivity int64
+}
+
+// Event types: Client → Cloud
+const (
+	EventSessionCreate    = "session.create"
+	EventSessionJoin      = "session.join"
+	EventTaskPlanSubmit   = "task.plan.submit"
+	EventTaskClaim        = "task.claim"
+	EventTaskProgress     = "task.progress"
+	EventTaskComplete     = "task.complete"
+	EventTaskFail         = "task.fail"
+	EventDecomposeRequest = "decompose.request"
+	EventDecomposeResult  = "decompose.result"
+	EventApprovalRequest  = "approval.request"
+	EventApprovalRespond  = "approval.respond"
+	EventMessageSend      = "message.send"
+	EventRepoRegister     = "repo.register"
+	EventExploreRequest   = "explore.request"
+	EventExploreResult    = "explore.result"
+	EventLeaderElect      = "leader.elect"
+	EventLeaderHeartbeat  = "leader.heartbeat"
+)
+
+// Event types: Cloud → Client
+const (
+	EventTaskAssigned        = "task.assigned"
+	EventTaskTerminate       = "task.terminate"
+	EventTaskInterrupted     = "task.interrupted"
+	EventApprovalPush        = "approval.push"
+	EventApprovalResponse    = "approval.response"
+	EventMessageReceive      = "message.receive"
+	EventSessionUpdated      = "session.updated"
+	EventTeammateStatus      = "teammate.status"
+	EventLeaderElected       = "leader.elected"
+	EventLeaderExpired       = "leader.expired"
+	EventLeaderSnapshot      = "leader.snapshot"
+	EventOrchestrateProgress = "orchestrate.progress"
+	EventError               = "error"
+)
+
+// Session statuses
+const (
+	SessionStatusActive    = "active"
+	SessionStatusPaused    = "paused"
+	SessionStatusCompleted = "completed"
+	SessionStatusFailed    = "failed"
+)
+
+// Member statuses
+const (
+	MemberStatusOnline  = "online"
+	MemberStatusOffline = "offline"
+	MemberStatusBusy    = "busy"
+)
+
+// Member roles
+const (
+	MemberRoleLeader   = "leader"
+	MemberRoleTeammate = "teammate"
+)
+
+// Task statuses
+const (
+	TaskStatusPending     = "pending"
+	TaskStatusAssigned    = "assigned"
+	TaskStatusClaimed     = "claimed"
+	TaskStatusRunning     = "running"
+	TaskStatusCompleted   = "completed"
+	TaskStatusFailed      = "failed"
+	TaskStatusInterrupted = "interrupted"
+)
+
+// WebSocket configuration defaults
+const (
+	DefaultWSReadBufferSize         = 1024
+	DefaultWSWriteBufferSize        = 1024
+	DefaultLeaderLockTTLSec         = 30
+	DefaultLeaderHeartbeatSec       = 10
+	DefaultEventBacklogTTLMin       = 60
+	DefaultEventLogMaxLen           = 200 // max events kept in Redis list for replay
+	DefaultMaxConnectionsPerSession = 20
+	DefaultSendChannelCapacity      = 256
+	WSPingIntervalSec               = 30
+	WSWriteWaitSec                  = 10
+	WSPongWaitSec                   = 60
+)
+
+// Redis key patterns. Printf-style %s placeholders are filled with the
+// session ID (and, where two appear, a member/client identifier).
+const (
+	redisKeyLeaderLock   = "team:session:%s:leader_lock"
+	redisKeyFencingToken = "team:session:%s:fencing_token"
+	redisKeyPresence     = "team:session:%s:presence:%s"
+	redisKeyBacklog      = "team:session:%s:backlog:%s"
+	redisKeyEventLog     = "team:session:%s:events" // recent event log for replay
+)
diff --git a/server/migrations/20260420162000_add_team_member_capability_columns.sql b/server/migrations/20260420162000_add_team_member_capability_columns.sql
new file mode
100644
index 0000000..ce97a23
--- /dev/null
+++ b/server/migrations/20260420162000_add_team_member_capability_columns.sql
@@ -0,0 +1,25 @@
+-- Add capability-related columns for CloudTeam member scoring/heartbeat.
+-- These columns back Store.UpdateMemberCapabilities; IF NOT EXISTS keeps the
+-- Up migration safe to re-apply.
+
+-- +goose Up
+-- +goose StatementBegin
+
+ALTER TABLE team_session_members
+    ADD COLUMN IF NOT EXISTS cpu_idle_percent DOUBLE PRECISION NOT NULL DEFAULT 0,
+    ADD COLUMN IF NOT EXISTS memory_free_mb DOUBLE PRECISION NOT NULL DEFAULT 0,
+    ADD COLUMN IF NOT EXISTS rtt_ms DOUBLE PRECISION NOT NULL DEFAULT 0,
+    ADD COLUMN IF NOT EXISTS heartbeat_success_rate DOUBLE PRECISION NOT NULL DEFAULT 0,
+    ADD COLUMN IF NOT EXISTS reported_repo_urls TEXT[]; -- nullable: only written when a report carries repos
+
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+
+-- Drop in reverse order of addition.
+ALTER TABLE team_session_members
+    DROP COLUMN IF EXISTS reported_repo_urls,
+    DROP COLUMN IF EXISTS heartbeat_success_rate,
+    DROP COLUMN IF EXISTS rtt_ms,
+    DROP COLUMN IF EXISTS memory_free_mb,
+    DROP COLUMN IF EXISTS cpu_idle_percent;
+
+-- +goose StatementEnd