diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4ed0490..6ba2807 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,9 +2,9 @@ name: CI on: push: - branches: [main, develop] + branches: [main, develop, 'release/**'] pull_request: - branches: [main, develop] + branches: [main, develop, 'release/**'] env: CARGO_TERM_COLOR: always diff --git a/CHANGELOG.md b/CHANGELOG.md index b326ae4..e99b10b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,58 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [Unreleased] + +## [0.4.0] - 2026-05-04 + +### Added +- **Bitbucket Cloud forge** — full PR lifecycle support (create, list, view, + merge, comments). New tracker/forge entries `bitbucket` selectable via + `parsec config` and `[forge]` settings (#240). +- **Bitbucket Pipelines CI integration** — `parsec ci` and `pr-status` + commands now report Bitbucket Pipelines build state alongside GitHub + Actions and GitLab CI (#279). +- **`parsec compress` command** — squash a stack of related commits into a + single tidy commit before shipping, preserving co-author trailers (#236). +- **`parsec ship --template`** — auto-populate the PR description from a + repository's `.github/PULL_REQUEST_TEMPLATE.md` (or first match under + `.github/PULL_REQUEST_TEMPLATE/`) (#233). +- **`ship --reviewer` and `--label`** — attach reviewers and labels at PR + creation time (#261). +- **Stack `--submit`** — open all PRs in a stack in one command (#261). +- **Stack navigation comments** — auto-posted "← prev / next →" comments on + every PR in a stack so reviewers can walk the chain (#234). +- **`ship.draft` config + `--draft` flag** — open PRs as drafts by default + when working in throwaway / WIP branches (#238). 
+- **`[worktree]` shared build cache** — `shared_cache` and `cache_strategy` + settings let new worktrees reuse `target/`, `node_modules/`, `.venv/`, etc. + from the main repo via symlink (default) or recursive copy, eliminating + cold-build cost on `parsec start` (#207). +- **Offline mode toggle** — `[behavior].offline` config and per-command + `--no-pr` / `--no-tracker` flags so parsec can operate without forge or + tracker connectivity (#237). +- **Observability lite** — every command run now has an execution ID and + step timing; opt in to JSONL export via `[observability]` settings for + tooling/agents to consume (#166). +- **Config JSON Schema + `parsec schema`** — schema published to + schemastore.org so editors auto-complete `parsec.toml`. The new + `parsec schema` subcommand emits the schema on demand (#239). +- **Windows CI coverage** — full test matrix on Windows runners (#257). +- 11 new integration tests across forge adapters and worktree paths (#278). + +### Changed +- README and reference docs updated to cover ship `--reviewer` / `--label`, + stack `--submit`, Bitbucket adapter, offline flags, build cache config, + and `parsec compress` (#265). + +### Fixed +- Windows UNC path issue (`\\?\` prefix) breaking worktree operations on + Windows hosts — resolved via the `dunce` crate (#263). + +### CI +- Trigger CI on `release/**` branches in addition to feature branches and + develop, so release-prep work is exercised before merge (#277). + ## [0.3.3] - 2026-04-22 ### Added diff --git a/Cargo.toml b/Cargo.toml index ed09c95..933c91f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "git-parsec" -version = "0.3.3" +version = "0.4.0" edition = "2021" authors = ["erishforG"] description = "Git worktree lifecycle manager — ticket to PR in one command. Parallel AI agent workflows with Jira & GitHub Issues integration." 
@@ -34,8 +34,10 @@ tokio = { version = "1", features = ["full"] } clap_mangen = "0.3" clap_complete = "4" dunce = "1" +uuid = { version = "1", features = ["v4"] } [dev-dependencies] assert_cmd = "2" predicates = "3" tempfile = "3" +mockito = "1" diff --git a/README.md b/README.md index 32a3ee5..6685b72 100644 --- a/README.md +++ b/README.md @@ -4,926 +4,111 @@ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) [![CI](https://github.com/erishforG/git-parsec/actions/workflows/ci.yml/badge.svg)](https://github.com/erishforG/git-parsec/actions) -> Git worktree lifecycle manager for parallel AI agent workflows - -**parsec** manages isolated git worktrees tied to tickets (Jira, GitHub Issues), enabling multiple AI agents or developers to work on the same repository in parallel without lock conflicts. +> **From ticket to PR. One command.** +> Git worktree lifecycle manager — isolated workspaces tied to your tickets, with stacked PRs, multi-forge support, and clean shipping. ![demo](demo.gif) -## What is parsec? - -**parsec** is a command-line tool (CLI) that automates the full lifecycle of git worktrees: create an isolated workspace from a ticket ID, work in parallel without lock conflicts, then push + create PR + clean up in one command. It integrates with **Jira**, **GitHub Issues**, and **GitLab Issues** for automatic ticket title lookup, and supports **GitHub** and **GitLab** for PR/MR creation. - -Unlike plain `git worktree`, parsec tracks workspace state, detects file conflicts across worktrees, provides operation history with undo, supports stacked PRs, and offers CI status monitoring — all from a single CLI. - ---- - -## Why parsec? 
- -### What changes day-to-day - -| What you do today | With parsec | -|---|---| -| `git checkout -b feat/xyz`, `git worktree add`, configure manually | `parsec start TICKET` | -| `git push`, `gh pr create`, `git worktree remove`, delete branch | `parsec ship TICKET` | -| Open GitHub web UI to check CI results | `parsec ci --watch` | -| Merge PR on GitHub, delete branch, clean local worktree | `parsec merge TICKET` | - -### Key metrics - -- **PR lead time**: Less time between starting a ticket and opening a PR — no setup friction, no stash management -- **Context switches eliminated**: Jump between tickets without losing state; each ticket lives in its own directory -- **Conflict prevention**: `parsec conflicts` catches cross-ticket file collisions before they become merge problems -- **0 `index.lock` conflicts**: Every worktree has its own `.git` index — no serialized writes - -### Use cases - -**Solo developer** — Work on several tickets in parallel without stashing. Ship complete features (push + PR + cleanup) with one command. See all in-flight work at a glance with `parsec list`. - -**Team** — View the active sprint as a Kanban board with `parsec board`. Detect which tickets touch the same files before review. Monitor all open PRs and their CI status from the terminal. - -**AI agent orchestration** — Run multiple coding agents on the same repo simultaneously. Every agent gets its own isolated worktree with no `index.lock` contention. Use `--json` on every command for structured output agents can parse directly. - ---- - -## The Problem - -Git uses a single working directory with a single `index.lock`. 
When multiple AI agents (or developers) try to work on the same repo simultaneously: - -- `git add/commit` operations collide on `.git/index.lock` -- Context switching between tasks requires stashing or committing WIP -- Worktrees exist but have poor lifecycle management -- No connection between tickets and working directories - -## The Solution - -```bash -# Create isolated workspaces for two tickets -$ parsec start PROJ-1234 --title "Add user authentication" -Created workspace for PROJ-1234 at /home/user/myapp.PROJ-1234 - Add user authentication - -$ parsec start PROJ-5678 --title "Fix payment timeout" -Created workspace for PROJ-5678 at /home/user/myapp.PROJ-5678 - Fix payment timeout - -# See all active workspaces -$ parsec list -╭───────────┬──────────────────────┬────────┬──────────────────┬──────────────────────────────╮ -│ Ticket │ Branch │ Status │ Created │ Path │ -├───────────┼──────────────────────┼────────┼──────────────────┼──────────────────────────────┤ -│ PROJ-1234 │ feature/PROJ-1234 │ active │ 2026-04-15 09:00 │ /home/user/myapp.PROJ-1234 │ -│ PROJ-5678 │ feature/PROJ-5678 │ active │ 2026-04-15 09:01 │ /home/user/myapp.PROJ-5678 │ -╰───────────┴──────────────────────┴────────┴──────────────────┴──────────────────────────────╯ - -# Check if any workspaces touch the same files -$ parsec conflicts -No conflicts detected. - -# Complete: push, create PR, and clean up in one step -$ parsec ship PROJ-1234 -Shipped PROJ-1234! - PR: https://github.com/org/repo/pull/42 - Workspace cleaned up. 
- -# Remove all remaining workspaces -$ parsec clean --all -Removed 1 worktree(s): - - PROJ-5678 -``` - -## Features - -- **Ticket-driven workspaces** -- Create worktrees named after Jira/GitHub Issues tickets -- **Zero-conflict parallelism** -- Each workspace has its own index, no lock contention -- **Conflict detection** -- Warns when multiple workspaces modify the same files -- **One-step shipping** -- `parsec ship` pushes, creates a GitHub PR or GitLab MR, and cleans up -- **Adopt existing branches** -- Import branches already in progress with `parsec adopt` -- **Attach to existing branches** -- Start a workspace from an existing local or remote branch with `--branch` -- **Operation history and undo** -- `parsec log` shows what happened, `parsec undo` reverts it -- **Keep branches fresh** -- `parsec sync` rebases or merges the latest base branch into any worktree -- **Agent-friendly output** -- `--json` flag on every command for machine consumption -- **Status dashboard** -- See all parallel work at a glance -- **Auto-cleanup** -- Remove worktrees for merged branches automatically -- **GitHub and GitLab** -- PR and MR creation for both platforms -- **Stacked PRs** -- Create dependent PR chains with `--on` and sync the entire stack -- **Sprint board view** -- See the active sprint as a Kanban board with `parsec board` -- **Environment diagnostics** -- `parsec doctor` validates your setup and shows what needs fixing -- **Pre-ship hooks** -- Run custom commands before shipping with configurable `[hooks]` pre_ship -- **Issue creation** -- Create GitHub/Jira issues and start worktrees in one step with `parsec create` -- **Release workflow** -- Merge, tag, and create GitHub Releases with `parsec release` -- **PR reviewers and labels** -- Assign reviewers and labels on ship with `--reviewer`/`--label` or config defaults -- **Stack submit** -- Ship an entire stack in topological order with `parsec stack --submit` -- **Cross-platform** -- Tested on Linux, macOS, and 
Windows CI - ---- - -## Impact - -### Concrete time savings - -The typical "start ticket, work, ship" flow goes from 5+ commands to 2: - -```bash -# Before parsec -git fetch origin -git checkout -b feature/PROJ-1234 origin/main -# ... open browser, look up Jira ticket title ... -git push -u origin feature/PROJ-1234 -gh pr create --title "Add user authentication" --base main -git checkout main -git worktree remove ../myapp.PROJ-1234 - -# With parsec: 2 commands, no browser -parsec start PROJ-1234 # fetches title from Jira automatically -parsec ship PROJ-1234 # push + PR + cleanup in one step -``` - -### Concrete risk reduction - -- **0 `index.lock` conflicts** — worktree isolation is physical; each workspace has its own `.git/index` -- **Conflict detection before it hurts** — `parsec conflicts` shows cross-worktree file overlap before any push -- **Undo for mistakes** — `parsec undo` reverses the last operation (start, ship, clean) - -### Token efficiency for AI agents - -Traditional AI agents waste tokens calling raw APIs. Each Jira or GitHub API call costs dozens of tokens for auth setup, pagination, and response parsing. 
**parsec packages git + tracker operations into single commands with structured output.** - -#### Before: Raw API Calls - ```bash -# Agent needs: sprint tickets + status + worktree info + PR status -# Step 1: Authenticate with Jira API -# Step 2: Find active sprint (GET /rest/agile/1.0/board/{id}/sprint?state=active) -# Step 3: Fetch sprint issues (GET /rest/agile/1.0/sprint/{id}/issue) -# Step 4: For each ticket, check local worktrees (git worktree list, parse output) -# Step 5: For each ticket, check PR status (GitHub API) -# → 5+ API calls, 100+ tokens, custom parsing logic +$ parsec start PROJ-1234 # creates worktree, fetches Jira title, sets up branch +$ parsec ship PROJ-1234 # pushes, opens PR, cleans worktree ``` -#### After: One parsec Command - -```bash -parsec board --json -# → Sprint + status-grouped tickets + worktree/PR flags in one structured JSON -``` - -#### Key benefits for AI agents - -| Capability | What it means | -|------------|---------------| -| `--json` on every command | Structured output AI can parse instantly | -| `parsec start` | git worktree + Jira fetch + state management in one call | -| `parsec board --json` | Sprint + tickets + worktree/PR status in one call | -| `parsec ship` | Push + PR creation + cleanup in one call | -| Env var defaults | Zero-arg commands after one-time setup | -| Conflict detection | AI agents can check before parallel edits | +That's the whole loop. Plain `git worktree` doesn't track state, doesn't talk to your tracker, doesn't open PRs, doesn't clean up. **parsec** does. --- -## Use Cases - -### Solo developer - -Work on multiple tickets in parallel without stashing or losing context. Each ticket lives in its own sibling directory, so switching is just `cd`. - -```bash -parsec start PROJ-1234 # new worktree from Jira ticket -parsec start PROJ-5678 # second worktree, works in parallel -cd $(parsec switch PROJ-1234) -# ... make changes, commit normally ... 
-parsec ship PROJ-1234 # push + PR + cleanup -``` - -### Team - -Keep the whole team's sprint visible from the terminal. Catch file conflicts before they become merge problems. Track all open PRs without leaving the shell. - -```bash -parsec board # sprint board: In Progress / In Review / Done -parsec conflicts # which tickets touch the same files? -parsec pr-status # CI and review state for all open PRs -parsec ci PROJ-1234 --watch # wait for CI to go green -``` - -### AI agent orchestration - -Run multiple coding agents on the same repo simultaneously. Each agent calls `parsec start` to get an isolated worktree, uses `--json` for structured output, and calls `parsec ship` when done. No `index.lock` contention, no custom shell parsing. - -```bash -# Agent 1 -parsec start PROJ-100 --json # isolated workspace, structured response - -# Agent 2 (same repo, same time) -parsec start PROJ-101 --json # separate worktree, no collision +## Why use it -# Coordinator -parsec conflicts --json # detect overlap before agents commit -parsec board --json # full sprint + PR status in one call -``` +- **No `index.lock` collisions** — every workspace has its own `.git/index`, so multiple developers (or AI agents) can run `git add` on the same repo at the same time. +- **One command per phase** — `start` (worktree + tracker fetch), `ship` (push + PR + cleanup), `merge` (merge + branch cleanup), `clean` (sweep merged branches). No web UI round-trips. +- **Stacked PRs that don't melt your brain** — `parsec start FOO --on BAR` chains workspaces; `parsec stack --submit` opens the whole stack in topological order with auto-posted "← prev / next →" navigation comments. +- **Cross-worktree conflict detection** — `parsec conflicts` flags files modified in two workspaces *before* anyone pushes. +- **Multi-forge** — GitHub, GitLab, Bitbucket Cloud. Multi-tracker — Jira, GitHub Issues, GitLab Issues, Bitbucket. 
+- **Agent-friendly** — `--json` on every command, structured error codes, JSONL execution log, headless/offline modes. --- -## Installation - -### Pre-built binaries (recommended) - -Download the latest release for your platform from [GitHub Releases](https://github.com/erishforG/git-parsec/releases): +## Install ```bash -# macOS (Apple Silicon) -curl -LO https://github.com/erishforG/git-parsec/releases/latest/download/parsec-{version}-aarch64-apple-darwin.tar.gz -tar xzf parsec-*-aarch64-apple-darwin.tar.gz -sudo mv parsec /usr/local/bin/ +# Pre-built binary (recommended) curl -LO https://github.com/erishforG/git-parsec/releases/latest/download/parsec-x86_64-unknown-linux-gnu.tar.gz tar xzf parsec-*.tar.gz && sudo mv parsec /usr/local/bin/ -# macOS (Intel) -curl -LO https://github.com/erishforG/git-parsec/releases/latest/download/parsec-{version}-x86_64-apple-darwin.tar.gz - -# Linux (x86_64) -curl -LO https://github.com/erishforG/git-parsec/releases/latest/download/parsec-{version}-x86_64-unknown-linux-gnu.tar.gz - -# Windows — download .zip from the Releases page -``` -### Via Cargo - -```bash +# Cargo (Rust toolchain required) cargo install git-parsec ``` -### Build from source - -```bash -git clone https://github.com/erishforG/git-parsec.git -cd git-parsec -cargo build --release -# Binary at ./target/release/parsec -``` -## Quick Start - -```bash -# 1. (Optional) Run interactive setup -$ parsec config init - -# 2. Start work on a ticket -$ parsec start PROJ-1234 --title "Add rate limiting" -Created workspace for PROJ-1234 at /home/user/myapp.PROJ-1234 - Add rate limiting - - Tip: cd $(parsec switch PROJ-1234) - -# 3. Switch into the workspace -$ cd $(parsec switch PROJ-1234) - -# 4. Work, commit as normal... -$ git add . && git commit -m "Implement rate limiter" - -# 5. Start a second ticket in parallel -$ parsec start PROJ-5678 --title "Fix auth bug" - -# 6. Check for file conflicts across workspaces -$ parsec conflicts - -# 7. 
Ship when done -$ parsec ship PROJ-1234 -Shipped PROJ-1234! - PR: https://github.com/org/repo/pull/42 - Workspace cleaned up. - -# 8. See what happened -$ parsec log -``` - ---- - -## Command Reference - -| Command | What it does | -|---------|-------------| -| [`parsec start`](#parsec-start-ticket) | Create an isolated worktree for a ticket | -| [`parsec adopt`](#parsec-adopt-ticket) | Import an existing branch into parsec management | -| [`parsec list`](#parsec-list) | List all active parsec-managed worktrees | -| [`parsec status`](#parsec-status-ticket) | Show detailed status of a workspace | -| [`parsec ticket`](#parsec-ticket-ticket) | View ticket details from the configured tracker | -| [`parsec ship`](#parsec-ship-ticket) | Push, create PR/MR, and clean up in one step | -| [`parsec clean`](#parsec-clean) | Remove worktrees for merged branches | -| [`parsec conflicts`](#parsec-conflicts) | Detect files modified in more than one worktree | -| [`parsec switch`](#parsec-switch-ticket) | Print (or cd to) a ticket's worktree path | -| [`parsec log`](#parsec-log-ticket) | Show operation history | -| [`parsec undo`](#parsec-undo) | Undo the last parsec operation | -| [`parsec sync`](#parsec-sync-ticket) | Rebase/merge latest base branch into a worktree | -| [`parsec open`](#parsec-open-ticket) | Open PR or ticket page in browser | -| [`parsec pr-status`](#parsec-pr-status-ticket) | Check CI and review status of shipped PRs | -| [`parsec ci`](#parsec-ci-ticket---watch---all) | Check CI pipeline status for a PR | -| [`parsec merge`](#parsec-merge-ticket---rebase---no-wait---no-delete-branch) | Merge a PR from the terminal | -| [`parsec diff`](#parsec-diff-ticket---stat---name-only) | View changes vs base branch | -| [`parsec stack`](#parsec-stack---sync---submit) | View and manage stacked PR dependencies | -| [`parsec board`](#parsec-board) | Show sprint as a Kanban board | -| [`parsec init`](#parsec-init) | Install shell integration | -| [`parsec 
config`](#parsec-config) | Configure parsec | -| [`parsec doctor`](#parsec-doctor) | Validate environment and configuration | -| [`parsec create`](#parsec-create) | Create a new issue and optionally start a worktree | -| [`parsec new-issue`](#parsec-new-issue) | Create a new issue (alias with extra options) | -| [`parsec release`](#parsec-release-version) | Merge, tag, and create a GitHub Release | -| [`parsec rename`](#parsec-rename-ticket---new-ticket-id) | Re-ticket a workspace to a different ticket ID | +Other targets (macOS arm64/x86_64, Windows x86_64) ship on every release — see [Releases](https://github.com/erishforG/git-parsec/releases). After install, run `parsec config init` for the interactive first-time setup, then `parsec doctor` to validate. --- -### `parsec start ` - -Create an isolated worktree for a ticket. Fetches the ticket title from your configured tracker (Jira, GitHub Issues) or accepts a manual title. - -``` -parsec start [--base ] [--title "text"] [--on ] [--branch ] [--hook "cmd"] -``` - -| Option | Description | -|--------|-------------| -| `-b, --base ` | Base branch to create from (default: main/master) | -| `--title "text"` | Set ticket title manually, skip tracker lookup | -| `--on ` | Stack on another ticket's branch (for dependent PRs) | -| `--branch ` | Use an existing branch instead of creating a new one | -| `--hook "cmd"` | Run a command after worktree creation (one-off hook) | +## 60-second tour ```bash -# With Jira integration (title auto-fetched) -$ parsec start CL-2283 -Created workspace for CL-2283 at /home/user/myapp.CL-2283 - Implement rate limiting for API endpoints - - Tip: cd $(parsec switch CL-2283) - -# With manual title -$ parsec start 42 --title "Fix login redirect" -Created workspace for 42 at /home/user/myapp.42 - Fix login redirect - - Tip: cd $(parsec switch 42) - -# From a specific base branch -$ parsec start PROJ-99 --base release/2.0 - -# Attach to an existing branch (local or remote) -$ parsec start 
CL-2208 --branch feature/CL-2208 - -# Attach to a remote-only branch (auto-fetches and tracks) -$ parsec start CL-2208 --branch origin/feature/CL-2208 - -# Run a setup command after creation -$ parsec start PROJ-42 --hook "npm install" -``` - ---- - -### `parsec adopt ` - -Import an existing branch into parsec management. Useful when you started work before using parsec, or when taking over someone else's branch. - -``` -parsec adopt [--branch ] [--title "text"] -``` - -| Option | Description | -|--------|-------------| -| `-b, --branch ` | Branch to adopt (default: ``) | -| `--title "text"` | Set ticket title manually | - -```bash -# Adopt a branch matching the default prefix -$ parsec adopt PROJ-1234 -Adopted branch 'feature/PROJ-1234' as PROJ-1234 at /home/user/myapp.PROJ-1234 - -# Adopt a branch with a different name -$ parsec adopt PROJ-99 --branch fix/payment-timeout -Adopted branch 'fix/payment-timeout' as PROJ-99 at /home/user/myapp.PROJ-99 -``` - ---- - -### `parsec list` +# Pull a ticket from Jira / GitHub / GitLab / Bitbucket and start work +$ parsec start PROJ-1234 +✓ Worktree: ../myapp.PROJ-1234 + Title: Add rate limiting (fetched from Jira) + Branch: feature/PROJ-1234 -List all active parsec-managed worktrees. 
- -``` -parsec list [--full] [--no-pr] -``` - -```bash +# See everything in flight $ parsec list -╭────────┬────────────────┬────────┬──────────────────┬────────────────────────────╮ -│ Ticket │ Branch │ Status │ Created │ Path │ -├────────┼────────────────┼────────┼──────────────────┼────────────────────────────┤ -│ TEST-1 │ feature/TEST-1 │ active │ 2026-04-15 09:00 │ /home/user/myapp.TEST-1 │ -│ TEST-2 │ feature/TEST-2 │ active │ 2026-04-15 09:05 │ /home/user/myapp.TEST-2 │ -╰────────┴────────────────┴────────┴──────────────────┴────────────────────────────╯ - -# Show extended metadata per worktree -$ parsec list --full -╭────────┬────────────────┬────────┬──────────────┬──────────┬─────────────────────┬───────────┬────────────────────────────╮ -│ Ticket │ Branch │ Status │ Ahead/Behind │ Unpushed │ Last Commit │ Age │ Path │ -├────────┼────────────────┼────────┼──────────────┼──────────┼─────────────────────┼───────────┼────────────────────────────┤ -│ TEST-1 │ feature/TEST-1 │ active │ +3 / -0 │ 1 │ Add rate limiting │ 2h ago │ /home/user/myapp.TEST-1 │ -│ TEST-2 │ feature/TEST-2 │ active │ +1 / -2 │ 0 │ Fix auth redirect │ 30m ago │ /home/user/myapp.TEST-2 │ -╰────────┴────────────────┴────────┴──────────────┴──────────┴─────────────────────┴───────────┴────────────────────────────╯ - -$ parsec list --json -[{"ticket":"TEST-1","path":"/home/user/myapp.TEST-1","branch":"feature/TEST-1","base_branch":"main","created_at":"2026-04-15T09:00:00Z","ticket_title":"Add auth","status":"active"}] -``` - -| Option | Description | -|--------|-------------| -| `--full` | Show extended metadata (commits, divergence, last commit) | -| `--no-pr` | Skip PR status lookup (faster, works offline) | - ---- - -### `parsec status [ticket]` - -Show detailed status of a workspace. Shows all workspaces if no ticket is specified. 
- -``` -parsec status [ticket] -``` - -```bash -$ parsec status PROJ-1234 -────────────────────────────────────────────────── - Ticket: PROJ-1234 - Title: Add user authentication - Branch: feature/PROJ-1234 - Base: main - Status: active - Created: 2026-04-15 09:00 UTC - Path: /home/user/myapp.PROJ-1234 -────────────────────────────────────────────────── -``` - ---- - -### `parsec ticket [ticket]` - -View ticket details from the configured tracker. Auto-detects the ticket from the current worktree if no argument is given. - -``` -parsec ticket [ticket] -``` - -```bash -# Auto-detect from current worktree -$ parsec ticket -CL-2283: Implement rate limiting for API endpoints - Status: In Progress - Assignee: eric.signal - URL: https://jira.example.com/browse/CL-2283 - -# Explicit ticket -$ parsec ticket CL-2283 - -# JSON output -$ parsec ticket CL-2283 --json -{"id":"CL-2283","title":"Implement rate limiting","status":"In Progress","assignee":"eric.signal","url":"https://jira.example.com/browse/CL-2283"} -``` - ---- - -### `parsec ship ` - -Push the branch, create a PR (GitHub) or MR (GitLab), and clean up the worktree. The forge is auto-detected from the remote URL. - -``` -parsec ship [--draft] [--no-pr] [--base ] [--skip-hooks] [--reviewer ]... [--label ]... -``` - -| Option | Description | -|--------|-------------| -| `--draft` | Create the PR/MR as a draft | -| `--no-pr` | Push only, skip PR/MR creation | -| `--base ` | Target base branch for PR (overrides config `default_base` and worktree base) | -| `--skip-hooks` | Skip pre-ship hooks defined in config | -| `-r, --reviewer ` | Request review from a GitHub user (repeatable) | -| `-l, --label ` | Add a label to the PR (repeatable) | - -```bash -# Push + PR + cleanup -$ parsec ship PROJ-1234 -Shipped PROJ-1234! - PR: https://github.com/org/repo/pull/42 - Workspace cleaned up. 
- -# Draft PR -$ parsec ship PROJ-5678 --draft - -# Push only, no PR -$ parsec ship PROJ-9000 --no-pr - -# Ship with reviewers and labels -$ parsec ship PROJ-1234 --reviewer alice --reviewer bob --label "needs-review" -``` - -Reviewers and labels can also be set as defaults in config: - -```toml -[ship] -default_reviewers = ["alice", "bob"] -default_labels = ["team-backend"] -``` - -Token required: set `PARSEC_GITHUB_TOKEN` (or `GITHUB_TOKEN`, `GH_TOKEN`) for GitHub, or `PARSEC_GITLAB_TOKEN` (or `GITLAB_TOKEN`) for GitLab. - ---- - -### `parsec clean` - -Remove worktrees whose branches have been merged. Use `--all` to remove everything. - -``` -parsec clean [--all] [--dry-run] -``` - -| Option | Description | -|--------|-------------| -| `--all` | Remove all worktrees, including unmerged | -| `--dry-run` | Preview what would be removed | - -```bash -# Preview first -$ parsec clean --dry-run -Would remove 1 worktree(s): - - PROJ-1234 - -# Remove merged worktrees -$ parsec clean -Removed 1 worktree(s): - - PROJ-1234 - -# Remove everything -$ parsec clean --all -Removed 3 worktree(s): - - PROJ-1234 - - PROJ-5678 - - PROJ-9000 -``` - ---- - -### `parsec conflicts` - -Detect files modified in more than one active worktree. Workspaces with no changes are skipped. - -``` -parsec conflicts -``` - -```bash -# No conflicts -$ parsec conflicts -No conflicts detected. - -# Conflicts found -$ parsec conflicts -╭──────────────────┬──────────────────────╮ -│ File │ Worktrees │ -├──────────────────┼──────────────────────┤ -│ src/api/router.rs│ PROJ-1234, PROJ-5678 │ -╰──────────────────┴──────────────────────╯ -``` - ---- - -### `parsec switch [ticket]` - -Print the absolute path to a ticket's worktree. When called without a ticket, shows an interactive picker. Designed for `cd $(parsec switch ...)`. 
- -``` -parsec switch [ticket] -``` - -```bash -# Direct switch +╭───────────┬───────────────────┬────────┬─────────────────────────╮ +│ Ticket │ Branch │ Status │ Path │ +├───────────┼───────────────────┼────────┼─────────────────────────┤ +│ PROJ-1234 │ feature/PROJ-1234 │ active │ ../myapp.PROJ-1234 │ +│ PROJ-5678 │ feature/PROJ-5678 │ active │ ../myapp.PROJ-5678 │ +╰───────────┴───────────────────┴────────┴─────────────────────────╯ + +# Hop in (shell integration auto-cd's) $ parsec switch PROJ-1234 -/home/user/myapp.PROJ-1234 - -# Interactive picker (no argument) -$ parsec switch -? Switch to workspace › -❯ PROJ-1234 — Add user authentication - PROJ-5678 — Fix payment timeout - -# Use with cd -$ cd $(parsec switch PROJ-1234) -``` - ---- - -### `parsec log [ticket]` - -Show the history of parsec operations. Each mutating command (start, adopt, ship, clean, undo) is recorded with a timestamp. - -``` -parsec log [ticket] [-n, --last N] -``` - -| Option | Description | -|--------|-------------| -| `[ticket]` | Filter to a specific ticket | -| `-n, --last N` | Show last N entries (default: 20) | - -```bash -$ parsec log -╭───┬───────┬───────────┬───────────────────────────────────────────────┬──────────────────╮ -│ # │ Op │ Ticket │ Detail │ Time │ -├───┼───────┼───────────┼───────────────────────────────────────────────┼──────────────────┤ -│ 4 │ clean │ PROJ-5678 │ Cleaned workspace for branch 'feature/5678' │ 2026-04-15 14:30 │ -│ 3 │ ship │ PROJ-1234 │ Shipped branch 'feature/PROJ-1234' │ 2026-04-15 14:02 │ -│ 2 │ start │ PROJ-5678 │ Created workspace at /home/user/myapp.5678 │ 2026-04-15 13:55 │ -│ 1 │ start │ PROJ-1234 │ Created workspace at /home/user/myapp.1234 │ 2026-04-15 09:14 │ -╰───┴───────┴───────────┴───────────────────────────────────────────────┴──────────────────╯ - -# Filter by ticket -$ parsec log PROJ-1234 - -# Last 3 entries only -$ parsec log --last 3 -``` - ---- - -### `parsec undo` - -Undo the last parsec operation. 
- -- Undo `start` or `adopt`: removes the worktree and deletes the branch -- Undo `ship` or `clean`: re-creates the worktree from the branch (if still available locally or on remote) - -``` -parsec undo [--dry-run] -``` - -| Option | Description | -|--------|-------------| -| `--dry-run` | Preview what would be undone | - -```bash -# Preview -$ parsec undo --dry-run -Would undo: start PROJ-5678 - Would remove worktree at /home/user/myapp.PROJ-5678 - Would delete branch 'feature/PROJ-5678' - -# Execute -$ parsec undo -Undid start for PROJ-5678 - Worktree removed. - -# Nothing to undo -$ parsec undo -Error: nothing to undo. Run `parsec log` to see operation history. -``` - ---- - -### `parsec sync [ticket]` - -Fetch the latest base branch and rebase (or merge) the worktree on top. Detects the current worktree automatically when no ticket is given. - -``` -parsec sync [ticket] [--all] [--strategy rebase|merge] -``` - -| Option | Description | -|--------|-------------| -| `--all` | Sync all active worktrees | -| `--strategy` | `rebase` (default) or `merge` | -```bash -# Sync current worktree -$ parsec sync -✓ rebase 1 worktree(s): - - PROJ-1234 - -# Sync a specific worktree -$ parsec sync PROJ-5678 - -# Sync all worktrees at once -$ parsec sync --all - -# Use merge instead of rebase -$ parsec sync --strategy merge -``` - ---- - -### `parsec open ` - -Open the associated PR/MR or ticket tracker page in your default browser. If the ticket has been shipped, opens the PR by default; otherwise opens the tracker page. 
- -``` -parsec open [--pr] [--ticket-page] -``` - -| Option | Description | -|--------|-------------| -| `--pr` | Force open the PR/MR page | -| `--ticket-page` | Force open the ticket tracker page | - -```bash -# Open PR if shipped, otherwise ticket page -$ parsec open PROJ-1234 -Opening https://github.com/org/repo/pull/42 - -# Force open the Jira ticket -$ parsec open PROJ-1234 --ticket-page -Opening https://yourcompany.atlassian.net/browse/PROJ-1234 - -# Force open the PR -$ parsec open PROJ-1234 --pr -Opening https://github.com/org/repo/pull/42 -``` - ---- - -### `parsec pr-status [ticket]` - -Check the CI and review status of shipped PRs. Shows CI check results, review approvals, and merge state in a color-coded table. - -``` -parsec pr-status [ticket] -``` - -```bash -# Check a specific ticket's PR -$ parsec pr-status PROJ-1234 -┌───────────┬─────┬────────┬──────────┬──────────────┐ -│ Ticket │ PR │ State │ CI │ Reviews │ -├───────────┼─────┼────────┼──────────┼──────────────┤ -│ PROJ-1234 │ #42 │ open │ ✓ passed │ ✓ approved │ -└───────────┴─────┴────────┴──────────┴──────────────┘ - -# Check all shipped PRs -$ parsec pr-status - -# JSON output -$ parsec pr-status PROJ-1234 --json -``` - -Requires: `PARSEC_GITHUB_TOKEN` (or `GITHUB_TOKEN`, `GH_TOKEN`) - ---- - -### `parsec ci [ticket] [--watch] [--all]` - -Check CI/CD pipeline status for a ticket's PR. Shows individual check runs with status, duration, and an overall summary. 
- -``` -parsec ci [ticket] [--watch] [--all] -``` - -| Option | Description | -|--------|-------------| -| `ticket` | Ticket identifier (auto-detects current worktree if omitted) | -| `--watch` | Poll CI every 5s until all checks complete | -| `--all` | Show CI for all shipped PRs | - -```bash -# Auto-detect from current worktree -$ parsec ci -CI for PROJ-1234 (PR #42, a1b2c3d) -┌────────────┬───────────┬──────────┐ -│ Check │ Status │ Duration │ -├────────────┼───────────┼──────────┤ -│ Tests │ ✓ passed │ 2m 15s │ -│ Build │ ✓ passed │ 1m 42s │ -│ Lint │ ● running │ running… │ -└────────────┴───────────┴──────────┘ -✓ CI: 2/3 — 2 passed, 1 running - -# Check a specific ticket -$ parsec ci PROJ-1234 +# Make commits the normal way, then ship — push + PR + cleanup in one shot +$ parsec ship PROJ-1234 +✓ Pushed feature/PROJ-1234 +✓ PR opened: github.com/org/repo/pull/42 +✓ Worktree cleaned up -# Watch mode — refreshes every 5s until done +# Watch CI without leaving the terminal $ parsec ci PROJ-1234 --watch -# All shipped PRs -$ parsec ci --all - -# JSON output -$ parsec ci PROJ-1234 --json -``` - -Requires: `PARSEC_GITHUB_TOKEN` (or `GITHUB_TOKEN`, `GH_TOKEN`) - ---- - -### `parsec merge [ticket] [--rebase] [--no-wait] [--no-delete-branch]` - -Merge a ticket's PR directly from the terminal. Waits for CI to pass before merging, then cleans up the local worktree. - -``` -parsec merge [ticket] [--rebase] [--no-wait] [--no-delete-branch] -``` - -| Option | Description | -|--------|-------------| -| `ticket` | Ticket identifier (auto-detects current worktree if omitted) | -| `--rebase` | Use rebase merge instead of squash (default: squash) | -| `--no-wait` | Skip CI check before merging | -| `--no-delete-branch` | Keep remote branch after merge | - -```bash -# Squash merge (default) +# Merge from terminal once CI is green $ parsec merge PROJ-1234 -Waiting for CI to pass... ✓ -Merged PR #42 for PROJ-1234! 
- Method: squash - SHA: a1b2c3d - -# Rebase merge -$ parsec merge PROJ-1234 --rebase - -# Skip CI wait -$ parsec merge PROJ-1234 --no-wait - -# JSON output -$ parsec merge PROJ-1234 --json ``` -Requires: `PARSEC_GITHUB_TOKEN` (or `GITHUB_TOKEN`, `GH_TOKEN`) - --- -### `parsec diff [ticket] [--stat] [--name-only]` - -View changes in a worktree compared to its base branch. Uses merge-base for accurate comparison. - -``` -parsec diff [ticket] [--stat] [--name-only] -``` - -| Option | Description | -|--------|-------------| -| `ticket` | Ticket identifier (auto-detects current worktree if omitted) | -| `--stat` | Show file-level summary only | -| `--name-only` | List changed file names only | +## Top features +### 🌿 Stacked PRs that don't melt your brain ```bash -# Full diff for current worktree -$ parsec diff - -# File summary -$ parsec diff PROJ-1234 --stat - -# Just file names -$ parsec diff --name-only - -# JSON output (changed files list) -$ parsec diff PROJ-1234 --json +$ parsec start PROJ-2 --on PROJ-1 # new worktree on top of PROJ-1's branch +$ parsec stack --submit # open all PRs in the stack, root first ``` +parsec auto-posts `← previous PR` / `next PR →` navigation comments so reviewers can walk the chain. ---- - -### `parsec stack [--sync] [--submit]` +### 🔄 Multi-forge, multi-tracker +- **Forges**: GitHub · GitLab · Bitbucket Cloud (full PR lifecycle on each) +- **Trackers**: Jira · GitHub Issues · GitLab Issues · Bitbucket +- **CI status**: GitHub Actions · GitLab CI · Bitbucket Pipelines -View and manage stacked PR dependencies. Worktrees created with `--on` form a dependency chain. PRs include a **stack navigation table** showing parent/child relationships. +`parsec ci` and `pr-status` work the same shape across all of them. 
-``` -parsec stack [--sync] [--submit] -``` - -| Option | Description | -|--------|-------------| -| `--sync` | Rebase the entire stack chain | -| `--submit` | Ship the entire stack in topological order (root first) | - -```bash -# Create a stack -$ parsec start PROJ-1 --title "Add models" -$ parsec start PROJ-2 --on PROJ-1 --title "Add API endpoints" -$ parsec start PROJ-3 --on PROJ-2 --title "Add frontend" +### 🤖 Agent-friendly by design +Every command has `--json`. Errors emit structured codes (E001…E013). `parsec log --export` outputs JSONL with execution IDs and per-step timing for tooling/agents to consume. `--offline` and `[workspace].offline` config skip all network ops for air-gapped or CI environments. -# View the dependency graph -$ parsec stack -Stack dependency graph: -└── PROJ-1 Add models - └── PROJ-2 Add API endpoints - └── PROJ-3 Add frontend +### 🧹 Lifecycle hygiene +`parsec clean` sweeps worktrees for already-merged branches. `parsec conflicts` flags cross-worktree file overlap before you push. `parsec undo` reverses the last operation (start, ship, clean). `parsec doctor` validates every part of your setup with actionable fix instructions. -# Sync the entire stack -$ parsec stack --sync +### 📂 Worktree build cache sharing +`[worktree].shared_cache = ["target", "node_modules", ".venv"]` lets new worktrees reuse the main repo's caches via symlink (default) or copy. Eliminates cold-build cost on `parsec start` for any project with significant dependency caches. -# Ship creates PRs with correct base branches -$ parsec ship PROJ-1 # PR to main -$ parsec ship PROJ-2 # PR to feature/PROJ-1 -$ parsec ship PROJ-3 # PR to feature/PROJ-2 +### 📋 Sprint board + issue creation +`parsec board` turns your active sprint into a Kanban board in the terminal. `parsec create` and `parsec new-issue` open issues in your tracker without leaving the shell. -# Or ship the entire stack at once -$ parsec stack --submit -Submitting stack (3 worktrees): - 1. PROJ-1 - 2. 
PROJ-2 - 3. PROJ-3 -Stack submit complete: 3/3 shipped -``` +> 27 commands total — see the [full command reference](https://erishforg.github.io/git-parsec/reference/) for every flag and example. Each PR body includes a stack navigation table: @@ -935,438 +120,85 @@ Each PR body includes a stack navigation table: --- -### `parsec board` - -Show the active sprint as a vertical board view. Fetches tickets from Jira grouped by status column, with worktree and PR indicators. - -``` -parsec board [--project ] [--board-id ] [--assignee ] [--all] -``` - -| Option | Description | -|--------|-------------| -| `-p, --project ` | Jira project key (default from env/config) | -| `--board-id ` | Jira board ID (auto-detected from project) | -| `--assignee ` | Filter by assignee (default from env/config) | -| `--all` | Show all tickets (ignore assignee filter) | - -```bash -# Show your tickets (with PARSEC_JIRA_ASSIGNEE configured) -$ parsec board - -26.04.06 ~ 26.04.20 - -In Progress (3) - CL-2283 [wt] 로그 분석 서비스 개발 - CL-2284 [wt] FDE 대시보드 관련 - CL-2291 반품 요청 API 개발 - -In Review (2) - CL-2281 [pr] ai 커피챗 준비 - CL-2280 이관 요청할 API 정리 - -# Show all team tickets -$ parsec board --all - -# JSON output for AI agents -$ parsec board --json -{"sprint":{"id":123,"name":"...","start":"...","end":"..."},"total_count":48,"columns":{"In Progress":[...],...}} -``` - -Defaults can be set via environment variables or config file (see below). - ---- - -### `parsec init` +## Configuration (minimal example) -Output or install shell integration for auto-cd on `parsec switch` and CWD recovery after `parsec merge`. 
- -``` -parsec init [shell] [--install] [--yes] -``` - -| Option | Description | -|--------|-------------| -| `shell` | Shell type: `zsh` (default) or `bash` | -| `--install` | Auto-append integration to shell config file | -| `-y, --yes` | Skip confirmation prompt (for scripting) | - -```bash -# Print the shell function (pipe to eval) -$ parsec init zsh - -# Auto-install into ~/.zshrc -$ parsec init --install -Add shell integration to /home/user/.zshrc? [Y/n] y -Shell integration added. Run `source ~/.zshrc` or restart your shell. - -# Non-interactive install -$ parsec init --install --yes -``` - ---- - -### `parsec config` - -```bash -# Interactive setup wizard -$ parsec config init +```toml +# ~/.config/parsec/config.toml +#:schema https://json.schemastore.org/parsec.json -# Show current configuration -$ parsec config show [workspace] - layout = sibling - base_dir = .parsec/workspaces - branch_prefix = feature/ +layout = "sibling" # ../repo.ticket/ (alt: "internal") +branch_prefix = "feature/" +offline = false # true = skip all network ops [tracker] - provider = jira - jira.base_url = https://yourcompany.atlassian.net - -[ship] - auto_pr = true - auto_cleanup = true - draft = false - # default_base = "develop" # Target branch for PRs (default: worktree base) - -# Output shell integration script -$ parsec config shell zsh - -# Generate shell completions -$ parsec config completions zsh - -# Install man page -$ sudo parsec config man -``` - ---- - -### `parsec doctor` - -Validate your environment and configuration. Prints ✓/✗ for each check with actionable fix instructions. 
- -```bash -$ parsec doctor -parsec doctor - ✓ git version 2.43.0 (worktree support ok) - ✓ config file found at ~/.config/parsec/config.toml - ✓ GitHub token configured (github.com) via gh auth token - ✗ shell integration not found in shell config - Add to ~/.zshrc: eval "$(parsec init zsh)" - ✗ tab completions not configured - Add to ~/.zshrc: eval "$(parsec config completions zsh)" - ✓ remote origin accessible - -2 check(s) failed. - -$ parsec doctor --json -{"checks":[...],"all_ok":false} -``` - -**AI agent mode** — output parsec workflow rules as a Markdown document for AI agents to consume: +provider = "jira" # jira | github | gitlab | bitbucket | none -```bash -$ parsec doctor --ai -# Outputs structured Markdown with workflow rules, command patterns, -# and best practices for AI agents using parsec -``` - ---- - -### `parsec create` - -Create a new issue on the configured tracker (GitHub Issues or Jira) and optionally start a worktree for it immediately. - -``` -parsec create --title "text" [--body "text"] [--label "a,b"] [--project KEY] [--start] -``` - -| Option | Description | -|--------|-------------| -| `--title "text"` | Issue title (required) | -| `--body "text"` | Issue body/description | -| `--label "a,b"` | Comma-separated labels | -| `-p, --project KEY` | Jira project key (auto-detected from config) | -| `--start` | Start a worktree after creation | - -```bash -# Create a GitHub issue -$ parsec create --title "Fix login redirect" --label "bug" -Created #145: Fix login redirect - https://github.com/org/repo/issues/145 - -# Create and immediately start working -$ parsec create --title "Add caching layer" --start -Created #146: Add caching layer -Created workspace for #146 at /home/user/myapp.146 -``` - ---- - -### `parsec new-issue` - -Create a new issue on the tracker (alias for `create` with additional options). Supports GitHub Issues and Jira with configurable issue type. 
- -``` -parsec new-issue --title "text" [--body "text"] [--label "a"] [--project KEY] [--issue-type TYPE] [--start] -``` - -| Option | Description | -|--------|-------------| -| `--title "text"` | Issue title (required) | -| `--body "text"` | Issue body/description | -| `--label "a"` | Labels (can be specified multiple times) | -| `-p, --project KEY` | Jira project key (auto-detected from config) | -| `--issue-type TYPE` | Jira issue type (default: Task) | -| `--start` | Auto-start a worktree for the new issue | - -```bash -# Create with issue type for Jira -$ parsec new-issue --title "Implement API caching" --issue-type Story --project CL - -# Multiple labels -$ parsec new-issue --title "Fix auth bug" --label bug --label priority -``` - ---- - -### `parsec release ` - -Create a release: merge develop to main, create a git tag, and optionally create a GitHub Release with auto-generated changelog. - -``` -parsec release [--from ] [--no-github-release] [--dry-run] -``` - -| Option | Description | -|--------|-------------| -| `` | Version string (e.g., "0.3.0") | -| `--from ` | Source branch to release from (default: develop) | -| `--no-github-release` | Skip creating GitHub Release | -| `--dry-run` | Show what would happen without making changes | - -```bash -# Full release -$ parsec release 0.3.0 -✓ Merged develop → main -✓ Tagged v0.3.0 -✓ GitHub Release created: https://github.com/org/repo/releases/tag/v0.3.0 - -# Dry run first -$ parsec release 0.4.0 --dry-run - -# Skip GitHub Release -$ parsec release 0.3.1 --no-github-release -``` - ---- - -### `parsec rename --new ` - -Re-ticket an existing workspace to a different ticket ID. Renames the branch and updates internal state. Useful when a ticket is split or re-assigned. 
- -``` -parsec rename --new -``` - -| Option | Description | -|--------|-------------| -| `--new ` | New ticket ID to assign (required) | - -```bash -# Re-ticket a workspace -$ parsec rename PROJ-100 --new PROJ-200 -Renamed PROJ-100 → PROJ-200 - Branch: feature/PROJ-100 → feature/PROJ-200 - Path: /home/user/myapp.PROJ-200 - -# JSON output -$ parsec rename PROJ-100 --new PROJ-200 --json -``` - ---- - -## Global Flags - -These flags work on every command: - -| Flag | Description | -|------|-------------| -| `--dry-run` | Preview what a command would do without making changes | -| `--json` | Machine-readable JSON output | -| `-q, --quiet` | Suppress non-essential output | -| `--repo ` | Target a different repository | - -```bash -$ parsec list --json -$ parsec ship PROJ-1234 --quiet -$ parsec status --repo /path/to/other-repo -``` - ---- - -## Shell Integration - -`parsec switch` prints a path but cannot `cd` for you. The shell integration wraps `parsec switch` so it changes your directory automatically: - -```bash -# Preferred: auto-install (appends to your shell config with confirmation) -$ parsec init --install -Add shell integration to /home/user/.zshrc? [Y/n] y -Shell integration added to /home/user/.zshrc. Run `source ~/.zshrc` or restart your shell. 
- -# Or with --yes for scripted setup -$ parsec init --install --yes +[tracker.jira] +base_url = "https://yourcompany.atlassian.net" +# Auth: PARSEC_JIRA_TOKEN env var -# Manual: add to ~/.zshrc yourself -eval "$(parsec init zsh)" +[ship] +auto_pr = true +auto_cleanup = true +draft = false # true = open PRs as drafts +# template = ".github/PULL_REQUEST_TEMPLATE.md" -# Or for bash -eval "$(parsec init bash)" +[worktree] +shared_cache = ["target", "node_modules", ".venv"] +cache_strategy = "symlink" # alt: "copy" ``` -After sourcing, `parsec switch ` will `cd` into the worktree directly: +**Auth tokens** (set via env vars, all optional): -```bash -$ parsec switch PROJ-1234 -# Now you're in /home/user/myapp.PROJ-1234 ``` - ---- - -## Shell Completions - -Generate tab-completion scripts for your shell: - -```bash -# Zsh — add to ~/.zshrc -eval "$(parsec config completions zsh)" - -# Bash — add to ~/.bashrc -eval "$(parsec config completions bash)" - -# Fish — add to ~/.config/fish/config.fish -parsec config completions fish | source - -# Other shells -parsec config completions elvish -parsec config completions powershell +PARSEC_JIRA_TOKEN PARSEC_GITHUB_TOKEN PARSEC_GITLAB_TOKEN +PARSEC_BITBUCKET_TOKEN GITHUB_TOKEN (fallback) GITLAB_TOKEN (fallback) +PARSEC_OFFLINE=1 — force offline mode globally ``` ---- - -## Man Page - -Install the man page so `man parsec` works: - -```bash -sudo parsec config man -# Man page installed to /usr/local/share/man/man1/parsec.1 - -# Custom directory -parsec config man --dir ~/.local/share/man -``` +Full schema and every option: `parsec config schema` (also published to [schemastore.org](https://schemastore.org)). 
--- -## Configuration - -Config file: `~/.config/parsec/config.toml` - -```toml -[workspace] -# "sibling" (default) creates worktrees next to repo: ../repo.ticket/ -# "internal" creates inside repo: .parsec/workspaces/ticket/ -layout = "sibling" -base_dir = ".parsec/workspaces" -branch_prefix = "feature/" - -[tracker] -# "jira" | "github" | "gitlab" | "none" -provider = "jira" +## Comparison -[tracker.jira] -base_url = "https://yourcompany.atlassian.net" -# Auth: PARSEC_JIRA_TOKEN or JIRA_PAT env var -# project = "CL" # Default project for board -# board_id = 123 # Default board ID -# assignee = "eric.signal" # Default assignee filter - -[tracker.gitlab] -base_url = "https://gitlab.com" -# Auth: PARSEC_GITLAB_TOKEN env var - -[ship] -auto_pr = true # Create PR/MR on ship -auto_cleanup = true # Remove worktree after ship -draft = false # Create PRs as drafts - -[hooks] -# Commands to run in new worktrees after creation -post_create = ["npm install"] -# Commands to run before shipping (pre-push hooks) -pre_ship = ["cargo test", "cargo clippy"] - -[release] -# branch = "main" # Target release branch (default: main) -# tag_prefix = "v" # Tag prefix (default: "v") -# changelog = true # Generate changelog in release notes - -[policy] -# protected_branches = ["main", "develop", "release/*"] # Branches that cannot be shipped to -# allowed_ship_targets = ["develop"] # Restrict PR target branches -# require_ci = false # Require CI pass before merge - -[tracker.auto_transition] -# on_start = "In Progress" # Transition when `parsec start` runs -# on_ship = "In Review" # Transition when `parsec ship` runs -# on_merge = "Done" # Transition when `parsec merge` runs -``` +| | parsec | GitButler | worktrunk | git worktree | git-town | +|---|---|---|---|---|---| +| Tracker integration | Jira + GitHub + GitLab + Bitbucket | — | — | — | — | +| Physical worktree isolation | ✅ | ❌ (virtual) | ✅ | ✅ | ❌ | +| Cross-worktree conflict detection | ✅ | n/a | ❌ | ❌ | ❌ | +| One-step ship (push 
+ PR + cleanup) | ✅ | ❌ | ❌ | ❌ | ✅ | +| Forges | GitHub + GitLab + Bitbucket | GitHub + GitLab | GitHub | — | GitHub, GitLab, Gitea, Bitbucket | +| CI integrations | Actions + GitLab CI + Bitbucket Pipelines | — | — | — | — | +| Operation log + undo | ✅ | ✅ | ❌ | ❌ | partial | +| JSON output | ✅ | ✅ | ❌ | ❌ | ❌ | +| Stacked PRs | ✅ | ✅ | ❌ | ❌ | ✅ | +| GUI | CLI only | Desktop + TUI | CLI | CLI | CLI | --- 
JSON output (`--json`) includes the same code: -| Code | Meaning | Exit Code | -|------|---------|-----------| -| E001 | No authentication token configured | 2 | -| E002 | CI checks failing | 4 | -| E003 | Merge conflicts detected | 3 | -| E004 | PR not mergeable | 5 | -| E005 | Workspace not found | 5 | -| E006 | Workspace already exists | 5 | -| E007 | No active workspaces | 5 | -| E008 | Pre-ship hook failed | 1 | -| E009 | Policy violation | 6 | -| E010 | PR not found | 5 | -| E011 | Tracker not configured | 2 | -| E012 | Ship partially completed | 1 | -| E013 | Cannot undo operation | 1 | +| | | | +|---|---|---| +| `E001` no auth token | `E005` workspace not found | `E009` policy violation | +| `E002` CI failing | `E006` workspace already exists | `E010` PR not found | +| `E003` merge conflict | `E007` no active workspaces | `E011` tracker not configured | +| `E004` PR not mergeable | `E008` pre-ship hook failed | `E012` ship partial | +| | | `E013` cannot undo | ```bash -# JSON error output example $ parsec ship PROJ-1234 --json 2>&1 {"error":{"code":"E001","message":"No GitHub token configured","hint":"Set PARSEC_GITHUB_TOKEN or run gh auth login"}} $ echo $? @@ -1375,62 +207,6 @@ $ echo $? 
--- -## Comparison with Alternatives - -| Feature | parsec | GitButler | worktrunk | git worktree | git-town | -|---------|--------|-----------|-----------|--------------|----------| -| Ticket tracker integration | Jira + GitHub Issues | No | No | No | No | -| Physical isolation | Yes (worktrees) | No (virtual branches) | Yes (worktrees) | Yes | No | -| Conflict detection | Cross-worktree | N/A | No | No | No | -| One-step ship (push+PR+clean) | Yes | No | No | No | Yes | -| GitHub + GitLab | Both | Both | GitHub | No | GitHub, GitLab, Gitea, Bitbucket | -| Operation history + undo | Yes | Yes | No | No | Yes (undo) | -| JSON output | Yes | Yes | No | No | No | -| CI monitoring | Yes (--watch) | No | No | No | No | -| Stacked PRs | Yes | Yes | No | No | Yes | -| Auto-cleanup merged | Yes | No | No | Manual | No | -| Post-create hooks | Yes | No | Yes | No | No | -| Issue creation from CLI | Yes | No | No | No | No | -| AI token efficiency | Single-command ops | N/A | N/A | N/A | N/A | -| GUI | CLI only | Desktop + TUI | CLI | CLI | CLI | -| Zero config start | Yes | No | Yes | No | No | - ---- - -## FAQ - -**How do I set up parsec with Jira?** -Set `PARSEC_JIRA_TOKEN` (or `JIRA_PAT`) and configure `[tracker.jira]` in `~/.config/parsec/config.toml` with your `base_url` and `project`. Run `parsec config init` for interactive setup, or `parsec doctor` to validate. - -**How do I set up parsec with GitHub Issues?** -Set `provider = "github"` under `[tracker]` in your config. Authentication uses `PARSEC_GITHUB_TOKEN`, `GITHUB_TOKEN`, `GH_TOKEN`, or `gh auth token` automatically. - -**Does parsec support GitLab?** -Yes. parsec supports GitLab for both issue tracking and MR creation. Set `provider = "gitlab"` and configure `[tracker.gitlab]` with your `base_url`. Set `PARSEC_GITLAB_TOKEN` for authentication. - -**Can I use parsec without a ticket tracker?** -Yes. Set `provider = "none"` or use `--title` with `parsec start` to skip tracker lookup entirely. 
- -**How do stacked PRs work?** -Use `parsec start CHILD --on PARENT` to create dependent worktrees. `parsec ship` automatically sets the correct base branch. `parsec stack --sync` rebases the entire chain. - -**What happens if two worktrees modify the same file?** -`parsec conflicts` detects cross-worktree file overlap before you push. It compares changed files across all active worktrees and warns about collisions. - -**Can I undo a ship or clean?** -Yes. `parsec undo` reverses the last operation. For `ship`, it re-creates the worktree from the branch. For `clean`, it restores from the remote branch if still available. - -**How do I protect branches from accidental shipping?** -Add a `[policy]` section to your config with `protected_branches` and `allowed_ship_targets`. parsec will reject operations that violate these rules. - -**Does parsec work with GitHub Enterprise?** -Yes. parsec auto-detects GitHub Enterprise from the remote URL and routes API calls to the correct host. Token resolution is host-aware. - -**How do AI agents use parsec?** -Every command supports `--json` for structured output. Run `parsec doctor --ai` to get a Markdown document with workflow rules and command patterns optimized for AI agent consumption. - ---- - ## License -MIT +MIT — see [LICENSE](LICENSE). diff --git a/docs/guide/index.html b/docs/guide/index.html index fb60ee0..f73627d 100644 --- a/docs/guide/index.html +++ b/docs/guide/index.html @@ -3,16 +3,64 @@ - Guide — git-parsec - - + Getting Started Guide — git-parsec | Install, configure, ship in 5 minutes + + - + + - - + + + + + + + + + + + + + + + @@ -1019,33 +1067,6 @@ } .version-banner a:hover { color: var(--accent-cyan); } - - - - - @@ -1087,6 +1108,7 @@
  • AI Agent Workflows
  • Stacked PRs
  • New Features
  • +
  • Recipes & Examples
  • @@ -1428,7 +1450,7 @@

    Config file reference

    tracker.type Issue tracker backend. - "jira" / "github" / "gitlab" + "jira" / "github" / "gitlab" / "bitbucket" / "none" tracker.base_url 

    Pre-ship hooks

    + +
    +
    +

    Recipes & Examples

    + # +
    + +

    + End-to-end examples for the workflow patterns parsec is built around — Bitbucket Cloud setup, history compression, stacked PR navigation, PR templates, offline / headless mode, observability via JSONL, editor autocomplete via the JSON Schema, and worktree build cache sharing. Each recipe is self-contained — copy the snippets and adapt to your repo. +

    + +

    Bitbucket Cloud — full PR lifecycle

    +

    + parsec now speaks Bitbucket Cloud's API: parsec ship opens PRs, parsec pr-status reports CI from Bitbucket Pipelines, parsec ci tails build status, and parsec merge merges from the terminal. Tracker integration uses the same tracker.bitbucket config block. +

    + +
    +
    +
    + Bitbucket setup +
    +
    +# Auth via env var +$ export PARSEC_BITBUCKET_TOKEN="<app-password>" +  +# Configure in ~/.config/parsec/config.toml +[tracker] +provider = "bitbucket" +[tracker.bitbucket] +workspace = "my-team" +  +$ parsec ship CL-2208 + PR opened: bitbucket.org/my-team/repo/pull-requests/142 + Bitbucket Pipelines: BUILD #318 in_progress +
    +
    + +

    Compress branch history with parsec compress

    +

    + Squash a branch's commits into one tidy commit before shipping. Co-author trailers from squashed commits are preserved automatically. +

    + +
    +
    +
    + parsec compress +
    +
    +# Squash all branch commits into one +$ parsec compress + Compressed 7 commits into one on feature/PROJ-123 +  +# With a custom message +$ parsec compress -m "feat: add user authentication" +  +# Compose: tidy history then ship +$ parsec compress && parsec ship +
    +
    + +

    Stack navigation comments

    +

    + When you ship a stacked PR, parsec auto-posts "← previous PR" / "next PR →" navigation comments on every PR in the stack. Reviewers can walk the chain without leaving the PR view. +

    + +

    PR template auto-fill — ship --template

    +

    + Use the repository's .github/PULL_REQUEST_TEMPLATE.md (or the first match under .github/PULL_REQUEST_TEMPLATE/) as the PR description automatically. Combine with ship.template in config.toml to make it the default. +

    + +
    +
    +
    + ship --template +
    +
    +$ parsec ship PROJ-123 --template +Loaded .github/PULL_REQUEST_TEMPLATE.md (348 chars) + PR opened with template body +
    +
    + +

    Offline mode — --offline / [workspace].offline

    +

    + Skip all network operations: tracker lookups, PR creation, fetches. Use a global --offline flag, the PARSEC_OFFLINE=1 env var, or set offline = true under [workspace] in config.toml. Per-command escapes (--no-pr, --no-tracker) remain available for finer control. +

    + +
    +
    +
    + offline mode +
    +
    +# Per-invocation +$ parsec start CL-2208 --offline --title "Add login retry" +  +# Persistent — flight mode +[workspace] +offline = true +
    +
    + +

    Observability — execution IDs + JSONL export

    +

    + Every command run gets a unique execution ID and per-step timing. parsec log --export emits one JSON object per line for tooling and AI agents to consume. Combined with --json on individual commands, parsec is fully introspectable. +

    + +
    +
    +
    + parsec log --export +
    +
    +$ parsec log --export | jq 'select(.duration_ms > 1000)' +{ + "execution_id": "01HQ3D9V7Z2...", + "op": "ship", + "ticket": "PROJ-123", + "steps": [ + {"name":"push","ms":820}, + {"name":"create_pr","ms":1305}, + {"name":"cleanup","ms":42} + ], + "duration_ms": 2167 +} +
    +
    + +

    Config JSON Schema — editor autocomplete

    +

    + The schema for config.toml is published to schemastore.org, so VS Code, IntelliJ, Helix, and any editor with schemastore integration auto-complete and validate every key. parsec config schema emits the schema for offline use. +

    + +
    +
    +
    + config schema +
    +
    +$ parsec config schema > parsec-schema.json +  +# Pin schema in your config for editor support +#:schema https://json.schemastore.org/parsec.json +
    +
    + +

    Worktree build cache sharing — [worktree].shared_cache

    +

    + New worktrees can reuse target/, node_modules/, .venv/, etc. from the main repo via symlink (default) or recursive copy. Eliminates cold-build cost on parsec start for any project with significant dependency caches. +

    + +
    +
    +
    + [worktree] config +
    +
    +[worktree] +shared_cache = ["target", "node_modules", ".venv"] +# "symlink" (default) — fast, zero-disk; parallel build of same artifact may race +# "copy" — independent caches per worktree, no race risk, more disk +cache_strategy = "symlink" +
    +
    + +

    Draft-by-default — ship.draft

    +

    + Set [ship].draft = true in config.toml to open every PR as a draft, or pass --draft per ship. Useful for iterative WIP review flows where you want CI feedback before requesting human review. +

    +
    + diff --git a/docs/index.html b/docs/index.html index 3773010..a12bcf4 100644 --- a/docs/index.html +++ b/docs/index.html @@ -3,36 +3,52 @@ - git-parsec — Full-lifecycle worktree management - - + git-parsec — From ticket to PR. One command. | Git worktree lifecycle manager + + + - - + + + + + + + - - + + + + + + + + + + @@ -926,19 +973,6 @@ } .version-banner a:hover { color: var(--accent-cyan); } - - - - @@ -984,6 +1018,7 @@
  • create
  • new-issue
  • rename
  • +
  • compress
  • release
  • @@ -1065,6 +1100,7 @@

    Global Options — available on every command

    -q / --quiet suppress non-essential output
    --repo <PATH> target repository path
    --dry-run preview changes without executing
    +
    --offline skip all network ops (tracker, PR, fetch)
    @@ -1914,6 +1950,7 @@

    History

    --last <N>Show only the last N operations. + --exportEmit the log as JSONL (one JSON object per line). Each entry includes execution_id and per-step timing for observability/debugging by tooling and AI agents. @@ -1930,6 +1967,11 @@

    History

    2024-01-15 09:05 start PROJ-123 worktree created 2024-01-14 17:44 start PROJ-125 worktree created 2024-01-14 16:30 clean 3 worktrees removed +  +# JSONL export — one JSON object per line, with execution_id and per-step timing +$ parsec log --export +{"execution_id":"01HQ3D8R2K8...","op":"start","ticket":"PROJ-123","steps":[{"name":"fetch_title","ms":214},{"name":"create_worktree","ms":98}],"duration_ms":312} +{"execution_id":"01HQ3D9V7Z2...","op":"ship","ticket":"PROJ-123","steps":[{"name":"push","ms":820},{"name":"create_pr","ms":1305},{"name":"cleanup","ms":42}],"duration_ms":2167} @@ -2095,6 +2137,7 @@

    Setup

    showDisplay current configuration (redacts sensitive tokens). manOpen the parsec manual in your pager. completions <SHELL>Generate shell completion script for zsh, bash, or fish. + schemaOutput the JSON Schema for config.toml. The schema is also published to schemastore.org so editors auto-complete configuration files. shellDeprecated. Use parsec init <SHELL> instead. @@ -2118,6 +2161,9 @@

    Setup

      # Show current config $ parsec config show +  +# Output the JSON Schema (also at https://json.schemastore.org/parsec.json) +$ parsec config schema > parsec-schema.json @@ -2327,6 +2373,62 @@

    Setup

    + +
    +
    + compress + Squash all branch commits into one + # +
    +

    + Resets the branch to the merge-base with the base branch and re-commits all changes as a single commit. Co-author trailers from squashed commits are preserved. Useful before parsec ship to keep PR history tidy. +

    +
    + Usage + parsec compress [TICKET] [OPTIONS] +
    + +
    + + + + + + + +
    Argument — Description
    [TICKET] — Optional. Auto-detects the current worktree's ticket if omitted.
    +
    + +
    + + + + + + + +
    Option — Description
    -m, --message <TEXT> — Custom commit message. Default: combines all squashed commit messages.
    +
    + +
    +
    +
    + parsec compress +
    +
    +# Compress current worktree's branch +$ parsec compress + Compressed 7 commits into one on feature/PROJ-1234 +  +# Compress with custom message +$ parsec compress PROJ-1234 -m "feat: add user authentication" +  +# Combine with ship +$ parsec compress && parsec ship +
    +
    +
    +
    diff --git a/docs/robots.txt b/docs/robots.txt index 4e53dda..891fa51 100644 --- a/docs/robots.txt +++ b/docs/robots.txt @@ -1,4 +1,27 @@ User-agent: * Allow: / +# Explicitly welcome major AI/LLM crawlers — git-parsec is open-source +# and benefits from being surfaced in AI-generated answers. +User-agent: GPTBot +Allow: / + +User-agent: ChatGPT-User +Allow: / + +User-agent: ClaudeBot +Allow: / + +User-agent: anthropic-ai +Allow: / + +User-agent: PerplexityBot +Allow: / + +User-agent: Google-Extended +Allow: / + +User-agent: CCBot +Allow: / + Sitemap: https://erishforg.github.io/git-parsec/sitemap.xml diff --git a/docs/sitemap.xml b/docs/sitemap.xml index 380a220..47968a2 100644 --- a/docs/sitemap.xml +++ b/docs/sitemap.xml @@ -2,22 +2,34 @@ https://erishforg.github.io/git-parsec/ - 2026-04-22 + 2026-05-04 weekly 1.0 https://erishforg.github.io/git-parsec/guide/ - 2026-04-22 + 2026-05-04 weekly 0.8 https://erishforg.github.io/git-parsec/reference/ - 2026-04-22 + 2026-05-04 weekly 0.8 + + https://erishforg.github.io/git-parsec/llms.txt + 2026-05-04 + weekly + 0.7 + + + https://erishforg.github.io/git-parsec/llms-full.txt + 2026-05-04 + weekly + 0.7 + https://erishforg.github.io/git-parsec/v/0.3.3/ 2026-04-23 diff --git a/schema/parsec-config.schema.json b/schema/parsec-config.schema.json new file mode 100644 index 0000000..e5ddf70 --- /dev/null +++ b/schema/parsec-config.schema.json @@ -0,0 +1,298 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "https://raw.githubusercontent.com/erishforG/git-parsec/main/schema/parsec-config.schema.json", + "title": "Parsec Configuration", + "description": "Configuration file for git-parsec CLI (https://github.com/erishforG/git-parsec)", + "type": "object", + "properties": { + "workspace": { + "type": "object", + "description": "Worktree layout and branch settings", + "properties": { + "layout": { + "type": "string", + "enum": ["sibling", "internal"], + "default": "sibling", + "description": "Worktree 
layout: sibling (../repo.ticket/) or internal (.parsec/workspaces/ticket/)" + }, + "base_dir": { + "type": "string", + "default": ".parsec/workspaces", + "description": "Base directory for internal layout worktrees" + }, + "branch_prefix": { + "type": "string", + "default": "feature/", + "description": "Prefix for new worktree branches" + }, + "default_base": { + "type": "string", + "description": "Default base branch for worktree creation (e.g. develop)" + }, + "offline": { + "type": "boolean", + "default": false, + "description": "When true, skip all network operations by default" + } + }, + "additionalProperties": false + }, + "worktree": { + "type": "object", + "description": "Build cache sharing for new worktrees", + "properties": { + "shared_cache": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Directories to share from the main repo into new worktrees (e.g. target, node_modules, .venv)" + }, + "cache_strategy": { + "type": "string", + "enum": ["symlink", "copy"], + "default": "symlink", + "description": "How to share cache directories: symlink (fast, shared state) or copy (independent state)" + } + }, + "additionalProperties": false + }, + "tracker": { + "type": "object", + "description": "Issue tracker integration settings", + "properties": { + "provider": { + "type": "string", + "enum": ["none", "jira", "github", "gitlab"], + "default": "none", + "description": "Issue tracker provider" + }, + "jira": { + "type": "object", + "description": "Jira-specific configuration", + "properties": { + "base_url": { + "type": "string", + "description": "Jira base URL (e.g. 
https://yourorg.atlassian.net)" + }, + "email": { + "type": "string", + "description": "Jira account email" + }, + "project": { + "type": "string", + "description": "Default Jira project key" + }, + "board_id": { + "type": "integer", + "description": "Default Jira board ID" + }, + "assignee": { + "type": "string", + "description": "Default assignee for board/inbox filters" + }, + "token": { + "type": "string", + "description": "Jira API token (prefer PARSEC_JIRA_TOKEN env var)" + } + }, + "required": ["base_url"] + }, + "gitlab": { + "type": "object", + "description": "GitLab-specific configuration", + "properties": { + "base_url": { + "type": "string", + "description": "GitLab base URL (e.g. https://gitlab.com)" + } + }, + "required": ["base_url"] + }, + "auto_transition": { + "type": "object", + "description": "Automatic ticket status transitions", + "properties": { + "on_start": { + "type": "string", + "description": "Target status when running parsec start (e.g. In Progress)" + }, + "on_ship": { + "type": "string", + "description": "Target status when running parsec ship (e.g. In Review)" + }, + "on_merge": { + "type": "string", + "description": "Target status when running parsec merge (e.g. 
Done)" + } + }, + "additionalProperties": false + }, + "comment_on_ship": { + "type": "boolean", + "default": false, + "description": "Auto-post PR link as comment on the ticket during parsec ship" + } + }, + "additionalProperties": false + }, + "ship": { + "type": "object", + "description": "PR/MR creation settings", + "properties": { + "auto_pr": { + "type": "boolean", + "default": true, + "description": "Automatically open a PR when shipping" + }, + "auto_cleanup": { + "type": "boolean", + "default": true, + "description": "Clean up worktree after shipping" + }, + "draft": { + "type": "boolean", + "default": false, + "description": "Create PRs as drafts by default" + }, + "default_base": { + "type": "string", + "description": "Default target base branch for PRs" + }, + "default_reviewers": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Default reviewers to request on PRs (GitHub usernames)" + }, + "default_labels": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Default labels to apply to PRs" + } + }, + "additionalProperties": false + }, + "hooks": { + "type": "object", + "description": "Lifecycle hook commands", + "properties": { + "post_create": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Commands to run after creating a worktree" + }, + "pre_ship": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Commands to run before shipping a worktree" + } + }, + "additionalProperties": false + }, + "release": { + "type": "object", + "description": "Release workflow settings", + "properties": { + "branch": { + "type": "string", + "default": "main", + "description": "Target branch for release" + }, + "tag_prefix": { + "type": "string", + "default": "v", + "description": "Tag prefix (e.g. 
v for v0.3.0)" + }, + "changelog": { + "type": "boolean", + "default": true, + "description": "Auto-generate changelog for releases" + } + }, + "additionalProperties": false + }, + "policy": { + "type": "object", + "description": "Branch policy and guardrails", + "properties": { + "protected_branches": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Branches that cannot be used as ship targets (supports glob with *)" + }, + "allowed_ship_targets": { + "type": "array", + "items": { "type": "string" }, + "default": [], + "description": "Branches allowed as ship targets (if empty, all non-protected are allowed)" + }, + "require_ci": { + "type": "boolean", + "default": false, + "description": "Require CI to pass before parsec merge" + } + }, + "additionalProperties": false + }, + "github": { + "type": "object", + "description": "Per-host GitHub tokens (keys are hostnames)", + "additionalProperties": { + "type": "object", + "properties": { + "token": { + "type": "string", + "description": "Personal access token for this GitHub host" + } + }, + "additionalProperties": false + } + }, + "repos": { + "type": "object", + "description": "Per-repo configuration overrides (keys are owner/repo)", + "additionalProperties": { + "type": "object", + "properties": { + "tracker": { + "type": "object", + "description": "Tracker overrides for this repo", + "properties": { + "provider": { + "type": "string", + "enum": ["none", "jira", "github", "gitlab"], + "description": "Override tracker provider for this repo" + }, + "jira": { + "type": "object", + "properties": { + "base_url": { "type": "string" }, + "email": { "type": "string" }, + "project": { "type": "string" }, + "board_id": { "type": "integer" }, + "assignee": { "type": "string" }, + "token": { "type": "string" } + }, + "required": ["base_url"] + }, + "gitlab": { + "type": "object", + "properties": { + "base_url": { "type": "string" } + }, + "required": ["base_url"] + } + }, + 
"additionalProperties": false + } + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false +} diff --git a/src/bitbucket/mod.rs b/src/bitbucket/mod.rs new file mode 100644 index 0000000..1466d6f --- /dev/null +++ b/src/bitbucket/mod.rs @@ -0,0 +1,752 @@ +//! Bitbucket Cloud REST API v2 integration. +//! +//! Provides PR creation, status, merge, CI pipeline monitoring, +//! and branch-based PR lookup for Bitbucket Cloud repositories. + +use std::time::Duration; + +use anyhow::{bail, Context, Result}; +use reqwest::Client; +use serde::Deserialize; + +// --------------------------------------------------------------------------- +// Data types +// --------------------------------------------------------------------------- + +/// Result of PR creation +#[derive(Debug, Clone)] +#[allow(dead_code)] +pub struct PrResult { + pub url: String, + pub id: u64, +} + +/// PR status information +#[derive(Debug, Clone)] +pub struct PrStatus { + pub id: u64, + pub title: String, + pub state: String, + pub url: String, +} + +/// Result of merging a PR +#[derive(Debug, Clone)] +pub struct MergeResult { + pub merged: bool, + pub message: String, +} + +/// A single pipeline step/result +#[derive(Debug, Clone)] +pub struct PipelineStatus { + pub name: String, + pub state: String, + pub result: Option, + pub url: Option, +} + +/// A PR participant (reviewer/commenter) used for review_status mapping. +#[derive(Debug, Clone)] +pub struct Participant { + /// Bitbucket review state: "approved", "changes_requested", or None. + pub state: Option, + /// Convenience boolean flag from the Bitbucket API. + pub approved: bool, + /// Role: "REVIEWER" or "PARTICIPANT". 
+ pub role: Option, +} + +/// Parsed Bitbucket remote info +#[derive(Debug, Clone)] +pub struct BitbucketRemote { + pub workspace: String, + pub repo_slug: String, +} + +// --------------------------------------------------------------------------- +// API response types (private) +// --------------------------------------------------------------------------- + +#[derive(Deserialize)] +struct ApiPr { + id: Option, + title: Option, + state: Option, + links: Option, + #[serde(default)] + source: Option, + #[serde(default)] + participants: Option>, +} + +#[derive(Deserialize)] +struct ApiPrEndpoint { + branch: Option, +} + +#[derive(Deserialize)] +struct ApiBranch { + name: Option, +} + +#[derive(Deserialize)] +struct ApiParticipant { + #[serde(default)] + state: Option, + #[serde(default)] + approved: Option, + #[serde(default)] + role: Option, +} + +#[derive(Deserialize)] +struct ApiLinks { + html: Option, +} + +#[derive(Deserialize)] +struct ApiHref { + href: Option, +} + +#[derive(Deserialize)] +struct ApiPrList { + values: Option>, +} + +#[derive(Deserialize)] +#[allow(dead_code)] +struct ApiPipeline { + uuid: Option, + state: Option, + target: Option, +} + +#[derive(Deserialize)] +#[allow(dead_code)] +struct ApiPipelineState { + name: Option, + result: Option, +} + +#[derive(Deserialize)] +#[allow(dead_code)] +struct ApiPipelineResult { + name: Option, +} + +#[derive(Deserialize)] +#[allow(dead_code)] +struct ApiPipelineTarget { + ref_name: Option, +} + +#[derive(Deserialize)] +#[allow(dead_code)] +struct ApiPipelineList { + values: Option>, +} + +// --------------------------------------------------------------------------- +// Remote URL parsing +// --------------------------------------------------------------------------- + +/// Parse a Bitbucket Cloud remote URL into BitbucketRemote. +/// Supports SSH and HTTPS forms for bitbucket.org. 
+pub fn parse_bitbucket_remote(url: &str) -> Option { + // SSH: git@bitbucket.org:workspace/repo.git + if url.starts_with("git@bitbucket.org:") { + let path = url.strip_prefix("git@bitbucket.org:")?; + let path = path.trim_end_matches(".git"); + let mut parts = path.splitn(2, '/'); + let workspace = parts.next()?.to_owned(); + let repo_slug = parts.next()?.to_owned(); + return Some(BitbucketRemote { + workspace, + repo_slug, + }); + } + + // HTTPS: https://bitbucket.org/workspace/repo.git + let rest = url + .strip_prefix("https://bitbucket.org/") + .or_else(|| url.strip_prefix("http://bitbucket.org/"))?; + let path = rest.trim_end_matches(".git"); + let mut parts = path.splitn(2, '/'); + let workspace = parts.next()?.to_owned(); + let repo_slug = parts.next()?.to_owned(); + Some(BitbucketRemote { + workspace, + repo_slug, + }) +} + +/// Check if a remote URL is a Bitbucket Cloud URL. +pub fn is_bitbucket_remote(url: &str) -> bool { + url.contains("bitbucket.org") +} + +// --------------------------------------------------------------------------- +// BitbucketClient +// --------------------------------------------------------------------------- + +/// Default Bitbucket Cloud API base URL (without trailing slash). +const DEFAULT_API_BASE: &str = "https://api.bitbucket.org/2.0"; + +/// Authenticated Bitbucket Cloud API client. +pub struct BitbucketClient { + client: Client, + remote: BitbucketRemote, + token: String, + api_base: String, +} + +impl BitbucketClient { + /// Create a new client for the given remote URL. + /// Returns `Ok(None)` when no Bitbucket token is available or URL is not Bitbucket. 
+ pub fn new(remote_url: &str) -> Result> { + if !is_bitbucket_remote(remote_url) { + return Ok(None); + } + + let remote = parse_bitbucket_remote(remote_url).ok_or_else(|| { + anyhow::anyhow!( + "could not parse workspace/repo from Bitbucket remote URL: {}", + remote_url + ) + })?; + + let token = match crate::env::bitbucket_token() { + Some(t) => t, + None => return Ok(None), + }; + + let client = Client::builder() + .timeout(Duration::from_secs(30)) + .connect_timeout(Duration::from_secs(10)) + .user_agent("git-parsec") + .build() + .context("failed to build HTTP client")?; + + let api_base = + crate::env::bitbucket_api_base().unwrap_or_else(|| DEFAULT_API_BASE.to_string()); + + Ok(Some(Self { + client, + remote, + token, + api_base, + })) + } + + /// Access the parsed remote info. + pub fn remote(&self) -> &BitbucketRemote { + &self.remote + } + + /// Repo API path prefix. + fn repo_url(&self) -> String { + format!( + "{}/repositories/{}/{}", + self.api_base, self.remote.workspace, self.remote.repo_slug + ) + } + + fn auth_get(&self, url: &str) -> reqwest::RequestBuilder { + self.client + .get(url) + .bearer_auth(&self.token) + .header("Accept", "application/json") + } + + fn auth_post(&self, url: &str) -> reqwest::RequestBuilder { + self.client + .post(url) + .bearer_auth(&self.token) + .header("Accept", "application/json") + .header("Content-Type", "application/json") + } + + // -- API methods --------------------------------------------------------- + + /// Create a pull request. 
+ pub async fn create_pr( + &self, + branch: &str, + base: &str, + title: &str, + description: &str, + _draft: bool, // Bitbucket Cloud doesn't support draft PRs natively + ) -> Result { + let url = format!("{}/pullrequests", self.repo_url()); + + let payload = serde_json::json!({ + "title": title, + "description": description, + "source": { + "branch": { "name": branch } + }, + "destination": { + "branch": { "name": base } + }, + "close_source_branch": true + }); + + let response = self + .auth_post(&url) + .json(&payload) + .send() + .await + .context("Failed to send PR creation request to Bitbucket")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Bitbucket API returned {}: {}", status, body); + } + + let pr: ApiPr = response + .json() + .await + .context("Failed to parse Bitbucket API response")?; + + let id = pr.id.unwrap_or(0); + let html_url = pr + .links + .and_then(|l| l.html) + .and_then(|h| h.href) + .unwrap_or_else(|| { + format!( + "https://bitbucket.org/{}/{}/pull-requests/{}", + self.remote.workspace, self.remote.repo_slug, id + ) + }); + + Ok(PrResult { url: html_url, id }) + } + + /// Find an open PR by source branch name. + pub async fn find_pr_by_branch(&self, branch: &str) -> Result> { + let url = format!( + "{}/pullrequests?q=source.branch.name=\"{}\" AND state=\"OPEN\"", + self.repo_url(), + branch + ); + + let response = self + .auth_get(&url) + .send() + .await + .context("Failed to query Bitbucket PRs")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Bitbucket API returned {}: {}", status, body); + } + + let list: ApiPrList = response.json().await?; + Ok(list.values.and_then(|v| v.first().and_then(|pr| pr.id))) + } + + /// Get PR status by ID. 
+ pub async fn get_pr_status(&self, pr_id: u64) -> Result { + let url = format!("{}/pullrequests/{}", self.repo_url(), pr_id); + + let response = self + .auth_get(&url) + .send() + .await + .context("Failed to fetch Bitbucket PR")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Bitbucket API returned {}: {}", status, body); + } + + let pr: ApiPr = response.json().await?; + let id = pr.id.unwrap_or(pr_id); + let html_url = pr + .links + .and_then(|l| l.html) + .and_then(|h| h.href) + .unwrap_or_default(); + + Ok(PrStatus { + id, + title: pr.title.unwrap_or_default(), + state: pr.state.unwrap_or_else(|| "unknown".to_string()), + url: html_url, + }) + } + + /// Merge a PR. + pub async fn merge_pr(&self, pr_id: u64, strategy: &str) -> Result { + let url = format!("{}/pullrequests/{}/merge", self.repo_url(), pr_id); + + // Bitbucket merge strategies: merge_commit, squash, fast_forward + let bb_strategy = match strategy { + "squash" => "squash", + "rebase" => "fast_forward", + _ => "merge_commit", + }; + + let payload = serde_json::json!({ + "merge_strategy": bb_strategy, + "close_source_branch": true + }); + + let response = self + .auth_post(&url) + .json(&payload) + .send() + .await + .context("Failed to merge Bitbucket PR")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Bitbucket merge failed ({}): {}", status, body); + } + + Ok(MergeResult { + merged: true, + message: format!("PR #{} merged via {}", pr_id, bb_strategy), + }) + } + + /// Get pipeline status for a branch. 
+ pub async fn get_pipelines(&self, branch: &str) -> Result> { + let url = format!( + "{}/pipelines/?sort=-created_on&pagelen=5&target.ref_name={}", + self.repo_url(), + branch + ); + + let response = self + .auth_get(&url) + .send() + .await + .context("Failed to fetch Bitbucket pipelines")?; + + if !response.status().is_success() { + // Pipelines may not be enabled — return empty + return Ok(Vec::new()); + } + + let list: ApiPipelineList = response.json().await?; + let pipelines = list + .values + .unwrap_or_default() + .into_iter() + .map(|p| { + let state_name = p + .state + .as_ref() + .and_then(|s| s.name.clone()) + .unwrap_or_else(|| "unknown".to_string()); + let result_name = p + .state + .as_ref() + .and_then(|s| s.result.as_ref()) + .and_then(|r| r.name.clone()); + let uuid = p.uuid.unwrap_or_default(); + let ref_name = p + .target + .and_then(|t| t.ref_name) + .unwrap_or_else(|| branch.to_string()); + let pipeline_url = format!( + "https://bitbucket.org/{}/{}/pipelines/results/{}", + self.remote.workspace, + self.remote.repo_slug, + uuid.trim_matches(|c| c == '{' || c == '}') + ); + PipelineStatus { + name: format!("pipeline ({})", ref_name), + state: state_name, + result: result_name, + url: Some(pipeline_url), + } + }) + .collect(); + + Ok(pipelines) + } + + /// Get the latest pipeline (most recently created) for a branch, if any. + pub async fn get_latest_pipeline_for_branch( + &self, + branch: &str, + ) -> Result> { + Ok(self.get_pipelines(branch).await?.into_iter().next()) + } + + /// Fetch the source branch name for a PR. 
+ pub async fn get_pr_source_branch(&self, pr_id: u64) -> Result> { + let url = format!("{}/pullrequests/{}", self.repo_url(), pr_id); + let response = self + .auth_get(&url) + .send() + .await + .context("Failed to fetch Bitbucket PR")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Bitbucket API returned {}: {}", status, body); + } + + let pr: ApiPr = response.json().await?; + Ok(pr + .source + .and_then(|s| s.branch) + .and_then(|b| b.name) + .filter(|n| !n.is_empty())) + } + + /// Fetch participants for a PR. Returns an empty vec when the PR has no participants + /// or the API call fails (callers may interpret this as "unknown / pending"). + pub async fn get_pr_participants(&self, pr_id: u64) -> Result> { + let url = format!("{}/pullrequests/{}", self.repo_url(), pr_id); + let response = self + .auth_get(&url) + .send() + .await + .context("Failed to fetch Bitbucket PR")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + bail!("Bitbucket API returned {}: {}", status, body); + } + + let pr: ApiPr = response.json().await?; + Ok(pr + .participants + .unwrap_or_default() + .into_iter() + .map(|p| Participant { + state: p.state, + approved: p.approved.unwrap_or(false), + role: p.role, + }) + .collect()) + } +} + +// --------------------------------------------------------------------------- +// Pure mapping functions (forge-agnostic vocabulary) +// --------------------------------------------------------------------------- + +/// Map a Bitbucket pipeline (state + optional result) to the same `ci_status` +/// vocabulary that the GitHub path emits: `passing` | `failing` | `pending` +/// | `no checks` | `unknown`. +/// +/// Bitbucket pipeline `state.name` values: `PENDING`, `IN_PROGRESS`, +/// `COMPLETED`, `HALTED`, `STOPPED`. 
When `state.name == "COMPLETED"`, +/// `state.result.name` is one of `SUCCESSFUL`, `FAILED`, `ERROR`, +/// `STOPPED`, `EXPIRED`. +pub fn pipeline_to_ci_status(state: &str, result: Option<&str>) -> String { + match state.to_ascii_uppercase().as_str() { + "COMPLETED" => match result.map(|r| r.to_ascii_uppercase()).as_deref() { + Some("SUCCESSFUL") => "passing".to_string(), + Some("FAILED") | Some("ERROR") | Some("STOPPED") | Some("EXPIRED") => { + "failing".to_string() + } + _ => "unknown".to_string(), + }, + "PENDING" | "IN_PROGRESS" | "HALTED" => "pending".to_string(), + _ => "unknown".to_string(), + } +} + +/// Convenience wrapper: map an optional `PipelineStatus` to a `ci_status` string. +/// `None` → `"no checks"` (consistent with GitHub's empty-checks rendering). +pub fn pipeline_status_to_ci_string(p: Option<&PipelineStatus>) -> String { + match p { + Some(p) => pipeline_to_ci_status(&p.state, p.result.as_deref()), + None => "no checks".to_string(), + } +} + +/// Map Bitbucket PR participants to the same `review_status` vocabulary the +/// GitHub path emits: `approved` | `changes_requested` | `pending` | `no reviews`. +/// +/// - Any participant with `state == "changes_requested"` → `changes_requested`. +/// - Else any participant with `approved == true` (or `state == "approved"`) → `approved`. +/// - Else if there are any reviewer-role participants → `pending`. +/// - Else → `no reviews`. 
+pub fn participants_to_review_status(participants: &[Participant]) -> String { + if participants + .iter() + .any(|p| p.state.as_deref() == Some("changes_requested")) + { + return "changes_requested".to_string(); + } + if participants + .iter() + .any(|p| p.approved || p.state.as_deref() == Some("approved")) + { + return "approved".to_string(); + } + if participants + .iter() + .any(|p| p.role.as_deref() == Some("REVIEWER")) + { + return "pending".to_string(); + } + "no reviews".to_string() +} + +#[cfg(test)] +mod tests { + use super::*; + + // -- pipeline_to_ci_status ------------------------------------------------- + + #[test] + fn pipeline_completed_successful_is_passing() { + assert_eq!( + pipeline_to_ci_status("COMPLETED", Some("SUCCESSFUL")), + "passing" + ); + } + + #[test] + fn pipeline_completed_failed_is_failing() { + assert_eq!( + pipeline_to_ci_status("COMPLETED", Some("FAILED")), + "failing" + ); + } + + #[test] + fn pipeline_completed_error_is_failing() { + assert_eq!(pipeline_to_ci_status("COMPLETED", Some("ERROR")), "failing"); + } + + #[test] + fn pipeline_completed_expired_is_failing() { + assert_eq!( + pipeline_to_ci_status("COMPLETED", Some("EXPIRED")), + "failing" + ); + } + + #[test] + fn pipeline_in_progress_is_pending() { + assert_eq!(pipeline_to_ci_status("IN_PROGRESS", None), "pending"); + } + + #[test] + fn pipeline_pending_is_pending() { + assert_eq!(pipeline_to_ci_status("PENDING", None), "pending"); + } + + #[test] + fn pipeline_halted_is_pending() { + assert_eq!(pipeline_to_ci_status("HALTED", None), "pending"); + } + + #[test] + fn pipeline_unknown_state_is_unknown() { + assert_eq!(pipeline_to_ci_status("WAT", None), "unknown"); + } + + #[test] + fn pipeline_state_is_case_insensitive() { + assert_eq!( + pipeline_to_ci_status("completed", Some("successful")), + "passing" + ); + } + + #[test] + fn pipeline_status_to_ci_string_none_is_no_checks() { + assert_eq!(pipeline_status_to_ci_string(None), "no checks"); + } + + #[test] + fn 
pipeline_status_to_ci_string_passes_through() { + let p = PipelineStatus { + name: "x".into(), + state: "COMPLETED".into(), + result: Some("SUCCESSFUL".into()), + url: None, + }; + assert_eq!(pipeline_status_to_ci_string(Some(&p)), "passing"); + } + + // -- participants_to_review_status ----------------------------------------- + + fn p(state: Option<&str>, approved: bool, role: Option<&str>) -> Participant { + Participant { + state: state.map(|s| s.to_string()), + approved, + role: role.map(|s| s.to_string()), + } + } + + #[test] + fn empty_participants_is_no_reviews() { + assert_eq!(participants_to_review_status(&[]), "no reviews"); + } + + #[test] + fn changes_requested_dominates() { + let parts = vec![ + p(Some("approved"), true, Some("REVIEWER")), + p(Some("changes_requested"), false, Some("REVIEWER")), + ]; + assert_eq!(participants_to_review_status(&parts), "changes_requested"); + } + + #[test] + fn approved_state() { + let parts = vec![p(Some("approved"), true, Some("REVIEWER"))]; + assert_eq!(participants_to_review_status(&parts), "approved"); + } + + #[test] + fn approved_via_boolean_only() { + let parts = vec![p(None, true, Some("REVIEWER"))]; + assert_eq!(participants_to_review_status(&parts), "approved"); + } + + #[test] + fn reviewer_no_action_is_pending() { + let parts = vec![p(None, false, Some("REVIEWER"))]; + assert_eq!(participants_to_review_status(&parts), "pending"); + } + + #[test] + fn only_non_reviewer_participants_is_no_reviews() { + // PR author / commenter shows up as PARTICIPANT with no review state. 
+ let parts = vec![p(None, false, Some("PARTICIPANT"))]; + assert_eq!(participants_to_review_status(&parts), "no reviews"); + } + + // -- remote URL parsing (existing behavior, sanity-check) ------------------ + + #[test] + fn parse_https_remote() { + let r = parse_bitbucket_remote("https://bitbucket.org/myws/myrepo.git").unwrap(); + assert_eq!(r.workspace, "myws"); + assert_eq!(r.repo_slug, "myrepo"); + } + + #[test] + fn parse_ssh_remote() { + let r = parse_bitbucket_remote("git@bitbucket.org:myws/myrepo.git").unwrap(); + assert_eq!(r.workspace, "myws"); + assert_eq!(r.repo_slug, "myrepo"); + } + + #[test] + fn is_bitbucket_remote_detects_url() { + assert!(is_bitbucket_remote("git@bitbucket.org:foo/bar.git")); + assert!(!is_bitbucket_remote("git@github.com:foo/bar.git")); + } +} diff --git a/src/cli/commands/ci.rs b/src/cli/commands/ci.rs index 459ed09..f20cb3c 100644 --- a/src/cli/commands/ci.rs +++ b/src/cli/commands/ci.rs @@ -2,6 +2,7 @@ use std::path::Path; use anyhow::{Context, Result}; +use crate::bitbucket; use crate::config::ParsecConfig; use crate::errors::ErrorCode; use crate::git; @@ -9,20 +10,38 @@ use crate::github; use crate::output::{self, Mode}; use crate::worktree::WorktreeManager; +/// Forge backend selected for `parsec ci` based on the origin remote URL. +enum Forge { + GitHub(github::GitHubClient), + Bitbucket(bitbucket::BitbucketClient), +} + pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mode) -> Result<()> { let config = ParsecConfig::load()?; let repo_root = git::get_main_repo_root(repo).or_else(|_| git::get_repo_root(repo))?; let remote_url = git::run_output(repo, &["remote", "get-url", "origin"])?; - let gh = github::GitHubClient::new(&remote_url, &config)? - .ok_or_else(|| anyhow::anyhow!("no GitHub token found. Set PARSEC_GITHUB_TOKEN."))?; + + // Dispatch on remote type — GitHub takes priority when both tokens exist. + let forge = if let Some(gh) = github::GitHubClient::new(&remote_url, &config)? 
{ + Forge::GitHub(gh) + } else if let Some(bb) = bitbucket::BitbucketClient::new(&remote_url)? { + Forge::Bitbucket(bb) + } else { + bail_code!( + ErrorCode::E001, + "no forge token found. Set PARSEC_GITHUB_TOKEN or PARSEC_BITBUCKET_TOKEN." + ); + }; + let oplog = crate::oplog::OpLog::load(&repo_root)?; let manager = WorktreeManager::new(repo, &config)?; - // Collect (ticket_id, pr_number) pairs to check + // Collect (ticket_id, pr_number) pairs to check. Bitbucket "PR id" and + // GitHub "PR number" share the same numeric encoding in the oplog (last + // path segment of the URL), so the resolution logic is forge-agnostic. let mut targets: Vec<(String, u64)> = Vec::new(); if all { - // All shipped entries with PR numbers from oplog let entries: Vec<_> = oplog .get_entries(None) .into_iter() @@ -41,10 +60,8 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod } targets = entries; } else if !tickets.is_empty() { - // Multiple tickets specified for t in tickets { let ticket_id = t.to_string(); - // First check if there's a shipped PR in the oplog let shipped_pr = oplog .get_entries(Some(&ticket_id)) .into_iter() @@ -58,11 +75,14 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod if let Some(pr_number) = shipped_pr { targets.push((ticket_id, pr_number)); } else { - // Not shipped yet — try to find an open PR by branch name let ws = manager.get(&ticket_id).with_context(|| { format!("ticket {ticket_id} not found in active workspaces or oplog") })?; - match gh.find_pr_by_branch(&ws.branch).await? 
{ + let found = match &forge { + Forge::GitHub(gh) => gh.find_pr_by_branch(&ws.branch).await?, + Forge::Bitbucket(bb) => bb.find_pr_by_branch(&ws.branch).await?, + }; + match found { Some(pr_number) => targets.push((ticket_id, pr_number)), None => { bail_code!( @@ -85,7 +105,6 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod })?; let ticket_id = found.ticket; - // First check if there's a shipped PR in the oplog let shipped_pr = oplog .get_entries(Some(&ticket_id)) .into_iter() @@ -99,11 +118,14 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod if let Some(pr_number) = shipped_pr { targets.push((ticket_id, pr_number)); } else { - // Not shipped yet — try to find an open PR by branch name let ws = manager.get(&ticket_id).with_context(|| { format!("ticket {ticket_id} not found in active workspaces or oplog") })?; - match gh.find_pr_by_branch(&ws.branch).await? { + let pr_lookup = match &forge { + Forge::GitHub(gh) => gh.find_pr_by_branch(&ws.branch).await?, + Forge::Bitbucket(bb) => bb.find_pr_by_branch(&ws.branch).await?, + }; + match pr_lookup { Some(pr_number) => targets.push((ticket_id, pr_number)), None => { anyhow::bail!( @@ -118,11 +140,13 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod let mut statuses: Vec<(String, crate::github::CiStatus)> = Vec::new(); for (ticket_id, pr_number) in &targets { - let ci = gh.get_check_runs(*pr_number).await?; + let ci = match &forge { + Forge::GitHub(gh) => gh.get_check_runs(*pr_number).await?, + Forge::Bitbucket(bb) => fetch_bitbucket_ci(bb, *pr_number).await?, + }; statuses.push((ticket_id.clone(), ci)); } - // In watch + human mode, clear screen before redraw if watch && mode == Mode::Human { print!("\x1B[2J\x1B[H"); } @@ -130,8 +154,6 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod output::print_ci_status(&statuses, mode); if !watch || mode != Mode::Human { - // JSON/quiet mode 
prints once even with --watch - // Determine exit code based on overall status let has_failure = statuses.iter().any(|(_t, ci)| ci.overall == "failing"); if has_failure { bail_code!( @@ -146,7 +168,6 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod return Ok(()); } - // Check if all checks are completed let all_completed = statuses .iter() .all(|(_t, ci)| ci.checks.iter().all(|c| c.status == "completed")); @@ -169,3 +190,61 @@ pub async fn ci(repo: &Path, tickets: &[&str], watch: bool, all: bool, mode: Mod tokio::time::sleep(std::time::Duration::from_secs(5)).await; } } + +/// Fetch the latest pipeline for the PR's source branch and shape it into the +/// same `CiStatus` struct GitHub emits, so the renderer stays forge-agnostic. +async fn fetch_bitbucket_ci( + bb: &bitbucket::BitbucketClient, + pr_id: u64, +) -> Result { + let branch = bb.get_pr_source_branch(pr_id).await?.unwrap_or_default(); + + // No branch resolvable → return an empty CiStatus rather than erroring; + // matches the behaviour of GitHub's "no checks" path. + if branch.is_empty() { + return Ok(crate::github::CiStatus { + pr_number: pr_id, + head_sha: String::new(), + overall: "no checks".to_string(), + checks: Vec::new(), + }); + } + + let pipeline = bb.get_latest_pipeline_for_branch(&branch).await?; + let overall = bitbucket::pipeline_status_to_ci_string(pipeline.as_ref()); + + // Project a single CheckRun representing the pipeline so that --watch's + // "all completed" check works the same way it does for GitHub. Pipelines + // in pending/in_progress map to status "in_progress"; everything else to + // "completed". 
+ let checks: Vec = match pipeline { + Some(p) => { + let upper = p.state.to_ascii_uppercase(); + let status = match upper.as_str() { + "PENDING" | "IN_PROGRESS" | "HALTED" => "in_progress", + _ => "completed", + }; + let conclusion = match overall.as_str() { + "passing" => Some("success".to_string()), + "failing" => Some("failure".to_string()), + _ => None, + }; + vec![crate::github::CheckRun { + name: p.name, + status: status.to_string(), + conclusion, + started_at: None, + completed_at: None, + html_url: p.url, + }] + } + None => Vec::new(), + }; + + Ok(crate::github::CiStatus { + pr_number: pr_id, + head_sha: String::new(), + overall, + checks, + }) +} diff --git a/src/cli/commands/compress.rs b/src/cli/commands/compress.rs new file mode 100644 index 0000000..57c54f9 --- /dev/null +++ b/src/cli/commands/compress.rs @@ -0,0 +1,103 @@ +use std::path::Path; + +use anyhow::Result; + +use crate::config::ParsecConfig; +use crate::git; +use crate::output::Mode; +use crate::worktree::WorktreeManager; + +pub async fn compress( + repo: &Path, + ticket: Option<&str>, + message: Option, + mode: Mode, +) -> Result<()> { + let config = ParsecConfig::load()?; + let manager = WorktreeManager::new(repo, &config)?; + + // Resolve ticket from arg or current worktree + let ticket = match ticket { + Some(t) => t.to_string(), + None => { + let cwd = std::env::current_dir()?; + let workspaces = manager.list()?; + workspaces + .iter() + .find(|w| cwd.starts_with(&w.path)) + .map(|w| w.ticket.clone()) + .ok_or_else(|| anyhow::anyhow!("Not in a parsec worktree. Specify a ticket."))? 
+ } + }; + + let workspace = manager.get(&ticket)?; + + // Find merge-base with the base branch + let merge_base = git::run_output( + &workspace.path, + &["merge-base", "HEAD", &workspace.base_branch], + )?; + + // Count commits to squash + let log_output = git::run_output( + &workspace.path, + &["rev-list", "--count", &format!("{}..HEAD", merge_base)], + )?; + let commit_count: u64 = log_output.parse().unwrap_or(0); + + if commit_count <= 1 { + if mode == Mode::Human { + println!( + "Nothing to compress — branch has {} commit(s) since base.", + commit_count + ); + } + return Ok(()); + } + + // Get the default commit message (combine all commit messages) + let combined_msg = if let Some(ref msg) = message { + msg.clone() + } else { + git::run_output( + &workspace.path, + &["log", "--format=%s", &format!("{}..HEAD", merge_base)], + )? + }; + + // Soft reset to merge-base + git::run(&workspace.path, &["reset", "--soft", &merge_base])?; + + // Recommit with combined or custom message + let final_message = if message.is_some() { + combined_msg + } else { + // Use first commit message as primary, rest as bullet points + let lines: Vec<&str> = combined_msg.lines().collect(); + if lines.len() == 1 { + lines[0].to_string() + } else { + format!( + "{}\n\nSquashed {} commits:\n{}", + lines[0], + commit_count, + lines + .iter() + .map(|l| format!("- {}", l)) + .collect::>() + .join("\n") + ) + } + }; + + git::run(&workspace.path, &["commit", "-m", &final_message])?; + + if mode == Mode::Human { + println!( + "Compressed {} commits into 1 for ticket {}.", + commit_count, ticket + ); + } + + Ok(()) +} diff --git a/src/cli/commands/config.rs b/src/cli/commands/config.rs index 06a8f5c..ca5b8ed 100644 --- a/src/cli/commands/config.rs +++ b/src/cli/commands/config.rs @@ -245,3 +245,9 @@ pub async fn config_completions(shell: clap_complete::Shell) -> Result<()> { clap_complete::generate(shell, &mut cmd, "parsec", &mut std::io::stdout()); Ok(()) } + +pub async fn config_schema() -> 
Result<()> { + let schema = include_str!("../../../schema/parsec-config.schema.json"); + println!("{}", schema); + Ok(()) +} diff --git a/src/cli/commands/history.rs b/src/cli/commands/history.rs index 8bb82f1..abf8b29 100644 --- a/src/cli/commands/history.rs +++ b/src/cli/commands/history.rs @@ -18,6 +18,17 @@ pub async fn log(repo: &Path, ticket: Option<&str>, last: usize, mode: Mode) -> Ok(()) } +pub async fn log_export(repo: &Path) -> Result<()> { + let repo_root = git::get_main_repo_root(repo).or_else(|_| git::get_repo_root(repo))?; + let raw = crate::execlog::read_raw(&repo_root)?; + if raw.is_empty() { + eprintln!("No execution log entries. Run some commands first."); + } else { + print!("{}", raw); + } + Ok(()) +} + pub async fn undo(repo: &Path, dry_run: bool, mode: Mode) -> Result<()> { let config = ParsecConfig::load()?; let repo_root = git::get_main_repo_root(repo).or_else(|_| git::get_repo_root(repo))?; diff --git a/src/cli/commands/mod.rs b/src/cli/commands/mod.rs index 4d7d608..5f7a5b0 100644 --- a/src/cli/commands/mod.rs +++ b/src/cli/commands/mod.rs @@ -1,4 +1,5 @@ mod ci; +mod compress; mod config; mod diff; mod doctor; @@ -11,6 +12,7 @@ mod tracker_cmds; mod workspace; pub use ci::*; +pub use compress::*; pub use config::*; pub use diff::*; pub use doctor::*; diff --git a/src/cli/commands/pr.rs b/src/cli/commands/pr.rs index c56b197..6907aaa 100644 --- a/src/cli/commands/pr.rs +++ b/src/cli/commands/pr.rs @@ -1,8 +1,9 @@ use std::path::Path; -use anyhow::{Context, Result}; +use anyhow::{bail, Context, Result}; use colored::Colorize; +use crate::bitbucket; use crate::config::ParsecConfig; use crate::errors::ErrorCode; use crate::git; @@ -140,27 +141,68 @@ pub async fn pr_status(repo: &Path, ticket: Option<&str>, mode: Mode) -> Result< all_entries.push((ws.ticket.clone(), pr_number, String::new())); } } + } else if let Some(bb) = bitbucket::BitbucketClient::new(&remote_url)? 
{ + for ws in &workspaces { + if let Ok(Some(pr_id)) = bb.find_pr_by_branch(&ws.branch).await { + all_entries.push((ws.ticket.clone(), pr_id, String::new())); + } + } } if all_entries.is_empty() { if let Some(t) = ticket { - bail_code!(ErrorCode::E010, "no PR found for {t}. Ship it first with `parsec ship {t}`, or check your GitHub token."); + bail_code!(ErrorCode::E010, "no PR found for {t}. Ship it first with `parsec ship {t}`, or check your forge token."); } else { - bail_code!(ErrorCode::E010, "no PRs found. Ship a ticket first with `parsec ship`, or check your GitHub token."); + bail_code!(ErrorCode::E010, "no PRs found. Ship a ticket first with `parsec ship`, or check your forge token."); } } } - let gh = github::GitHubClient::new(&remote_url, &config)?.ok_or_else(|| { - anyhow::Error::from(crate::errors::ParsecError::new( - ErrorCode::E001, - "no GitHub token found. Set PARSEC_GITHUB_TOKEN.", - )) - })?; + // Try GitHub first, then Bitbucket let mut statuses = Vec::new(); - for (ticket_id, pr_number, _url) in &all_entries { - let status = gh.get_pr_status(*pr_number).await?; - statuses.push((ticket_id.clone(), status)); + if let Some(gh) = github::GitHubClient::new(&remote_url, &config)? { + for (ticket_id, pr_number, _url) in &all_entries { + let status = gh.get_pr_status(*pr_number).await?; + statuses.push((ticket_id.clone(), status)); + } + } else if let Some(bb) = bitbucket::BitbucketClient::new(&remote_url)? { + for (ticket_id, pr_id, _url) in &all_entries { + let bb_status = bb.get_pr_status(*pr_id).await?; + + // Resolve CI from Bitbucket Pipelines for the PR's source branch. + // Any failure (no token scope, pipelines disabled, network) falls + // back to "unknown" rather than failing the whole pr-status call. 
+ let ci_status = match bb.get_pr_source_branch(*pr_id).await { + Ok(Some(branch)) => match bb.get_latest_pipeline_for_branch(&branch).await { + Ok(pipeline) => bitbucket::pipeline_status_to_ci_string(pipeline.as_ref()), + Err(_) => "unknown".to_string(), + }, + _ => "unknown".to_string(), + }; + + let review_status = match bb.get_pr_participants(*pr_id).await { + Ok(participants) => bitbucket::participants_to_review_status(&participants), + Err(_) => "unknown".to_string(), + }; + + statuses.push(( + ticket_id.clone(), + github::PrStatus { + number: bb_status.id, + title: bb_status.title, + state: bb_status.state.to_lowercase(), + mergeable: None, + ci_status, + review_status, + url: bb_status.url, + }, + )); + } + } else { + bail_code!( + ErrorCode::E001, + "no forge token found. Set PARSEC_GITHUB_TOKEN or PARSEC_BITBUCKET_TOKEN." + ); } output::print_pr_status(&statuses, mode); @@ -178,15 +220,20 @@ pub async fn merge( let config = ParsecConfig::load()?; let repo_root = git::get_main_repo_root(repo).or_else(|_| git::get_repo_root(repo))?; let remote_url = git::run_output(repo, &["remote", "get-url", "origin"])?; - let gh = github::GitHubClient::new(&remote_url, &config)?.ok_or_else(|| { - anyhow::Error::from(crate::errors::ParsecError::new( - ErrorCode::E001, - "no GitHub token found. Set PARSEC_GITHUB_TOKEN.", - )) - })?; let oplog = crate::oplog::OpLog::load(&repo_root)?; let manager = WorktreeManager::new(repo, &config)?; + // Detect forge: GitHub or Bitbucket + let has_github = github::GitHubClient::new(&remote_url, &config)?.is_some(); + let has_bitbucket = !has_github && bitbucket::BitbucketClient::new(&remote_url)?.is_some(); + + if !has_github && !has_bitbucket { + bail_code!( + ErrorCode::E001, + "no forge token found. Set PARSEC_GITHUB_TOKEN or PARSEC_BITBUCKET_TOKEN." 
+ ); + } + // Resolve ticket let ticket_id = if let Some(t) = ticket { t.to_string() @@ -218,17 +265,74 @@ pub async fn merge( let ws = manager.get(&ticket_id).with_context(|| { format!("ticket {ticket_id} not found in active workspaces or oplog") })?; - gh.find_pr_by_branch(&ws.branch) - .await? - .ok_or_else(|| { + if has_github { + let gh = github::GitHubClient::new(&remote_url, &config)?.unwrap(); + gh.find_pr_by_branch(&ws.branch).await?.ok_or_else(|| { anyhow::anyhow!( - "no open PR found for {ticket_id} (branch '{}'). Either ship it with `parsec ship {ticket_id}`, or check that PARSEC_GITHUB_TOKEN is set.", + "no open PR found for {ticket_id} (branch '{}'). Ship it first.", ws.branch ) })? + } else { + let bb = bitbucket::BitbucketClient::new(&remote_url)?.unwrap(); + bb.find_pr_by_branch(&ws.branch).await?.ok_or_else(|| { + anyhow::anyhow!( + "no open PR found for {ticket_id} (branch '{}'). Ship it first.", + ws.branch + ) + })? + } } }; + // Bitbucket merge path + if has_bitbucket { + let bb = bitbucket::BitbucketClient::new(&remote_url)?.unwrap(); + let method = if rebase { "rebase" } else { "squash" }; + match bb.merge_pr(pr_number, method).await { + Ok(mr) => { + if mode == Mode::Human { + println!("Merged PR #{} ({})", pr_number, mr.message); + } else if mode == Mode::Json { + println!( + "{}", + serde_json::json!({ + "ticket": ticket_id, + "pr_number": pr_number, + "merged": mr.merged, + "method": method, + }) + ); + } + } + Err(e) => { + bail!("Bitbucket merge failed: {e}"); + } + } + + // Auto-transition ticket status + if let Some(ref auto) = config.tracker.auto_transition { + if let Some(ref status) = auto.on_merge { + tracker::try_transition(&config, &ticket_id, status).await; + } + } + + if let Err(e) = crate::oplog::record( + &repo_root, + crate::oplog::OpKind::Clean, + Some(&ticket_id), + &format!("Merged PR #{} ({})", pr_number, method), + None, + ) { + eprintln!("warning: failed to write oplog: {e}"); + } + + return Ok(()); + } + + // GitHub 
merge path + let gh = github::GitHubClient::new(&remote_url, &config)?.unwrap(); + // Idempotency: check if PR is already merged/closed if let Ok(status) = gh.get_pr_status(pr_number).await { if status.state == "closed" { diff --git a/src/cli/commands/ship.rs b/src/cli/commands/ship.rs index c19a9a2..4c62abc 100644 --- a/src/cli/commands/ship.rs +++ b/src/cli/commands/ship.rs @@ -2,6 +2,7 @@ use std::path::Path; use anyhow::{Context, Result}; +use crate::bitbucket; use crate::config::ParsecConfig; use crate::errors::ErrorCode; use crate::git; @@ -22,8 +23,11 @@ pub async fn ship( skip_hooks: bool, reviewers: Vec, labels: Vec, + template: Option, mode: Mode, ) -> Result<()> { + crate::execlog::set_ticket(ticket); + let mut config = ParsecConfig::load()?; let manager = WorktreeManager::new(repo, &config)?; config.resolve_for_repo(manager.repo_root()); @@ -77,6 +81,7 @@ pub async fn ship( // Phase 1: Push only (don't clean up yet) // Idempotency: if workspace is already gone (cleaned up after a prior ship), // treat push as a no-op — the branch is already on the remote. 
+ let push_start = std::time::Instant::now(); let mut result = match manager.ship_push(ticket) { Ok(r) => r, Err(e) => { @@ -111,6 +116,8 @@ pub async fn ship( } }; + crate::execlog::record_step("push", "ok", push_start.elapsed().as_millis() as u64, None); + // Resolve base branch: --base CLI > config default_base > worktree's base_branch if let Some(base) = base_override { result.base_branch = base; @@ -133,8 +140,9 @@ pub async fn ship( } // Phase 2: Create PR/MR (async) + let pr_start = std::time::Instant::now(); let mut pr_failed = false; - if !no_pr && config.ship.auto_pr { + if !no_pr && config.ship.auto_pr && !crate::env::is_offline() { let (ticket_title, ticket_url) = match tracker::fetch_ticket(&config, ticket, Some(manager.repo_root())).await { Ok(Some(t)) => (Some(t.title), t.url), @@ -155,11 +163,18 @@ pub async fn ship( // Gather stack context for PR body (#234) let stack_info = gather_stack_info(&manager, ticket); + // Resolve PR template (#233) + let template_content = resolve_template( + manager.repo_root(), + template.as_deref().or(config.ship.template.as_deref()), + ); + let pr_body = build_pr_body( &result.ticket, effective_title, ticket_url.as_deref(), stack_info.as_ref(), + template_content.as_deref(), ); let remote_url = git::get_remote_url(manager.repo_root()); @@ -207,8 +222,38 @@ pub async fn ship( } } } + } else if let Some(bb) = bitbucket::BitbucketClient::new(remote_url)? 
{ + // No GitHub token — try Bitbucket + if let Ok(Some(existing_pr)) = bb.find_pr_by_branch(&result.branch).await { + let pr_url = format!( + "https://bitbucket.org/{}/{}/pull-requests/{}", + bb.remote().workspace, + bb.remote().repo_slug, + existing_pr + ); + result.pr_url = Some(pr_url); + } else { + match bb + .create_pr( + &result.branch, + &result.base_branch, + &pr_title, + &pr_body, + draft || config.ship.draft, + ) + .await + { + Ok(pr) => { + result.pr_url = Some(pr.url); + } + Err(e) => { + eprintln!("error: Bitbucket PR creation failed: {e}"); + pr_failed = true; + } + } + } } else { - // No GitHub token — try GitLab + // No GitHub/Bitbucket token — try GitLab match gitlab::create_mr( remote_url, &result.branch, @@ -225,7 +270,7 @@ pub async fn ship( Ok(None) => { eprintln!( "note: PR/MR creation skipped — no token found.\n \ - Set PARSEC_GITHUB_TOKEN or PARSEC_GITLAB_TOKEN to enable." + Set PARSEC_GITHUB_TOKEN, PARSEC_BITBUCKET_TOKEN, or PARSEC_GITLAB_TOKEN to enable." ); pr_failed = true; } @@ -238,8 +283,15 @@ pub async fn ship( } } + crate::execlog::record_step( + "create_pr", + if pr_failed { "error" } else { "ok" }, + pr_start.elapsed().as_millis() as u64, + result.pr_url.clone(), + ); + // Auto-comment PR link on the ticket if configured - if config.tracker.comment_on_ship { + if config.tracker.comment_on_ship && !crate::env::is_offline() { if let Some(ref pr_url) = result.pr_url { let comment_body = format!("PR opened: {}", pr_url); if let Err(e) = @@ -357,6 +409,7 @@ fn build_pr_body( title: Option<&str>, ticket_url: Option<&str>, stack_info: Option<&StackPrInfo>, + template_content: Option<&str>, ) -> String { let mut body = String::new(); @@ -391,7 +444,46 @@ fn build_pr_body( body.push('\n'); } + // Include PR template content (#233) + if let Some(tmpl) = template_content { + body.push_str("---\n\n"); + body.push_str(tmpl); + body.push('\n'); + } + body.push_str(&format!("Shipped via `parsec ship {ticket}`\n")); body } + +/// Resolve PR 
template content from explicit path or auto-detection. +fn resolve_template(repo_root: &Path, explicit_path: Option<&str>) -> Option { + if let Some(path) = explicit_path { + let full_path = if std::path::Path::new(path).is_absolute() { + std::path::PathBuf::from(path) + } else { + repo_root.join(path) + }; + return std::fs::read_to_string(&full_path).ok(); + } + + // Auto-detect common template locations + let candidates = [ + ".github/PULL_REQUEST_TEMPLATE.md", + ".github/pull_request_template.md", + "PULL_REQUEST_TEMPLATE.md", + "pull_request_template.md", + "docs/PULL_REQUEST_TEMPLATE.md", + ]; + + for candidate in &candidates { + let path = repo_root.join(candidate); + if let Ok(content) = std::fs::read_to_string(&path) { + if !content.trim().is_empty() { + return Some(content); + } + } + } + + None +} diff --git a/src/cli/commands/stack.rs b/src/cli/commands/stack.rs index 7793d9b..4004f3d 100644 --- a/src/cli/commands/stack.rs +++ b/src/cli/commands/stack.rs @@ -187,6 +187,7 @@ pub async fn stack_submit(repo: &Path, mode: Mode) -> Result<()> { false, // skip_hooks Vec::new(), // reviewers Vec::new(), // labels + None, // template mode, ) .await diff --git a/src/cli/commands/workspace.rs b/src/cli/commands/workspace.rs index 75499c9..8aef3ed 100644 --- a/src/cli/commands/workspace.rs +++ b/src/cli/commands/workspace.rs @@ -21,6 +21,7 @@ pub async fn start( hook: Option, mode: Mode, ) -> Result<()> { + crate::execlog::set_ticket(ticket); let mut config = ParsecConfig::load()?; let repo_root = git::get_repo_root(repo)?; config.resolve_for_repo(&repo_root); diff --git a/src/cli/mod.rs b/src/cli/mod.rs index de888bf..94f71f8 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -33,6 +33,10 @@ pub struct Cli { /// Preview what would happen without making changes #[arg(long, global = true)] pub dry_run: bool, + + /// Skip all network operations (tracker, PR, fetch) + #[arg(long, global = true)] + pub offline: bool, } #[derive(Subcommand)] @@ -140,6 +144,10 @@ pub 
enum Command {
        /// Add labels to the PR (can be specified multiple times)
        #[arg(long, short = 'l')]
        label: Vec<String>,
+
+        /// Path to PR body template file
+        #[arg(long)]
+        template: Option<String>,
    },
 
    /// Remove merged or stale worktrees
@@ -308,6 +316,10 @@ pub enum Command {
        /// Show last N entries (default: 20)
        #[arg(long, short = 'n', default_value = "20")]
        last: usize,
+
+        /// Export execution log as JSONL (for observability/debugging)
+        #[arg(long)]
+        export: bool,
    },
 
    /// Undo the last parsec operation
@@ -466,6 +478,20 @@ pub enum Command {
        start: bool,
    },
 
+    /// Squash all branch commits into one
+    ///
+    /// Resets the branch to the merge-base with the base branch and
+    /// re-commits all changes as a single commit. Use --message to
+    /// set a custom commit message.
+    Compress {
+        /// Ticket identifier (auto-detects current worktree if omitted)
+        ticket: Option<String>,
+
+        /// Custom commit message (default: combines all squashed messages)
+        #[arg(long, short)]
+        message: Option<String>,
+    },
+
    /// Rename a workspace to a different ticket ID
    ///
    /// Changes the ticket ID, renames the branch, and moves the worktree directory.
@@ -514,6 +540,11 @@ pub enum ConfigAction {
        /// Shell type (zsh, bash, fish, elvish, powershell)
        shell: clap_complete::Shell,
    },
+    /// Output JSON Schema for config.toml
+    ///
+    /// Prints the JSON Schema for parsec's configuration format.
+    /// Useful for IDE autocomplete and validation.
+    Schema,
 }
 
 pub async fn run(cli: Cli) -> Result<()> {
@@ -526,7 +557,51 @@ pub async fn run(cli: Cli) -> Result<()> {
        output::Mode::Human
    };
 
-    match cli.command {
+    // Propagate offline mode via env var so all subsystems can check it
+    let offline = cli.offline
+        || crate::config::ParsecConfig::load()
+            .map(|c| c.workspace.offline)
+            .unwrap_or(false);
+    if offline {
+        std::env::set_var("PARSEC_OFFLINE", "1");
+    }
+
+    // Observability: extract command name and set up execution tracking
+    let cmd_name = match &cli.command {
+        Command::Start { .. 
} => "start", + Command::List { .. } => "list", + Command::Status { .. } => "status", + Command::Ticket { .. } => "ticket", + Command::Ship { .. } => "ship", + Command::Clean { .. } => "clean", + Command::Conflicts => "conflicts", + Command::PrStatus { .. } => "pr-status", + Command::Merge { .. } => "merge", + Command::Ci { .. } => "ci", + Command::Diff { .. } => "diff", + Command::Switch { .. } => "switch", + Command::Sync { .. } => "sync", + Command::Open { .. } => "open", + Command::Adopt { .. } => "adopt", + Command::Log { .. } => "log", + Command::Undo { .. } => "undo", + Command::Inbox { .. } => "inbox", + Command::Board { .. } => "board", + Command::Stack { .. } => "stack", + Command::Root => "root", + Command::Init { .. } => "init", + Command::Config { .. } => "config", + Command::Doctor { .. } => "doctor", + Command::Release { .. } => "release", + Command::Create { .. } => "create", + Command::Rename { .. } => "rename", + Command::Compress { .. } => "compress", + }; + let exec_id = crate::execlog::new_execution_id(); + let exec_started_at = chrono::Utc::now(); + let exec_start = std::time::Instant::now(); + + let result = match cli.command { Command::Start { ticket, base, @@ -571,6 +646,7 @@ pub async fn run(cli: Cli) -> Result<()> { skip_hooks, reviewer, label, + template, } => { if cli.dry_run { eprintln!( @@ -592,6 +668,7 @@ pub async fn run(cli: Cli) -> Result<()> { skip_hooks, reviewer, label, + template, output_mode, ) .await @@ -698,8 +775,16 @@ pub async fn run(cli: Cli) -> Result<()> { Command::Switch { ticket } => { commands::switch(&repo_path, ticket.as_deref(), output_mode).await } - Command::Log { ticket, last } => { - commands::log(&repo_path, ticket.as_deref(), last, output_mode).await + Command::Log { + ticket, + last, + export, + } => { + if export { + commands::log_export(&repo_path).await + } else { + commands::log(&repo_path, ticket.as_deref(), last, output_mode).await + } } Command::Undo { dry_run } => commands::undo(&repo_path, 
dry_run, output_mode).await, Command::Inbox { pick } => commands::inbox(&repo_path, pick, output_mode).await, @@ -736,6 +821,7 @@ pub async fn run(cli: Cli) -> Result<()> { ConfigAction::Shell { shell } => commands::config_shell(&shell, output_mode).await, ConfigAction::Man { dir } => commands::config_man(&dir).await, ConfigAction::Completions { shell } => commands::config_completions(shell).await, + ConfigAction::Schema => commands::config_schema().await, }, Command::Doctor { ai } => { if ai { @@ -793,5 +879,36 @@ pub async fn run(cli: Cli) -> Result<()> { } commands::rename(&repo_path, &old_ticket, &new_ticket, output_mode).await } + Command::Compress { ticket, message } => { + commands::compress(&repo_path, ticket.as_deref(), message, output_mode).await + } + }; + + // Record execution entry (best-effort, never fail the command) + let duration = exec_start.elapsed(); + let steps = crate::execlog::take_steps(); + let ticket = crate::execlog::take_ticket(); + let entry = crate::execlog::ExecEntry { + execution_id: exec_id, + command: cmd_name.to_string(), + ticket, + started_at: exec_started_at, + finished_at: chrono::Utc::now(), + duration_ms: duration.as_millis() as u64, + status: if result.is_ok() { + "ok".to_string() + } else { + "error".to_string() + }, + error: result.as_ref().err().map(|e| format!("{e:#}")), + steps, + }; + // Use repo_path for logging; skip if .parsec dir can't be resolved + if let Ok(root) = crate::git::get_main_repo_root(&repo_path) + .or_else(|_| crate::git::get_repo_root(&repo_path)) + { + let _ = crate::execlog::append(&root, &entry); } + + result } diff --git a/src/config/mod.rs b/src/config/mod.rs index 8fc4636..9e4a140 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -1,5 +1,6 @@ mod settings; +pub use settings::CacheStrategy; pub use settings::ParsecConfig; pub use settings::TrackerProvider; pub use settings::WorktreeLayout; diff --git a/src/config/settings.rs b/src/config/settings.rs index d2393a5..14dda07 100644 --- 
a/src/config/settings.rs
+++ b/src/config/settings.rs
@@ -84,6 +84,44 @@ impl std::fmt::Display for WorktreeLayout {
    }
 }
 
+// ---------------------------------------------------------------------------
+// CacheStrategy
+// ---------------------------------------------------------------------------
+
+#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "lowercase")]
+#[derive(Default)]
+pub enum CacheStrategy {
+    #[default]
+    Symlink,
+    Copy,
+}
+
+impl std::fmt::Display for CacheStrategy {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            CacheStrategy::Symlink => write!(f, "symlink"),
+            CacheStrategy::Copy => write!(f, "copy"),
+        }
+    }
+}
+
+// ---------------------------------------------------------------------------
+// WorktreeConfig
+// ---------------------------------------------------------------------------
+
+/// Build cache sharing settings for new worktrees.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct WorktreeConfig {
+    /// Directories to share from the main repo into new worktrees
+    /// (e.g. ["target", "node_modules", ".venv"]).
+    #[serde(default)]
+    pub shared_cache: Vec<String>,
+    /// How to share the directories: symlink (default) or copy.
+    #[serde(default)]
+    pub cache_strategy: CacheStrategy,
+}
+
 // ---------------------------------------------------------------------------
 // WorkspaceConfig
 // ---------------------------------------------------------------------------
@@ -99,6 +137,9 @@ pub struct WorkspaceConfig {
    /// Default base branch for worktree creation (e.g. 
"develop") #[serde(default)] pub default_base: Option, + /// When true, skip all network operations by default + #[serde(default)] + pub offline: bool, } impl Default for WorkspaceConfig { @@ -108,6 +149,7 @@ impl Default for WorkspaceConfig { base_dir: default_base_dir(), branch_prefix: default_branch_prefix(), default_base: None, + offline: false, } } } @@ -187,6 +229,9 @@ pub struct ShipConfig { /// Default labels to apply to PRs #[serde(default)] pub default_labels: Vec, + /// Path to PR template file (auto-detected if not set) + #[serde(default)] + pub template: Option, } impl Default for ShipConfig { @@ -198,6 +243,7 @@ impl Default for ShipConfig { default_base: None, default_reviewers: Vec::new(), default_labels: Vec::new(), + template: None, } } } @@ -348,6 +394,8 @@ pub struct ParsecConfig { #[serde(default)] pub workspace: WorkspaceConfig, #[serde(default)] + pub worktree: WorktreeConfig, + #[serde(default)] pub tracker: TrackerConfig, #[serde(default)] pub ship: ShipConfig, @@ -578,3 +626,65 @@ impl ParsecConfig { Ok(config) } } + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn worktree_config_defaults_when_section_missing() { + let config: ParsecConfig = toml::from_str("").unwrap(); + assert!(config.worktree.shared_cache.is_empty()); + assert_eq!(config.worktree.cache_strategy, CacheStrategy::Symlink); + } + + #[test] + fn worktree_config_parses_full_section() { + let toml_str = r#" +[worktree] +shared_cache = ["target", ".venv"] +cache_strategy = "copy" +"#; + let config: ParsecConfig = toml::from_str(toml_str).unwrap(); + assert_eq!( + config.worktree.shared_cache, + vec!["target".to_string(), ".venv".to_string()] + ); + assert_eq!(config.worktree.cache_strategy, CacheStrategy::Copy); + } + + #[test] + fn worktree_config_partial_fields_take_defaults() { + let toml_str = 
r#" +[worktree] +shared_cache = ["target"] +"#; + let config: ParsecConfig = toml::from_str(toml_str).unwrap(); + assert_eq!(config.worktree.shared_cache, vec!["target".to_string()]); + assert_eq!(config.worktree.cache_strategy, CacheStrategy::Symlink); + } + + #[test] + fn worktree_config_unknown_strategy_is_error() { + let toml_str = r#" +[worktree] +cache_strategy = "hardlink" +"#; + let err = toml::from_str::(toml_str).unwrap_err(); + let msg = err.to_string(); + assert!( + msg.contains("hardlink") || msg.to_lowercase().contains("variant"), + "expected error to mention bad variant, got: {msg}" + ); + } + + #[test] + fn cache_strategy_symlink_is_default() { + let strategy = CacheStrategy::default(); + assert_eq!(strategy, CacheStrategy::Symlink); + } +} diff --git a/src/env.rs b/src/env.rs index 5ab2a1a..de824fa 100644 --- a/src/env.rs +++ b/src/env.rs @@ -70,3 +70,46 @@ pub fn gitlab_token() -> Option { } None } + +// --------------------------------------------------------------------------- +// Bitbucket +// --------------------------------------------------------------------------- + +pub const PARSEC_BITBUCKET_TOKEN: &str = "PARSEC_BITBUCKET_TOKEN"; +pub const BITBUCKET_TOKEN: &str = "BITBUCKET_TOKEN"; +/// Override Bitbucket Cloud API base URL. Useful for tests (mock servers) and +/// future Bitbucket Server / Data Center support. +pub const PARSEC_BITBUCKET_API_BASE: &str = "PARSEC_BITBUCKET_API_BASE"; + +/// Resolve Bitbucket token. Priority: PARSEC_BITBUCKET_TOKEN > BITBUCKET_TOKEN +pub fn bitbucket_token() -> Option { + for var in [PARSEC_BITBUCKET_TOKEN, BITBUCKET_TOKEN] { + if let Ok(token) = std::env::var(var) { + if !token.is_empty() { + return Some(token); + } + } + } + None +} + +/// Bitbucket API base URL override (no trailing slash). Returns None when unset. 
+pub fn bitbucket_api_base() -> Option<String> {
+    std::env::var(PARSEC_BITBUCKET_API_BASE)
+        .ok()
+        .filter(|v| !v.is_empty())
+        .map(|v| v.trim_end_matches('/').to_string())
+}
+
+// ---------------------------------------------------------------------------
+// Offline mode
+// ---------------------------------------------------------------------------
+
+pub const PARSEC_OFFLINE: &str = "PARSEC_OFFLINE";
+
+/// Check if offline mode is active (via --offline flag or PARSEC_OFFLINE env var).
+pub fn is_offline() -> bool {
+    std::env::var(PARSEC_OFFLINE)
+        .map(|v| v == "1" || v == "true")
+        .unwrap_or(false)
+}
diff --git a/src/execlog.rs b/src/execlog.rs
new file mode 100644
index 0000000..b7793c2
--- /dev/null
+++ b/src/execlog.rs
@@ -0,0 +1,125 @@
+//! Lightweight execution log for observability.
+//!
+//! Each parsec command invocation is recorded as an `ExecEntry` with a unique
+//! execution ID, timing, and optional step-level detail. Entries are stored as
+//! newline-delimited JSON (JSONL) in `.parsec/execlog.jsonl`.
+
+use anyhow::Result;
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize};
+use std::cell::RefCell;
+use std::fs::{self, OpenOptions};
+use std::io::Write;
+use std::path::{Path, PathBuf};
+
+/// A single phase within a command execution.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ExecStep {
+    pub phase: String,
+    pub status: String,
+    pub duration_ms: u64,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub detail: Option<String>,
+}
+
+/// A complete command execution record. 
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ExecEntry {
+    pub execution_id: String,
+    pub command: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ticket: Option<String>,
+    pub started_at: DateTime<Utc>,
+    pub finished_at: DateTime<Utc>,
+    pub duration_ms: u64,
+    pub status: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub error: Option<String>,
+    #[serde(skip_serializing_if = "Vec::is_empty", default)]
+    pub steps: Vec<ExecStep>,
+}
+
+// ---------------------------------------------------------------------------
+// Thread-local accumulators
+// ---------------------------------------------------------------------------
+
+thread_local! {
+    static CURRENT_STEPS: RefCell<Vec<ExecStep>> = const { RefCell::new(Vec::new()) };
+    static CURRENT_TICKET: RefCell<Option<String>> = const { RefCell::new(None) };
+}
+
+/// Record a step in the current execution context.
+pub fn record_step(phase: &str, status: &str, duration_ms: u64, detail: Option<String>) {
+    CURRENT_STEPS.with(|steps| {
+        steps.borrow_mut().push(ExecStep {
+            phase: phase.to_string(),
+            status: status.to_string(),
+            duration_ms,
+            detail,
+        });
+    });
+}
+
+/// Set the ticket for the current execution (called from commands).
+pub fn set_ticket(ticket: &str) {
+    CURRENT_TICKET.with(|t| *t.borrow_mut() = Some(ticket.to_string()));
+}
+
+/// Take all accumulated steps (clears the accumulator).
+pub fn take_steps() -> Vec<ExecStep> {
+    CURRENT_STEPS.with(|steps| std::mem::take(&mut *steps.borrow_mut()))
+}
+
+/// Take the ticket set during execution.
+pub fn take_ticket() -> Option<String> {
+    CURRENT_TICKET.with(|t| t.borrow_mut().take())
+}
+
+/// Generate a new execution ID (UUID v4). 
+pub fn new_execution_id() -> String {
+    uuid::Uuid::new_v4().to_string()
+}
+
+// ---------------------------------------------------------------------------
+// Persistence (JSONL)
+// ---------------------------------------------------------------------------
+
+fn execlog_path(repo_root: &Path) -> PathBuf {
+    repo_root.join(".parsec").join("execlog.jsonl")
+}
+
+/// Append an execution entry to the JSONL log.
+pub fn append(repo_root: &Path, entry: &ExecEntry) -> Result<()> {
+    let path = execlog_path(repo_root);
+    if let Some(parent) = path.parent() {
+        fs::create_dir_all(parent)?;
+    }
+    let line = serde_json::to_string(entry)?;
+    let mut file = OpenOptions::new().create(true).append(true).open(&path)?;
+    writeln!(file, "{}", line)?;
+    Ok(())
+}
+
+/// Load all execution entries from the JSONL log.
+#[allow(dead_code)]
+pub fn load(repo_root: &Path) -> Result<Vec<ExecEntry>> {
+    let path = execlog_path(repo_root);
+    if !path.exists() {
+        return Ok(Vec::new());
+    }
+    let contents = fs::read_to_string(&path)?;
+    Ok(contents
+        .lines()
+        .filter(|line| !line.trim().is_empty())
+        .filter_map(|line| serde_json::from_str(line).ok())
+        .collect())
+}
+
+/// Read raw JSONL content for export.
+pub fn read_raw(repo_root: &Path) -> Result<String> {
+    let path = execlog_path(repo_root);
+    if !path.exists() {
+        return Ok(String::new());
+    }
+    Ok(fs::read_to_string(&path)?)
+}
diff --git a/src/git/mod.rs b/src/git/mod.rs
index 8bfba2d..a2d9467 100644
--- a/src/git/mod.rs
+++ b/src/git/mod.rs
@@ -283,11 +283,17 @@ pub fn delete_branch(repo: &Path, branch: &str) -> Result<()> {
 
 /// Fetch all refs from `origin`.
 pub fn fetch(repo: &Path) -> Result<()> {
+    if crate::env::is_offline() {
+        return Ok(());
+    }
    run(repo, &["fetch", "origin", "--prune"])
 }
 
 /// Fetch from origin if a remote exists. Non-fatal if no remote configured. 
pub fn fetch_if_remote(repo: &Path) -> Result<()> { + if crate::env::is_offline() { + return Ok(()); + } // Check if remote exists first let has_remote = std::process::Command::new("git") .args(["remote"]) diff --git a/src/main.rs b/src/main.rs index 8cc97ce..654d49f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,10 +1,12 @@ #[macro_use] mod errors; +mod bitbucket; mod cli; mod config; mod conflict; mod env; +mod execlog; mod git; mod github; mod gitlab; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 012e183..7afce95 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -115,6 +115,10 @@ pub async fn fetch_ticket( id: &str, repo_root: Option<&Path>, ) -> Result> { + if crate::env::is_offline() { + return Ok(None); + } + // Load atlassian env file for seamless Claude Jira skill integration load_atlassian_env(); @@ -156,6 +160,9 @@ pub async fn fetch_ticket( /// Try to transition a ticket's status. Warns on failure but never blocks. pub async fn try_transition(config: &ParsecConfig, ticket: &str, target_status: &str) { + if crate::env::is_offline() { + return; + } // Only works for Jira currently if !matches!( config.tracker.provider, @@ -197,6 +204,9 @@ pub async fn post_comment( body: &str, repo_root: Option<&Path>, ) -> Result<()> { + if crate::env::is_offline() { + return Ok(()); + } load_atlassian_env(); match config.tracker.provider { diff --git a/src/worktree/cache_share.rs b/src/worktree/cache_share.rs new file mode 100644 index 0000000..a6e4c75 --- /dev/null +++ b/src/worktree/cache_share.rs @@ -0,0 +1,244 @@ +use std::path::Path; + +use crate::config::CacheStrategy; + +/// Share build-cache directories from the main repo into a freshly created +/// worktree. Each entry is processed independently; failures are logged but +/// never propagated, so a flaky cache share never breaks `parsec start`. +/// +/// - Source path is `/`. Missing → skip. +/// - Destination path is `/`. Already exists → skip. 
+/// - `Symlink` creates a symlink to the absolute source path; `Copy` does a +/// recursive copy using stdlib only (no extra dependency). +pub fn share_cache( + repo_root: &Path, + worktree_path: &Path, + entries: &[String], + strategy: CacheStrategy, +) { + if entries.is_empty() { + return; + } + + for entry in entries { + if entry.is_empty() || entry.contains("..") { + eprintln!("warning: skipping invalid shared_cache entry {:?}", entry); + continue; + } + + let src = repo_root.join(entry); + let dest = worktree_path.join(entry); + + if !src.exists() { + eprintln!( + "info: shared_cache: source '{}' does not exist in main repo, skipping", + entry + ); + continue; + } + + if dest.exists() || dest.symlink_metadata().is_ok() { + eprintln!( + "info: shared_cache: destination '{}' already exists in worktree, skipping", + entry + ); + continue; + } + + // Ensure dest's parent exists (for nested entries like "a/b/target"). + if let Some(parent) = dest.parent() { + if !parent.exists() { + if let Err(e) = std::fs::create_dir_all(parent) { + eprintln!( + "warning: shared_cache: failed to create parent for '{}': {e}", + entry + ); + continue; + } + } + } + + let abs_src = match dunce::canonicalize(&src) { + Ok(p) => p, + Err(e) => { + eprintln!( + "warning: shared_cache: cannot resolve source '{}': {e}", + entry + ); + continue; + } + }; + + let result = match strategy { + CacheStrategy::Symlink => create_symlink(&abs_src, &dest), + CacheStrategy::Copy => copy_recursive(&abs_src, &dest), + }; + + match result { + Ok(()) => { + eprintln!( + "info: shared_cache: {} '{}' from {} -> {}", + strategy, + entry, + abs_src.display(), + dest.display() + ); + } + Err(e) => { + eprintln!( + "warning: shared_cache: failed to share '{}' ({}): {e}", + entry, strategy + ); + } + } + } +} + +#[cfg(unix)] +fn create_symlink(src: &Path, dest: &Path) -> std::io::Result<()> { + std::os::unix::fs::symlink(src, dest) +} + +#[cfg(windows)] +fn create_symlink(src: &Path, dest: &Path) -> 
std::io::Result<()> { + if src.is_dir() { + std::os::windows::fs::symlink_dir(src, dest) + } else { + std::os::windows::fs::symlink_file(src, dest) + } +} + +fn copy_recursive(src: &Path, dest: &Path) -> std::io::Result<()> { + let metadata = std::fs::symlink_metadata(src)?; + let file_type = metadata.file_type(); + + if file_type.is_symlink() { + // Follow symlinks during copy (resolving once); fall back to plain copy. + let target = std::fs::read_link(src)?; + let resolved = if target.is_absolute() { + target + } else { + src.parent().unwrap_or(Path::new(".")).join(target) + }; + return copy_recursive(&resolved, dest); + } + + if file_type.is_dir() { + std::fs::create_dir_all(dest)?; + for entry in std::fs::read_dir(src)? { + let entry = entry?; + let child_src = entry.path(); + let child_dest = dest.join(entry.file_name()); + copy_recursive(&child_src, &child_dest)?; + } + Ok(()) + } else { + if let Some(parent) = dest.parent() { + std::fs::create_dir_all(parent)?; + } + std::fs::copy(src, dest).map(|_| ()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + use tempfile::TempDir; + + fn read_file(p: &Path) -> String { + fs::read_to_string(p).unwrap() + } + + fn make_dirs() -> (TempDir, std::path::PathBuf, std::path::PathBuf) { + let tmp = TempDir::new().unwrap(); + let repo = tmp.path().join("repo"); + let wt = tmp.path().join("worktree"); + fs::create_dir_all(&repo).unwrap(); + fs::create_dir_all(&wt).unwrap(); + (tmp, repo, wt) + } + + #[test] + fn symlink_strategy_links_existing_dir() { + let (_tmp, repo, wt) = make_dirs(); + fs::create_dir_all(repo.join("target")).unwrap(); + fs::write(repo.join("target/build.txt"), "hello").unwrap(); + + share_cache(&repo, &wt, &["target".to_string()], CacheStrategy::Symlink); + + let dest = wt.join("target"); + assert!(dest.exists()); + let meta = fs::symlink_metadata(&dest).unwrap(); + assert!(meta.file_type().is_symlink(), "should be a symlink"); + assert_eq!(read_file(&dest.join("build.txt")), 
"hello"); + } + + #[test] + fn copy_strategy_creates_real_dir() { + let (_tmp, repo, wt) = make_dirs(); + fs::create_dir_all(repo.join("target/nested")).unwrap(); + fs::write(repo.join("target/a.txt"), "alpha").unwrap(); + fs::write(repo.join("target/nested/b.txt"), "beta").unwrap(); + + share_cache(&repo, &wt, &["target".to_string()], CacheStrategy::Copy); + + let dest = wt.join("target"); + assert!(dest.exists()); + let meta = fs::symlink_metadata(&dest).unwrap(); + assert!(!meta.file_type().is_symlink(), "must not be a symlink"); + assert!(meta.is_dir()); + assert_eq!(read_file(&dest.join("a.txt")), "alpha"); + assert_eq!(read_file(&dest.join("nested/b.txt")), "beta"); + } + + #[test] + fn missing_entry_is_skipped_silently() { + let (_tmp, repo, wt) = make_dirs(); + + share_cache( + &repo, + &wt, + &["does-not-exist".to_string()], + CacheStrategy::Symlink, + ); + + assert!(!wt.join("does-not-exist").exists()); + } + + #[test] + fn existing_dest_is_not_overwritten() { + let (_tmp, repo, wt) = make_dirs(); + fs::create_dir_all(repo.join("target")).unwrap(); + fs::write(repo.join("target/from_repo.txt"), "repo").unwrap(); + fs::create_dir_all(wt.join("target")).unwrap(); + fs::write(wt.join("target/preexisting.txt"), "keep").unwrap(); + + share_cache(&repo, &wt, &["target".to_string()], CacheStrategy::Copy); + + // Pre-existing content untouched, repo content not copied in. + assert!(wt.join("target/preexisting.txt").exists()); + assert!(!wt.join("target/from_repo.txt").exists()); + } + + #[test] + fn empty_list_is_noop() { + let (_tmp, repo, wt) = make_dirs(); + share_cache(&repo, &wt, &[], CacheStrategy::Symlink); + // Just verify nothing was created in the worktree. 
+ let entries: Vec<_> = fs::read_dir(&wt).unwrap().collect(); + assert!(entries.is_empty()); + } + + #[test] + fn path_traversal_entries_rejected() { + let (_tmp, repo, wt) = make_dirs(); + fs::create_dir_all(repo.join("evil")).unwrap(); + + share_cache(&repo, &wt, &["../evil".to_string()], CacheStrategy::Symlink); + + // Nothing should have been created. + let entries: Vec<_> = fs::read_dir(&wt).unwrap().collect(); + assert!(entries.is_empty()); + } +} diff --git a/src/worktree/manager.rs b/src/worktree/manager.rs index 4783dc5..bde0ffd 100644 --- a/src/worktree/manager.rs +++ b/src/worktree/manager.rs @@ -131,6 +131,15 @@ impl WorktreeManager { .save(&self.repo_root) .context("failed to save parsec state")?; + // Share build-cache directories from the main repo into the new worktree. + // Failures are logged but never propagated — the worktree itself succeeded. + super::cache_share::share_cache( + &self.repo_root, + &worktree_path, + &self.config.worktree.shared_cache, + self.config.worktree.cache_strategy, + ); + // Run post-create hooks if !self.config.hooks.post_create.is_empty() { let skip_prompt = std::env::var("PARSEC_YES") diff --git a/src/worktree/mod.rs b/src/worktree/mod.rs index 9501760..f204a09 100644 --- a/src/worktree/mod.rs +++ b/src/worktree/mod.rs @@ -1,3 +1,4 @@ +mod cache_share; mod lifecycle; mod manager; diff --git a/tests/bitbucket_integration_tests.rs b/tests/bitbucket_integration_tests.rs new file mode 100644 index 0000000..833fb24 --- /dev/null +++ b/tests/bitbucket_integration_tests.rs @@ -0,0 +1,458 @@ +//! End-to-end tests that exercise the Bitbucket Cloud code path of `parsec ci` +//! and `parsec pr-status` against a mocked Bitbucket API server. +//! +//! These tests verify (a) the dispatch logic actually picks the Bitbucket path +//! instead of GitHub when the origin remote is Bitbucket, and (b) the response +//! mapping (ci_status, review_status) reflects the live API payload. 
+ +use assert_cmd::Command; +use mockito::{Matcher, Server, ServerGuard}; +use std::process::Command as StdCommand; +use tempfile::TempDir; + +const WORKSPACE: &str = "fakews"; +const REPO_SLUG: &str = "fakerepo"; + +/// Initialize a git repo whose `origin` points at a Bitbucket Cloud URL. +/// No actual remote backs the URL — these tests only exercise API calls, +/// never `git fetch` / `git push`. +fn setup_bitbucket_repo() -> TempDir { + let dir = TempDir::new().unwrap(); + let p = dir.path(); + + StdCommand::new("git") + .args(["init"]) + .current_dir(p) + .output() + .unwrap(); + StdCommand::new("git") + .args(["config", "user.name", "Test"]) + .current_dir(p) + .output() + .unwrap(); + StdCommand::new("git") + .args(["config", "user.email", "test@test.com"]) + .current_dir(p) + .output() + .unwrap(); + StdCommand::new("git") + .args(["checkout", "-b", "main"]) + .current_dir(p) + .output() + .unwrap(); + StdCommand::new("git") + .args(["commit", "--allow-empty", "-m", "init"]) + .current_dir(p) + .output() + .unwrap(); + StdCommand::new("git") + .args([ + "remote", + "add", + "origin", + &format!("git@bitbucket.org:{}/{}.git", WORKSPACE, REPO_SLUG), + ]) + .current_dir(p) + .output() + .unwrap(); + + dir +} + +/// Drop a fake oplog Ship entry so `parsec pr-status` / `parsec ci` resolve +/// the PR number from the log without needing a live workspace. 
+fn write_oplog_ship_entry(repo: &std::path::Path, ticket: &str, pr_number: u64) { + let parsec_dir = repo.join(".parsec"); + std::fs::create_dir_all(&parsec_dir).unwrap(); + let body = serde_json::json!({ + "entries": [{ + "id": 1, + "op": "ship", + "ticket": ticket, + "detail": format!( + "Shipped branch 'feature/{0}' -> https://bitbucket.org/{1}/{2}/pull-requests/{3}", + ticket, WORKSPACE, REPO_SLUG, pr_number + ), + "timestamp": "2024-01-01T00:00:00Z", + "undo_info": null + }] + }); + std::fs::write( + parsec_dir.join("oplog.json"), + serde_json::to_string_pretty(&body).unwrap(), + ) + .unwrap(); +} + +fn parsec(server: &ServerGuard) -> Command { + let mut cmd = Command::cargo_bin("parsec").unwrap(); + // Isolate from any user-level config (e.g. existing default_base) so the + // subprocess sees only the env we provide. + cmd.env("PARSEC_CONFIG_DIR", "/tmp/parsec-test-nonexistent") + .env("PARSEC_BITBUCKET_TOKEN", "fake-token-for-test") + .env("PARSEC_BITBUCKET_API_BASE", server.url()) + // Defensive: don't let an inherited GitHub token cause the dispatcher + // to pick the GitHub forge for our bitbucket.org-style remote. + .env_remove("PARSEC_GITHUB_TOKEN") + .env_remove("GITHUB_TOKEN") + .env_remove("GH_TOKEN"); + cmd +} + +/// Build the API path prefix used in mock URLs. 
+fn pr_path(pr_id: u64) -> String { + format!( + "/repositories/{}/{}/pullrequests/{}", + WORKSPACE, REPO_SLUG, pr_id + ) +} + +fn pipelines_path() -> String { + format!("/repositories/{}/{}/pipelines/", WORKSPACE, REPO_SLUG) +} + +// --------------------------------------------------------------------------- +// pr-status +// --------------------------------------------------------------------------- + +#[test] +fn pr_status_bitbucket_maps_ci_and_review_from_api() { + let repo = setup_bitbucket_repo(); + let repo_path = repo.path().to_str().unwrap(); + + let mut server = Server::new(); + + // PR JSON is reused for get_pr_status, get_pr_source_branch, and + // get_pr_participants — they all hit the same endpoint. Two reviewers: + // one approved, one no-action → review_status == "approved". + let pr_body = serde_json::json!({ + "id": 42, + "title": "Add Bitbucket pipelines support", + "state": "OPEN", + "links": { "html": { "href": "https://bitbucket.org/fakews/fakerepo/pull-requests/42" } }, + "source": { "branch": { "name": "feature/BB-1" } }, + "participants": [ + { "state": "approved", "approved": true, "role": "REVIEWER" }, + { "state": null, "approved": false, "role": "REVIEWER" } + ] + }); + let pr_mock = server + .mock("GET", pr_path(42).as_str()) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pr_body.to_string()) + .expect_at_least(2) // status + source-branch + participants + .create(); + + // Pipeline for the source branch: COMPLETED + SUCCESSFUL → ci_status "passing". 
+ let pipelines_body = serde_json::json!({ + "values": [{ + "uuid": "{abc-123}", + "state": { "name": "COMPLETED", "result": { "name": "SUCCESSFUL" } }, + "target": { "ref_name": "feature/BB-1" } + }] + }); + let pipeline_mock = server + .mock("GET", pipelines_path().as_str()) + .match_query(Matcher::AllOf(vec![Matcher::UrlEncoded( + "target.ref_name".into(), + "feature/BB-1".into(), + )])) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pipelines_body.to_string()) + .create(); + + write_oplog_ship_entry(repo.path(), "BB-1", 42); + + let output = parsec(&server) + .args(["--json", "pr-status", "BB-1", "--repo", repo_path]) + .output() + .unwrap(); + + let stdout = String::from_utf8(output.stdout.clone()).unwrap(); + let stderr = String::from_utf8(output.stderr.clone()).unwrap(); + assert!( + output.status.success(), + "pr-status should succeed.\nstdout:\n{stdout}\nstderr:\n{stderr}", + ); + + let parsed: serde_json::Value = + serde_json::from_str(&stdout).expect("pr-status --json must produce valid JSON"); + let arr = parsed.as_array().expect("output should be a JSON array"); + assert_eq!(arr.len(), 1); + let entry = &arr[0]; + assert_eq!(entry["ticket"], "BB-1"); + assert_eq!(entry["pr_number"], 42); + assert_eq!(entry["state"], "open"); + assert_eq!( + entry["ci_status"], "passing", + "ci_status should come from the Bitbucket Pipelines mock" + ); + assert_eq!( + entry["review_status"], "approved", + "review_status should reflect the participants payload" + ); + + pr_mock.assert(); + pipeline_mock.assert(); +} + +#[test] +fn pr_status_bitbucket_no_pipeline_yet_is_no_checks() { + let repo = setup_bitbucket_repo(); + let repo_path = repo.path().to_str().unwrap(); + + let mut server = Server::new(); + + let pr_body = serde_json::json!({ + "id": 7, + "title": "Edge case PR", + "state": "OPEN", + "links": { "html": { "href": "https://bitbucket.org/fakews/fakerepo/pull-requests/7" } }, + "source": { "branch": { "name": "feature/BB-7" 
} }, + "participants": [] + }); + let _pr_mock = server + .mock("GET", pr_path(7).as_str()) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pr_body.to_string()) + .expect_at_least(2) + .create(); + + // No pipeline runs yet for this branch. + let _pipeline_mock = server + .mock("GET", pipelines_path().as_str()) + .match_query(Matcher::UrlEncoded( + "target.ref_name".into(), + "feature/BB-7".into(), + )) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(r#"{"values":[]}"#) + .create(); + + write_oplog_ship_entry(repo.path(), "BB-7", 7); + + let output = parsec(&server) + .args(["--json", "pr-status", "BB-7", "--repo", repo_path]) + .output() + .unwrap(); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).unwrap(); + let parsed: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + let entry = &parsed.as_array().unwrap()[0]; + assert_eq!( + entry["ci_status"], "no checks", + "no pipeline runs → ci_status \"no checks\"" + ); + assert_eq!( + entry["review_status"], "no reviews", + "no participants → review_status \"no reviews\"" + ); +} + +#[test] +fn pr_status_bitbucket_changes_requested_review() { + let repo = setup_bitbucket_repo(); + let repo_path = repo.path().to_str().unwrap(); + + let mut server = Server::new(); + + let pr_body = serde_json::json!({ + "id": 9, + "title": "Needs work", + "state": "OPEN", + "links": { "html": { "href": "https://bitbucket.org/fakews/fakerepo/pull-requests/9" } }, + "source": { "branch": { "name": "feature/BB-9" } }, + "participants": [ + { "state": "approved", "approved": true, "role": "REVIEWER" }, + { "state": "changes_requested", "approved": false, "role": "REVIEWER" } + ] + }); + let _pr_mock = server + .mock("GET", pr_path(9).as_str()) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pr_body.to_string()) + .expect_at_least(2) + .create(); + + let pipelines_body = 
serde_json::json!({ + "values": [{ + "uuid": "{xyz-9}", + "state": { "name": "COMPLETED", "result": { "name": "FAILED" } }, + "target": { "ref_name": "feature/BB-9" } + }] + }); + let _pipeline_mock = server + .mock("GET", pipelines_path().as_str()) + .match_query(Matcher::UrlEncoded( + "target.ref_name".into(), + "feature/BB-9".into(), + )) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pipelines_body.to_string()) + .create(); + + write_oplog_ship_entry(repo.path(), "BB-9", 9); + + let output = parsec(&server) + .args(["--json", "pr-status", "BB-9", "--repo", repo_path]) + .output() + .unwrap(); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).unwrap(); + let parsed: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + let entry = &parsed.as_array().unwrap()[0]; + assert_eq!(entry["ci_status"], "failing"); + assert_eq!(entry["review_status"], "changes_requested"); +} + +// --------------------------------------------------------------------------- +// ci +// --------------------------------------------------------------------------- + +#[test] +fn ci_bitbucket_uses_pipelines_endpoint() { + let repo = setup_bitbucket_repo(); + let repo_path = repo.path().to_str().unwrap(); + + let mut server = Server::new(); + + // PR endpoint must respond so `fetch_bitbucket_ci` can resolve the source branch. + let pr_body = serde_json::json!({ + "id": 100, + "title": "CI test", + "state": "OPEN", + "links": { "html": { "href": "https://bitbucket.org/fakews/fakerepo/pull-requests/100" } }, + "source": { "branch": { "name": "feature/CI-1" } } + }); + let _pr_mock = server + .mock("GET", pr_path(100).as_str()) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pr_body.to_string()) + .create(); + + // Pipeline run that's still in progress. 
+ let pipelines_body = serde_json::json!({ + "values": [{ + "uuid": "{ci-1}", + "state": { "name": "IN_PROGRESS", "result": null }, + "target": { "ref_name": "feature/CI-1" } + }] + }); + let pipeline_mock = server + .mock("GET", pipelines_path().as_str()) + .match_query(Matcher::UrlEncoded( + "target.ref_name".into(), + "feature/CI-1".into(), + )) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pipelines_body.to_string()) + .expect_at_least(1) + .create(); + + // Crucial: assert that the GitHub commit-status / check-runs endpoints are + // never hit. Mockito returns 501 for unmatched paths by default; that + // would blow up the request. We use a catch-all for /repos/* to detect + // accidental GitHub dispatch and fail loudly. + let github_mock = server + .mock("GET", Matcher::Regex("^/repos/.*".into())) + .with_status(500) + .with_body("github endpoint should not be hit for a Bitbucket remote") + .expect(0) + .create(); + + write_oplog_ship_entry(repo.path(), "CI-1", 100); + + let output = parsec(&server) + .args(["--json", "ci", "CI-1", "--repo", repo_path]) + .output() + .unwrap(); + + let stdout = String::from_utf8(output.stdout.clone()).unwrap(); + let stderr = String::from_utf8(output.stderr.clone()).unwrap(); + assert!( + output.status.success(), + "ci should succeed for an in-progress pipeline.\nstdout:\n{stdout}\nstderr:\n{stderr}", + ); + + let parsed: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + let entry = &parsed.as_array().unwrap()[0]; + assert_eq!(entry["ticket"], "CI-1"); + assert_eq!(entry["pr_number"], 100); + assert_eq!( + entry["overall"], "pending", + "in-progress pipeline → overall \"pending\"" + ); + + pipeline_mock.assert(); + github_mock.assert(); +} + +#[test] +fn ci_bitbucket_failing_pipeline_exits_nonzero() { + let repo = setup_bitbucket_repo(); + let repo_path = repo.path().to_str().unwrap(); + + let mut server = Server::new(); + + let pr_body = serde_json::json!({ + "id": 200, + 
"title": "Broken build", + "state": "OPEN", + "links": { "html": { "href": "https://bitbucket.org/fakews/fakerepo/pull-requests/200" } }, + "source": { "branch": { "name": "feature/CI-2" } } + }); + let _pr_mock = server + .mock("GET", pr_path(200).as_str()) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pr_body.to_string()) + .create(); + + let pipelines_body = serde_json::json!({ + "values": [{ + "uuid": "{ci-2}", + "state": { "name": "COMPLETED", "result": { "name": "FAILED" } }, + "target": { "ref_name": "feature/CI-2" } + }] + }); + let _pipeline_mock = server + .mock("GET", pipelines_path().as_str()) + .match_query(Matcher::UrlEncoded( + "target.ref_name".into(), + "feature/CI-2".into(), + )) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(pipelines_body.to_string()) + .create(); + + write_oplog_ship_entry(repo.path(), "CI-2", 200); + + let output = parsec(&server) + .args(["--json", "ci", "CI-2", "--repo", repo_path]) + .output() + .unwrap(); + + // Failing CI is a hard error (E002) — exit code is non-zero, but the JSON + // status line is printed to stdout before the error JSON is appended. + assert!( + !output.status.success(), + "failing pipeline should exit non-zero" + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + // First line: the CI status array. Second line: the JSON error envelope. 
+ let first_line = stdout.lines().next().expect("expected at least one line"); + let parsed: serde_json::Value = serde_json::from_str(first_line).unwrap(); + let entry = &parsed.as_array().unwrap()[0]; + assert_eq!(entry["overall"], "failing"); +} diff --git a/tests/cli_tests.rs b/tests/cli_tests.rs index a4b7622..3bc79ee 100644 --- a/tests/cli_tests.rs +++ b/tests/cli_tests.rs @@ -702,3 +702,494 @@ fn test_root_prints_repo_path() { .success() .stdout(predicate::str::is_empty().not()); } + +// --------------------------------------------------------------------------- +// quiet mode +// --------------------------------------------------------------------------- + +#[test] +fn test_quiet_mode_suppresses_output() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + parsec() + .args(["start", "QUIET-001", "--repo", repo_path]) + .assert() + .success(); + + // --quiet list should produce no stdout output (empty or whitespace-only). + let output = parsec() + .args(["--quiet", "list", "--repo", repo_path]) + .output() + .unwrap(); + assert!(output.status.success()); + assert!( + String::from_utf8(output.stdout).unwrap().trim().is_empty(), + "quiet mode should suppress normal output" + ); +} + +// --------------------------------------------------------------------------- +// start --title +// --------------------------------------------------------------------------- + +#[test] +fn test_start_with_title() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + parsec() + .args([ + "start", + "TITLE-001", + "--title", + "My Custom Title", + "--repo", + repo_path, + ]) + .assert() + .success(); + + // The title should be stored in state.json. 
+ let state_path = repo.path().join(".parsec").join("state.json"); + let contents = std::fs::read_to_string(&state_path).unwrap(); + assert!( + contents.contains("My Custom Title"), + "state.json should store the custom title" + ); +} + +// --------------------------------------------------------------------------- +// start --base (custom base branch) +// --------------------------------------------------------------------------- + +#[test] +fn test_start_with_base_branch() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + // Create and push a "develop" branch. + StdCommand::new("git") + .args(["checkout", "-b", "develop"]) + .current_dir(repo.path()) + .output() + .unwrap(); + StdCommand::new("git") + .args(["commit", "--allow-empty", "-m", "develop init"]) + .current_dir(repo.path()) + .output() + .unwrap(); + StdCommand::new("git") + .args(["push", "origin", "develop"]) + .current_dir(repo.path()) + .output() + .unwrap(); + StdCommand::new("git") + .args(["checkout", "main"]) + .current_dir(repo.path()) + .output() + .unwrap(); + + parsec() + .args([ + "start", "BASE-001", "--base", "develop", "--repo", repo_path, + ]) + .assert() + .success(); + + // Verify the worktree was created with develop as base. + let state_path = repo.path().join(".parsec").join("state.json"); + let contents = std::fs::read_to_string(&state_path).unwrap(); + let state: serde_json::Value = serde_json::from_str(&contents).unwrap(); + assert_eq!( + state["workspaces"]["BASE-001"]["base_branch"] + .as_str() + .unwrap(), + "develop" + ); +} + +// --------------------------------------------------------------------------- +// start --on (stacked worktrees) +// --------------------------------------------------------------------------- + +#[test] +fn test_start_stacked_on_parent() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + // Start a parent worktree. 
+ parsec() + .args(["start", "STACK-PARENT", "--repo", repo_path]) + .assert() + .success(); + + // Start a child stacked on the parent. + parsec() + .args([ + "start", + "STACK-CHILD", + "--on", + "STACK-PARENT", + "--repo", + repo_path, + ]) + .assert() + .success(); + + // Verify parent_ticket is set in state.json. + let state_path = repo.path().join(".parsec").join("state.json"); + let contents = std::fs::read_to_string(&state_path).unwrap(); + let state: serde_json::Value = serde_json::from_str(&contents).unwrap(); + assert_eq!( + state["workspaces"]["STACK-CHILD"]["parent_ticket"] + .as_str() + .unwrap(), + "STACK-PARENT" + ); +} + +// --------------------------------------------------------------------------- +// ship --dry-run +// --------------------------------------------------------------------------- + +#[test] +fn test_ship_dry_run() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + parsec() + .args(["start", "DRY-SHIP", "--repo", repo_path]) + .assert() + .success(); + + // --dry-run should succeed without actually shipping. + parsec() + .args(["--dry-run", "ship", "DRY-SHIP", "--repo", repo_path]) + .assert() + .success(); + + // The worktree should still be listed (not cleaned up). 
+ parsec() + .args(["list", "--repo", repo_path]) + .assert() + .success() + .stdout(predicate::str::contains("DRY-SHIP")); +} + +// --------------------------------------------------------------------------- +// doctor +// --------------------------------------------------------------------------- + +#[test] +fn test_doctor_succeeds() { + let repo = setup_repo(); + parsec() + .args(["doctor", "--repo", repo.path().to_str().unwrap()]) + .assert() + .success(); +} + +// --------------------------------------------------------------------------- +// log --ticket filter +// --------------------------------------------------------------------------- + +#[test] +fn test_log_filter_by_ticket() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + parsec() + .args(["start", "LOGF-A", "--repo", repo_path]) + .assert() + .success(); + + parsec() + .args(["start", "LOGF-B", "--repo", repo_path]) + .assert() + .success(); + + // Filter log to LOGF-A only (ticket is a positional arg). + let output = parsec() + .args(["log", "LOGF-A", "--repo", repo_path]) + .output() + .unwrap(); + + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("LOGF-A"), "filtered log should show LOGF-A"); + assert!( + !stdout.contains("LOGF-B"), + "filtered log should not show LOGF-B" + ); +} + +// --------------------------------------------------------------------------- +// clean --orphans +// --------------------------------------------------------------------------- + +#[test] +fn test_clean_orphans() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + parsec() + .args(["start", "ORPHAN-001", "--repo", repo_path]) + .assert() + .success(); + + // Manually delete the worktree directory to create an orphan. 
+ let state_path = repo.path().join(".parsec").join("state.json"); + let state_contents = std::fs::read_to_string(&state_path).unwrap(); + let state: serde_json::Value = serde_json::from_str(&state_contents).unwrap(); + let wt_path = state["workspaces"]["ORPHAN-001"]["path"].as_str().unwrap(); + + // Remove the worktree directory and prune git worktree list. + std::fs::remove_dir_all(wt_path).unwrap(); + StdCommand::new("git") + .args(["worktree", "prune"]) + .current_dir(repo.path()) + .output() + .unwrap(); + + // clean --orphans should remove the stale entry. + parsec() + .args(["clean", "--orphans", "--repo", repo_path]) + .assert() + .success(); + + // The orphaned workspace should be gone. + parsec() + .args(["list", "--repo", repo_path]) + .assert() + .success() + .stdout(predicate::str::contains("ORPHAN-001").not()); +} + +// --------------------------------------------------------------------------- +// rename +// --------------------------------------------------------------------------- + +#[test] +fn test_rename_ticket() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + parsec() + .args(["start", "OLD-NAME", "--repo", repo_path]) + .assert() + .success(); + + parsec() + .args(["rename", "OLD-NAME", "NEW-NAME", "--repo", repo_path]) + .assert() + .success(); + + // OLD-NAME gone, NEW-NAME present. + parsec() + .args(["list", "--repo", repo_path]) + .assert() + .success() + .stdout(predicate::str::contains("OLD-NAME").not()) + .stdout(predicate::str::contains("NEW-NAME")); +} + +// --------------------------------------------------------------------------- +// start --branch (existing branch) +// --------------------------------------------------------------------------- + +#[test] +fn test_start_with_existing_branch() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path().to_str().unwrap(); + + // Create an existing branch. 
+ StdCommand::new("git") + .args(["branch", "my-existing-branch"]) + .current_dir(repo.path()) + .output() + .unwrap(); + + parsec() + .args([ + "start", + "EXIST-001", + "--branch", + "my-existing-branch", + "--repo", + repo_path, + ]) + .assert() + .success(); + + // Should be listed with the correct branch. + let state_path = repo.path().join(".parsec").join("state.json"); + let contents = std::fs::read_to_string(&state_path).unwrap(); + assert!(contents.contains("my-existing-branch")); +} + +// --------------------------------------------------------------------------- +// shared_cache (issue #207) +// --------------------------------------------------------------------------- + +/// Build a custom config dir containing a config.toml with the given body and +/// return its path. Caller must keep the TempDir alive. +fn write_config_toml(body: &str) -> TempDir { + let dir = TempDir::new().unwrap(); + std::fs::write(dir.path().join("config.toml"), body).unwrap(); + dir +} + +#[test] +fn test_shared_cache_symlink_creates_link() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path(); + + // Pre-populate a `target/` directory in the main repo with a build artifact. 
+ std::fs::create_dir_all(repo_path.join("target")).unwrap(); + std::fs::write(repo_path.join("target").join("artifact.txt"), "pre-built").unwrap(); + + let config_dir = write_config_toml( + r#" +[worktree] +shared_cache = ["target"] +cache_strategy = "symlink" +"#, + ); + + let mut cmd = Command::cargo_bin("parsec").unwrap(); + cmd.env("PARSEC_CONFIG_DIR", config_dir.path()) + .args(["start", "CACHE-1", "--repo", repo_path.to_str().unwrap()]) + .assert() + .success(); + + // Worktree path follows sibling layout: /.CACHE-1 + let repo_name = repo_path.file_name().unwrap().to_string_lossy().to_string(); + let wt_path = repo_path + .parent() + .unwrap() + .join(format!("{}.CACHE-1", repo_name)); + let dest = wt_path.join("target"); + + assert!(dest.exists(), "worktree should have shared target/"); + let meta = std::fs::symlink_metadata(&dest).unwrap(); + assert!( + meta.file_type().is_symlink(), + "symlink strategy must produce a symlink, got: {:?}", + meta.file_type() + ); + let contents = std::fs::read_to_string(dest.join("artifact.txt")).unwrap(); + assert_eq!(contents, "pre-built"); +} + +#[test] +fn test_shared_cache_copy_creates_real_dir() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path(); + + std::fs::create_dir_all(repo_path.join("target").join("nested")).unwrap(); + std::fs::write(repo_path.join("target").join("a.txt"), "alpha").unwrap(); + std::fs::write( + repo_path.join("target").join("nested").join("b.txt"), + "beta", + ) + .unwrap(); + + let config_dir = write_config_toml( + r#" +[worktree] +shared_cache = ["target"] +cache_strategy = "copy" +"#, + ); + + let mut cmd = Command::cargo_bin("parsec").unwrap(); + cmd.env("PARSEC_CONFIG_DIR", config_dir.path()) + .args(["start", "CACHE-2", "--repo", repo_path.to_str().unwrap()]) + .assert() + .success(); + + let repo_name = repo_path.file_name().unwrap().to_string_lossy().to_string(); + let wt_path = repo_path + .parent() + .unwrap() + .join(format!("{}.CACHE-2", repo_name)); + 
let dest = wt_path.join("target"); + + assert!(dest.exists()); + let meta = std::fs::symlink_metadata(&dest).unwrap(); + assert!( + !meta.file_type().is_symlink(), + "copy strategy must NOT produce a symlink" + ); + assert!(meta.is_dir()); + assert_eq!( + std::fs::read_to_string(dest.join("a.txt")).unwrap(), + "alpha" + ); + assert_eq!( + std::fs::read_to_string(dest.join("nested").join("b.txt")).unwrap(), + "beta" + ); +} + +#[test] +fn test_shared_cache_missing_entry_skipped() { + let (repo, _bare) = setup_repo_with_remote(); + let repo_path = repo.path(); + + // Don't pre-create `.venv` in the main repo. + let config_dir = write_config_toml( + r#" +[worktree] +shared_cache = [".venv"] +cache_strategy = "symlink" +"#, + ); + + let mut cmd = Command::cargo_bin("parsec").unwrap(); + cmd.env("PARSEC_CONFIG_DIR", config_dir.path()) + .args(["start", "CACHE-3", "--repo", repo_path.to_str().unwrap()]) + .assert() + .success(); + + let repo_name = repo_path.file_name().unwrap().to_string_lossy().to_string(); + let wt_path = repo_path + .parent() + .unwrap() + .join(format!("{}.CACHE-3", repo_name)); + + // Worktree was created (start succeeded), but `.venv` was simply skipped. 
+ assert!(wt_path.exists(), "worktree should still be created"); + assert!( + !wt_path.join(".venv").exists(), + "missing source should NOT be linked into worktree" + ); +} + +// --------------------------------------------------------------------------- +// JSON error format +// --------------------------------------------------------------------------- + +#[test] +fn test_json_error_format() { + let repo = setup_repo(); + let output = parsec() + .args([ + "--json", + "ship", + "NONEXIST", + "--repo", + repo.path().to_str().unwrap(), + ]) + .output() + .unwrap(); + + assert!(!output.status.success()); + + let stdout = String::from_utf8(output.stdout).unwrap(); + let parsed: serde_json::Value = + serde_json::from_str(&stdout).expect("JSON error output must be parseable"); + assert!(parsed["error"].as_bool().unwrap()); + assert!(parsed.get("code").is_some()); + assert!(parsed.get("message").is_some()); +}