diff --git a/src/ai_rules/agents/base.py b/src/ai_rules/agents/base.py index 2c5b7cd..67922f9 100644 --- a/src/ai_rules/agents/base.py +++ b/src/ai_rules/agents/base.py @@ -57,6 +57,13 @@ def preserved_fields(self) -> list[str]: """ return [] + @property + def needs_cache(self) -> bool: + """Whether this agent needs a cache file (has overrides or preserved fields).""" + return self.agent_id in self.config.settings_overrides or bool( + self.preserved_fields + ) + @cached_property @abstractmethod def symlinks(self) -> list[tuple[Path, Path]]: @@ -96,11 +103,11 @@ def build_merged_settings( load_config_file, ) - if self.agent_id not in self.config.settings_overrides: + if not self.needs_cache: return None cache_path = self.config.get_merged_settings_path( - self.agent_id, self.config_file_name + self.agent_id, self.config_file_name, force=True ) if not force_rebuild and cache_path and cache_path.exists(): @@ -162,11 +169,11 @@ def is_cache_stale(self) -> bool: Returns: True if cache needs rebuilding, False otherwise """ - if self.agent_id not in self.config.settings_overrides: + if not self.needs_cache: return False cache_path = self.config.get_merged_settings_path( - self.agent_id, self.config_file_name + self.agent_id, self.config_file_name, force=True ) if not cache_path or not cache_path.exists(): return True @@ -206,7 +213,7 @@ def get_cache_diff(self) -> str | None: from ai_rules.config import CONFIG_PARSE_ERRORS, load_config_file - if self.agent_id not in self.config.settings_overrides: + if not self.needs_cache: return None config_format = self.config_file_format @@ -221,7 +228,7 @@ def get_cache_diff(self) -> str | None: return None cache_path = self.config.get_merged_settings_path( - self.agent_id, self.config_file_name + self.agent_id, self.config_file_name, force=True ) cache_exists = cache_path and cache_path.exists() diff --git a/src/ai_rules/agents/claude.py b/src/ai_rules/agents/claude.py index 89b8d57..c44b82b 100644 --- 
a/src/ai_rules/agents/claude.py +++ b/src/ai_rules/agents/claude.py @@ -51,7 +51,7 @@ def symlinks(self) -> list[tuple[Path, Path]]: settings_file = self.config_dir / "claude" / "settings.json" if settings_file.exists(): target_file = self.config.get_settings_file_for_symlink( - "claude", settings_file + "claude", settings_file, force=bool(self.preserved_fields) ) result.append((Path("~/.claude/settings.json"), target_file)) diff --git a/src/ai_rules/agents/codex.py b/src/ai_rules/agents/codex.py index 6ad9255..e213a28 100644 --- a/src/ai_rules/agents/codex.py +++ b/src/ai_rules/agents/codex.py @@ -48,7 +48,7 @@ def symlinks(self) -> list[tuple[Path, Path]]: config_file = self.config_dir / "codex" / "config.toml" if config_file.exists(): target_file = self.config.get_settings_file_for_symlink( - "codex", config_file + "codex", config_file, force=bool(self.preserved_fields) ) result.append((Path("~/.codex/config.toml"), target_file)) diff --git a/src/ai_rules/agents/gemini.py b/src/ai_rules/agents/gemini.py index a37c1ef..f032909 100644 --- a/src/ai_rules/agents/gemini.py +++ b/src/ai_rules/agents/gemini.py @@ -48,7 +48,7 @@ def symlinks(self) -> list[tuple[Path, Path]]: config_file = self.config_dir / "gemini" / "settings.json" if config_file.exists(): target_file = self.config.get_settings_file_for_symlink( - "gemini", config_file + "gemini", config_file, force=bool(self.preserved_fields) ) result.append((Path("~/.gemini/settings.json"), target_file)) diff --git a/src/ai_rules/agents/goose.py b/src/ai_rules/agents/goose.py index f66ca65..7182232 100644 --- a/src/ai_rules/agents/goose.py +++ b/src/ai_rules/agents/goose.py @@ -48,7 +48,7 @@ def symlinks(self) -> list[tuple[Path, Path]]: config_file = self.config_dir / "goose" / "config.yaml" if config_file.exists(): target_file = self.config.get_settings_file_for_symlink( - "goose", config_file + "goose", config_file, force=bool(self.preserved_fields) ) result.append((Path("~/.config/goose/config.yaml"), 
target_file)) diff --git a/src/ai_rules/bootstrap/__init__.py b/src/ai_rules/bootstrap/__init__.py index e459322..c7ca514 100644 --- a/src/ai_rules/bootstrap/__init__.py +++ b/src/ai_rules/bootstrap/__init__.py @@ -10,6 +10,7 @@ from .installer import ( UV_NOT_FOUND_ERROR, ToolSource, + ensure_basic_memory_installed, ensure_statusline_installed, get_tool_config_dir, get_tool_source, @@ -34,6 +35,7 @@ "parse_version", "UV_NOT_FOUND_ERROR", "ToolSource", + "ensure_basic_memory_installed", "ensure_statusline_installed", "get_tool_config_dir", "get_tool_source", diff --git a/src/ai_rules/bootstrap/installer.py b/src/ai_rules/bootstrap/installer.py index 6edc835..7605743 100644 --- a/src/ai_rules/bootstrap/installer.py +++ b/src/ai_rules/bootstrap/installer.py @@ -37,6 +37,7 @@ def make_github_install_url(repo: str) -> str: UV_NOT_FOUND_ERROR = "uv not found in PATH. Install from https://docs.astral.sh/uv/" GITHUB_REPO = "wpfleger96/ai-rules" STATUSLINE_GITHUB_REPO = "wpfleger96/claude-code-status-line" +BASIC_MEMORY_GITHUB_REPO = "basicmachines-co/basic-memory" def _validate_package_name(package_name: str) -> bool: @@ -318,3 +319,142 @@ def ensure_statusline_installed( return "failed", None except Exception: return "failed", None + + +def _run_basic_memory_setup() -> None: + """Run the idempotent basic-memory setup script (git init, GitHub remote). + + Reads basic_memory config from ~/.ai-rules-config.yaml and passes + as env vars to the setup script. 
+ """ + setup_script = ( + Path(__file__).parent.parent + / "config" + / "claude" + / "hooks" + / "basic-memory-setup.sh" + ) + if not setup_script.exists(): + return + + env = dict(os.environ) + try: + import yaml + + user_config_path = Path.home() / ".ai-rules-config.yaml" + if user_config_path.exists(): + with open(user_config_path) as f: + user_config = yaml.safe_load(f) or {} + bm_config = user_config.get("basic_memory", {}) + if bm_config.get("repo"): + env["BASIC_MEMORY_WIKI_REPO"] = bm_config["repo"] + if bm_config.get("path"): + env["BASIC_MEMORY_HOME"] = str(Path(bm_config["path"]).expanduser()) + except Exception: + pass + + try: + subprocess.run( + ["bash", str(setup_script)], + timeout=60, + capture_output=True, + env=env, + ) + except (subprocess.TimeoutExpired, Exception): + pass + + +def _is_basic_memory_configured(config: object) -> bool: + """Check if basic-memory is configured in the merged MCP config. + + Checks both profile mcp_overrides and the base mcps.json file. + """ + if hasattr(config, "mcp_overrides") and "basic-memory" in config.mcp_overrides: + return True + + try: + import importlib.resources + + config_pkg = importlib.resources.files("ai_rules") / "config" + for mcps_path in [ + config_pkg / "mcps.json", + config_pkg / "claude" / "mcps.json", + ]: + traversable = mcps_path + if hasattr(traversable, "is_file") and traversable.is_file(): + import json + + data = json.loads(traversable.read_text()) + if "basic-memory" in data: + return True + except Exception: + pass + + return False + + +def ensure_basic_memory_installed( + dry_run: bool = False, + from_github: bool = False, + config: object | None = None, +) -> tuple[str, str | None]: + """Install or upgrade basic-memory if needed. Runs setup script after. Fails open. 
+ + Args: + dry_run: If True, show what would be done without executing + from_github: Install from GitHub instead of PyPI + config: Config object; if provided and basic-memory is not configured, skip + + Returns: + Tuple of (status, message) where status is: + "already_installed", "installed", "upgraded", "upgrade_available", "failed", or "skipped" + """ + if config is not None and not _is_basic_memory_configured(config): + return "skipped", None + if is_command_available("basic-memory"): + try: + from ai_rules.bootstrap.updater import ( + check_tool_updates, + get_tool_by_id, + perform_tool_upgrade, + ) + + bm_tool = get_tool_by_id("basic-memory") + if bm_tool: + update_info = check_tool_updates(bm_tool, timeout=10) + if update_info and update_info.has_update: + if dry_run: + return ( + "upgrade_available", + f"Would upgrade basic-memory {update_info.current_version} → {update_info.latest_version}", + ) + success, msg, _ = perform_tool_upgrade(bm_tool) + if success: + return ( + "upgraded", + f"{update_info.current_version} → {update_info.latest_version}", + ) + except Exception: + pass + if not dry_run: + _run_basic_memory_setup() + return "already_installed", None + + try: + success, message = install_tool( + "basic-memory", + from_github=from_github, + github_url=make_github_install_url(BASIC_MEMORY_GITHUB_REPO) + if from_github + else None, + force=False, + dry_run=dry_run, + ) + if success: + if not dry_run: + _run_basic_memory_setup() + return "installed", message if dry_run else None + else: + return "failed", None + except Exception: + return "failed", None diff --git a/src/ai_rules/bootstrap/updater.py b/src/ai_rules/bootstrap/updater.py index c829bd8..115b219 100644 --- a/src/ai_rules/bootstrap/updater.py +++ b/src/ai_rules/bootstrap/updater.py @@ -11,10 +11,12 @@ from dataclasses import dataclass from .installer import ( + BASIC_MEMORY_GITHUB_REPO, GITHUB_REPO, STATUSLINE_GITHUB_REPO, UV_NOT_FOUND_ERROR, ToolSource, + _is_basic_memory_configured, 
_validate_package_name, get_tool_source, get_tool_version, @@ -388,6 +390,19 @@ def perform_tool_upgrade(tool: ToolSpec) -> tuple[bool, str, bool]: return False, f"Unexpected error: {e}", False +def _is_basic_memory_configured_for_active_profile() -> bool: + """Check if basic-memory is configured for the currently active profile.""" + try: + from ai_rules.config import Config + from ai_rules.state import get_active_profile + + profile = get_active_profile() or "default" + config = Config.load(profile=profile) + return _is_basic_memory_configured(config) + except Exception: + return False + + UPDATABLE_TOOLS: list[ToolSpec] = [ ToolSpec( tool_id="ai-rules", @@ -405,6 +420,15 @@ def perform_tool_upgrade(tool: ToolSpec) -> tuple[bool, str, bool]: is_installed=lambda: is_command_available("claude-statusline"), github_repo=STATUSLINE_GITHUB_REPO, ), + ToolSpec( + tool_id="basic-memory", + package_name="basic-memory", + display_name="basic-memory", + get_version=lambda: get_tool_version("basic-memory"), + is_installed=lambda: is_command_available("basic-memory"), + github_repo=BASIC_MEMORY_GITHUB_REPO, + is_enabled=lambda: _is_basic_memory_configured_for_active_profile(), + ), ] diff --git a/src/ai_rules/cli.py b/src/ai_rules/cli.py index 8d273ce..b2b1cab 100644 --- a/src/ai_rules/cli.py +++ b/src/ai_rules/cli.py @@ -443,6 +443,20 @@ def version_callback(ctx: click.Context, param: click.Parameter, value: bool) -> except Exception as e: logger.debug(f"Failed to get statusline version: {e}") + try: + from ai_rules.bootstrap import get_tool_version, is_command_available + + if is_command_available("basic-memory"): + bm_version = get_tool_version("basic-memory") + if bm_version: + console.print(f"basic-memory, version {bm_version}") + else: + console.print( + "basic-memory, version [dim](installed, version unknown)[/dim]" + ) + except Exception as e: + logger.debug(f"Failed to get basic-memory version: {e}") + try: from ai_rules.bootstrap import check_tool_updates, 
get_tool_by_id @@ -982,7 +996,10 @@ def install( from rich.console import Console from rich.prompt import Confirm - from ai_rules.bootstrap import ensure_statusline_installed + from ai_rules.bootstrap import ( + ensure_basic_memory_installed, + ensure_statusline_installed, + ) from ai_rules.config import Config console = Console() @@ -1032,6 +1049,19 @@ def install( console.print(f"[red]Error:[/red] {e}") sys.exit(1) + bm_result, bm_message = ensure_basic_memory_installed( + dry_run=dry_run, config=config + ) + if bm_result == "installed": + if dry_run and bm_message: + console.print(f"[dim]{bm_message}[/dim]\n") + else: + console.print("[green]✓[/green] Installed basic-memory\n") + elif bm_result == "failed": + console.print( + "[yellow]⚠[/yellow] Failed to install basic-memory (continuing anyway)\n" + ) + if not dry_run: set_active_profile(profile) @@ -1092,7 +1122,8 @@ def install( console.print("[bold]Dry run mode - no changes will be made[/bold]\n") if not dry_run: - orphaned = config.cleanup_orphaned_cache() + agents_needing_cache = {a.agent_id for a in selected_agents if a.needs_cache} + orphaned = config.cleanup_orphaned_cache(agents_needing_cache) if orphaned: console.print( f"[dim]✓ Cleaned up orphaned cache for: {', '.join(orphaned)}[/dim]" @@ -1326,7 +1357,7 @@ def status(agents: str | None) -> None: for target, _ in excluded_symlinks: console.print(f" [dim]○[/dim] {target} [dim](excluded by config)[/dim]") - if agent.agent_id in config.settings_overrides: + if agent.needs_cache: if agent.is_cache_stale(): console.print(" [yellow]⚠[/yellow] Cached settings are stale") diff_output = agent.get_cache_diff() @@ -1575,12 +1606,18 @@ def status(agents: str | None) -> None: console.print("[bold cyan]Optional Tools[/bold cyan]\n") from ai_rules.bootstrap import is_command_available - statusline_missing = False + optional_tools_missing = False if is_command_available("claude-statusline"): console.print(" [green]✓[/green] claude-statusline installed") else: 
console.print(" [yellow]○[/yellow] claude-statusline not installed") - statusline_missing = True + optional_tools_missing = True + + if is_command_available("basic-memory"): + console.print(" [green]✓[/green] basic-memory installed") + else: + console.print(" [yellow]○[/yellow] basic-memory not installed") + optional_tools_missing = True console.print() @@ -1620,7 +1657,7 @@ def status(agents: str | None) -> None: else: console.print("[yellow]💡 Run 'ai-rules install' to fix issues[/yellow]") sys.exit(1) - elif statusline_missing: + elif optional_tools_missing: console.print("[green]All symlinks are correct![/green]") console.print( "[yellow]💡 Run 'ai-rules install' to install optional tools[/yellow]" @@ -1749,7 +1786,7 @@ def list_agents_cmd() -> None: ) @click.option( "--only", - type=click.Choice(["ai-rules", "statusline"]), + type=click.Choice(["ai-rules", "statusline", "basic-memory"]), help="Only upgrade specific tool", ) def upgrade( @@ -2128,7 +2165,7 @@ def diff(agents: str | None) -> None: agent_has_diff = True cache_is_stale = False - if agent.agent_id in config.settings_overrides: + if agent.needs_cache: cache_is_stale = agent.is_cache_stale() if cache_is_stale: agent_has_diff = True @@ -2523,7 +2560,13 @@ def config_show(merged: bool, agent: str | None) -> None: agents_to_show = [agent] if agent else ["claude", "codex", "gemini", "goose"] for agent_name in agents_to_show: - if agent_name not in cfg.settings_overrides: + has_overrides = agent_name in cfg.settings_overrides + cache_path = cfg.get_merged_settings_path( + agent_name, "settings.json", force=True + ) + has_cache = cache_path and cache_path.exists() + + if not has_overrides and not has_cache: console.print( f"[dim]{agent_name}: No overrides (using base settings)[/dim]\n" ) @@ -2556,7 +2599,8 @@ def config_show(merged: bool, agent: str | None) -> None: merged_settings = cfg.merge_settings(agent_name, base_settings) overridden_keys = [] - for key in cfg.settings_overrides[agent_name]: + 
agent_overrides = cfg.settings_overrides.get(agent_name, {}) + for key in agent_overrides: if key in base_settings: old_val = base_settings[key] new_val = merged_settings[key] @@ -2577,9 +2621,10 @@ def config_show(merged: bool, agent: str | None) -> None: console.print( f" [yellow]⚠[/yellow] No base settings found at {base_path}" ) - console.print( - f" [dim]Overrides: {cfg.settings_overrides[agent_name]}[/dim]" - ) + if has_overrides: + console.print( + f" [dim]Overrides: {cfg.settings_overrides[agent_name]}[/dim]" + ) console.print() else: diff --git a/src/ai_rules/config.py b/src/ai_rules/config.py index d2d27b7..644b169 100644 --- a/src/ai_rules/config.py +++ b/src/ai_rules/config.py @@ -626,20 +626,21 @@ def merge_settings( return deep_merge(base_settings, self.settings_overrides[agent]) def get_merged_settings_path( - self, agent: str, config_file_name: str + self, agent: str, config_file_name: str, *, force: bool = False ) -> Path | None: """Get the path to cached merged settings for an agent. - Returns None if agent has no overrides (should use base file directly). + Returns None if agent has no overrides and force is False. Args: agent: Agent name (e.g., 'claude', 'goose') config_file_name: Config file name (e.g., 'settings.json') + force: Return cache path even without overrides (for preserved_fields) Returns: - Path to cached merged settings file, or None if no overrides exist + Path to cached merged settings file, or None """ - if agent not in self.settings_overrides: + if not force and agent not in self.settings_overrides: return None cache_dir = self.get_cache_dir() / agent @@ -647,35 +648,34 @@ def get_merged_settings_path( return cache_dir / config_file_name def get_settings_file_for_symlink( - self, agent: str, base_settings_path: Path + self, agent: str, base_settings_path: Path, *, force: bool = False ) -> Path: """Get the appropriate settings file to use for symlinking. 
- Returns cached merged settings if overrides exist and cache is valid, - otherwise returns the base settings file. + Returns cached merged settings if overrides or force is set and cache + exists, otherwise returns the base settings file. This method does NOT build the cache - use build_merged_settings for that. Args: agent: Agent name (e.g., 'claude', 'goose') base_settings_path: Path to base settings file + force: Use cache even without overrides (for preserved_fields) Returns: Path to settings file to use (either cached or base) """ - if agent not in self.settings_overrides: + if not force and agent not in self.settings_overrides: return base_settings_path - cache_path = self.get_merged_settings_path(agent, base_settings_path.name) + cache_path = self.get_merged_settings_path( + agent, base_settings_path.name, force=force + ) if cache_path and cache_path.exists(): return cache_path return base_settings_path - # NOTE: is_cache_stale(), get_cache_diff(), and build_merged_settings() - # have been moved to Agent base class (agents/base.py) where they can - # access agent-specific metadata (config_file_format, preserved_fields). - @staticmethod def load_user_config() -> dict[str, Any]: """Load user config file with defaults. @@ -703,8 +703,12 @@ def save_user_config(data: dict[str, Any]) -> None: with open(user_config_path, "w") as f: yaml.dump(data, f, default_flow_style=False, sort_keys=False) - def cleanup_orphaned_cache(self) -> list[str]: - """Remove cache files for agents that no longer have overrides. + def cleanup_orphaned_cache(self, agents_needing_cache: set[str]) -> list[str]: + """Remove cache files for agents that no longer need them. + + Args: + agents_needing_cache: Set of agent IDs that need caches (overrides + or preserved_fields). Callers must compute this via Agent.needs_cache. 
Returns: List of agent IDs whose caches were removed @@ -717,7 +721,7 @@ def cleanup_orphaned_cache(self) -> list[str]: for agent_dir in cache_dir.iterdir(): if agent_dir.is_dir(): agent_id = agent_dir.name - if agent_id not in self.settings_overrides: + if agent_id not in agents_needing_cache: shutil.rmtree(agent_dir) removed.append(agent_id) diff --git a/src/ai_rules/config/AGENTS.md b/src/ai_rules/config/AGENTS.md index 6acb8f2..cdbe080 100644 --- a/src/ai_rules/config/AGENTS.md +++ b/src/ai_rules/config/AGENTS.md @@ -133,6 +133,9 @@ Three similar lines > premature abstraction | No helpers for one-time ops | Only **Why:** LLMs confidently generate plausible-sounding but incorrect assumptions. Explicit verification prevents wasted work and builds trust through transparency. +### Persistent Knowledge Base (basic-memory) +A persistent knowledge base exists at `~/basic-memory/`. Use basic-memory MCP tools (`search_notes`, `build_context`) for cross-project context. When learning something worth persisting, invoke the `/kb` skill for formatting conventions before calling `write_note`. + --- ## Technical Standards @@ -190,7 +193,7 @@ def test_calls_hash_password(): - Worktree path: `~/Development//.worktrees//` - Branch name sanitization: replace `/`, `\`, `:` with `-` - `feature/auth` → `feature-auth` - - `user/wpfleger/fix-bug` → `user-wpfleger-fix-bug` + - `user/jsmith/fix-bug` → `user-jsmith-fix-bug` **`gh` CLI: appropriate vs. 
preferred-local:** @@ -205,13 +208,13 @@ def test_calls_hash_password(): ``` # ❌ Inefficient: reading code piecemeal through API -gh api repos/squareup/goosed-slackbot/contents/src/main.py -gh pr view 180 --json files # then fetching each file via gh +gh api repos/acme/my-service/contents/src/main.py +gh pr view 42 --json files # then fetching each file via gh # ✅ Efficient: check PR branch, then explore locally -gh pr view 180 --repo squareup/goosed-slackbot --json headRefName +gh pr view 42 --repo acme/my-service --json headRefName # → branch: feature/slack-events -# → explore ~/Development/goosed-slackbot/.worktrees/feature-slack-events/ +# → explore ~/Development/my-service/.worktrees/feature-slack-events/ ``` **Workflow when given PR URLs:** diff --git a/src/ai_rules/config/claude/hooks/basic-memory-post-write.sh b/src/ai_rules/config/claude/hooks/basic-memory-post-write.sh new file mode 100755 index 0000000..9b5ce37 --- /dev/null +++ b/src/ai_rules/config/claude/hooks/basic-memory-post-write.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# Commit + push knowledge base changes after a basic-memory write. +# Called by PostToolUse hook on mcp__basic-memory__* tools. +WIKI_DIR="${BASIC_MEMORY_HOME:-$HOME/basic-memory}" +[ -d "$WIKI_DIR/.git" ] || exit 0 +cd "$WIKI_DIR" +git add -A +git diff --cached --quiet && exit 0 +git commit -m "auto: update knowledge base" +nohup git push >/dev/null 2>&1 & +exit 0 diff --git a/src/ai_rules/config/claude/hooks/basic-memory-runner.sh b/src/ai_rules/config/claude/hooks/basic-memory-runner.sh new file mode 100755 index 0000000..aac3898 --- /dev/null +++ b/src/ai_rules/config/claude/hooks/basic-memory-runner.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# Wrapper: sync pending changes, pull latest, then launch basic-memory MCP server. +# Used as the MCP command in mcps.json for all agents. 
+export PATH="$HOME/.local/bin:$PATH" +WIKI_DIR="${BASIC_MEMORY_HOME:-$HOME/basic-memory}" +if [ -d "$WIKI_DIR/.git" ]; then + cd "$WIKI_DIR" + git push 2>/dev/null || true + git pull --rebase --autostash >/dev/null 2>&1 || git rebase --abort >/dev/null 2>&1 +fi +exec basic-memory mcp "$@" diff --git a/src/ai_rules/config/claude/hooks/basic-memory-setup.sh b/src/ai_rules/config/claude/hooks/basic-memory-setup.sh new file mode 100755 index 0000000..af7e840 --- /dev/null +++ b/src/ai_rules/config/claude/hooks/basic-memory-setup.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# Idempotent setup: ensure ~/basic-memory is a git repo with GitHub remote. +# Safe to run on every `ai-rules install` / `ai-rules upgrade`. +# Handles: fresh install, second machine, repo migration, graceful degradation. +set -euo pipefail + +WIKI_DIR="${BASIC_MEMORY_HOME:-$HOME/basic-memory}" +REPO_NAME="${BASIC_MEMORY_WIKI_REPO:-basic-memory-wiki}" + +resolve_repo() { + local name="$1" + case "$name" in + */*) echo "$name" ;; + *) + local user + user=$(gh api user --jq '.login' 2>/dev/null) || return 1 + [ -n "$user" ] && echo "$user/$name" || return 1 + ;; + esac +} + +check_gh() { + command -v gh >/dev/null 2>&1 || { echo "⚠ gh CLI not found — skipping GitHub remote setup. Knowledge base works locally."; return 1; } + gh auth status >/dev/null 2>&1 || { echo "⚠ gh not authenticated — run 'gh auth login' for cross-machine sync."; return 1; } + return 0 +} + +mkdir -p "$WIKI_DIR" +for dir in repos projects patterns decisions preferences references references/block people feedback; do + mkdir -p "$WIKI_DIR/$dir" + [ -f "$WIKI_DIR/$dir/.gitkeep" ] || touch "$WIKI_DIR/$dir/.gitkeep" +done + +if [ ! -d "$WIKI_DIR/.git" ]; then + cd "$WIKI_DIR" + if ! 
git config user.name >/dev/null 2>&1; then + git config user.name "AI Agent" + git config user.email "agent@local" + fi + git init + git add -A + git commit -m "feat: initialize knowledge base" --allow-empty +fi + +cd "$WIKI_DIR" + +CURRENT_REMOTE=$(git remote get-url origin 2>/dev/null || echo "") + +if [ -z "$CURRENT_REMOTE" ]; then + check_gh || exit 0 + FULL_REPO=$(resolve_repo "$REPO_NAME") || { echo "⚠ Could not detect GitHub username — skipping remote setup."; exit 0; } + + if gh repo view "$FULL_REPO" >/dev/null 2>&1; then + git remote add origin "git@github.com:$FULL_REPO.git" + git fetch origin + git branch --set-upstream-to=origin/main main 2>/dev/null || true + git pull --rebase --autostash >/dev/null 2>&1 || git rebase --abort >/dev/null 2>&1 + else + gh repo create "$FULL_REPO" --private --description "Persistent LLM knowledge base" + git remote add origin "git@github.com:$FULL_REPO.git" + fi + git push -u origin main 2>/dev/null || true + +else + check_gh || exit 0 + FULL_REPO=$(resolve_repo "$REPO_NAME") || exit 0 + EXPECTED_URL="git@github.com:$FULL_REPO.git" + + if [ "$CURRENT_REMOTE" != "$EXPECTED_URL" ]; then + echo "Migrating knowledge base remote: $CURRENT_REMOTE → $EXPECTED_URL" + + git fetch origin 2>/dev/null || true + git pull --rebase --autostash >/dev/null 2>&1 || git rebase --abort >/dev/null 2>&1 + + if ! gh repo view "$FULL_REPO" >/dev/null 2>&1; then + gh repo create "$FULL_REPO" --private --description "Persistent LLM knowledge base" + fi + + git remote set-url origin "$EXPECTED_URL" + git push -u origin main 2>/dev/null || git push -u origin main --force 2>/dev/null || true + + OLD_REPO=$(echo "$CURRENT_REMOTE" | sed 's|git@github.com:||;s|\.git$||') + echo "✓ Migrated knowledge base to $FULL_REPO" + echo "⚠ Old repo '$OLD_REPO' still exists on GitHub. Delete manually if no longer needed." 
+ exit 0 + fi +fi + +echo "✓ Knowledge base ready at $WIKI_DIR" diff --git a/src/ai_rules/config/claude/settings.json b/src/ai_rules/config/claude/settings.json index 70b7d88..7945ef8 100644 --- a/src/ai_rules/config/claude/settings.json +++ b/src/ai_rules/config/claude/settings.json @@ -2,11 +2,10 @@ "cleanupPeriodDays": 99999, "env": { "ANTHROPIC_DEFAULT_SONNET_MODEL": "claude-sonnet-4-6", - "ANTHROPIC_DEFAULT_OPUS_MODEL": "claude-opus-4-6", + "ANTHROPIC_DEFAULT_OPUS_MODEL": "claude-opus-4-7", "ANTHROPIC_DEFAULT_HAIKU_MODEL": "claude-haiku-4-5-20251001", "CLAUDE_CODE_SUBAGENT_MODEL": "claude-sonnet-4-6", - "CLAUDE_CODE_DISABLE_ADAPTIVE_THINKING": "1", - "CLAUDE_CODE_EFFORT_LEVEL": "max" + "CLAUDE_CODE_EFFORT_LEVEL": "xhigh" }, "attribution": { "commit": "", @@ -15,7 +14,6 @@ "permissions": { "allow": [ "Bash(awk:*)", - "Bash(beads:*)", "Bash(bundle:*)", "Bash(cargo build:*)", "Bash(cargo check:*)", @@ -90,7 +88,6 @@ "Bash(mv:*)", "Bash(npm:*)", "Bash(pdftotext:*)", - "Bash(perles:*)", "Bash(python:*)", "Bash(python3:*)", "Bash(readlink:*)", @@ -138,9 +135,7 @@ "type": "command", "command": "claude-statusline" }, - "enabledPlugins": { - "plugin-dev@claude-plugins-official": true - }, "alwaysThinkingEnabled": true, + "showThinkingSummaries": true, "skipDangerousModePermissionPrompt": true } diff --git a/src/ai_rules/config/mcps.json b/src/ai_rules/config/mcps.json new file mode 100644 index 0000000..0967ef4 --- /dev/null +++ b/src/ai_rules/config/mcps.json @@ -0,0 +1 @@ +{} diff --git a/src/ai_rules/config/profiles/default.yaml b/src/ai_rules/config/profiles/default.yaml index 5f98e6a..e78cc34 100644 --- a/src/ai_rules/config/profiles/default.yaml +++ b/src/ai_rules/config/profiles/default.yaml @@ -4,9 +4,6 @@ extends: null settings_overrides: {} exclude_symlinks: [] mcp_overrides: {} -plugins: - - name: plugin-dev - marketplace: claude-plugins-official marketplaces: - name: cc-marketplace source: ananddtyagi/cc-marketplace diff --git 
a/src/ai_rules/config/profiles/personal.yaml b/src/ai_rules/config/profiles/personal.yaml new file mode 100644 index 0000000..2ab11f9 --- /dev/null +++ b/src/ai_rules/config/profiles/personal.yaml @@ -0,0 +1,8 @@ +name: personal +description: Personal configuration with additional tools and integrations +extends: default +settings_overrides: + claude: + model: opusplan +exclude_symlinks: [] +mcp_overrides: {} diff --git a/src/ai_rules/config/profiles/work.yaml b/src/ai_rules/config/profiles/work.yaml index f3c2722..c4efa34 100644 --- a/src/ai_rules/config/profiles/work.yaml +++ b/src/ai_rules/config/profiles/work.yaml @@ -1,13 +1,14 @@ name: work -description: Work laptop with extended context model -extends: null +description: Work laptop with extended context models +extends: personal settings_overrides: claude: env: ANTHROPIC_DEFAULT_SONNET_MODEL: "claude-sonnet-4-6[1m]" CLAUDE_CODE_SUBAGENT_MODEL: "claude-sonnet-4-6[1m]" - ANTHROPIC_DEFAULT_OPUS_MODEL: "claude-opus-4-6[1m]" - model: opusplan + ANTHROPIC_DEFAULT_OPUS_MODEL: "claude-opus-4-7[1m]" + CLAUDE_CODE_EFFORT_LEVEL: "max" + model: opus gemini: model: name: "gemini-3.1-pro-preview" @@ -16,13 +17,5 @@ settings_overrides: selectedType: "gemini-api-key" ui: useFullWidth: true - codex: - model: "gpt-5.2-codex" exclude_symlinks: [] mcp_overrides: {} -plugins: - - name: plugin-dev - marketplace: claude-plugins-official -marketplaces: - - name: cc-marketplace - source: ananddtyagi/cc-marketplace diff --git a/src/ai_rules/config/skills/code-reviewer/SKILL.md b/src/ai_rules/config/skills/code-reviewer/SKILL.md index 9517a87..d0c64c6 100644 --- a/src/ai_rules/config/skills/code-reviewer/SKILL.md +++ b/src/ai_rules/config/skills/code-reviewer/SKILL.md @@ -121,14 +121,14 @@ Structure your response as: #### Step 3: Launch CLIs in Background -Use a **single Bash call with `run_in_background=true`**. 
Substitute the work directory path from Step 1 for `$WORK_DIR` in the command (use the literal path, not a variable reference). This fires the command and returns immediately so you can continue to Phase 1 while the CLIs execute. When the background task completes, the inline delimited output arrives via the background notification — Phase 5 parses it from there. The prompt is piped via stdin rather than passed as a CLI argument — large diffs exceed the OS `ARG_MAX` limit (~256KB on macOS). +Use a **single Bash call with `run_in_background=true`**. Substitute the work directory path from Step 1 for `$WORK_DIR` in the command (use the literal path, not a variable reference). This fires the command and returns immediately so you can continue to Phase 1 while the CLIs execute. When the background task completes, the inline delimited output arrives via the background notification — Phase 5 parses it from there. The prompt is written to a temp file — do NOT pass via stdin (causes Codex to echo the full prompt to stderr) or as a CLI argument (exceeds `ARG_MAX`). Instead, close stdin with `< /dev/null` (prevents non-TTY hang) and instruct Codex to `cat` the file. 
```bash WORK_DIR="" [ -d "$WORK_DIR" ] || { echo "ERROR: WORK_DIR does not exist: $WORK_DIR"; exit 1; } # Check CLI availability -CODEX_AVAILABLE=$(command -v codex >/dev/null 2>&1 && [ -f ~/.env/openai.key ] && echo "yes" || echo "no") +CODEX_AVAILABLE=$(command -v codex >/dev/null 2>&1 && echo "yes" || echo "no") GEMINI_AVAILABLE=$(command -v gemini >/dev/null 2>&1 && [ -f ~/.env/gemini_cli.key ] && echo "yes" || echo "no") REPO_ROOT=$(git rev-parse --show-toplevel 2>/dev/null || pwd) @@ -148,10 +148,10 @@ CODEX_EXIT="-1"; GEMINI_EXIT="-1" # Launch Codex (background) if [ "$CODEX_AVAILABLE" = "yes" ]; then CODEX_RAN="yes" - OPENAI_API_KEY=$(cat ~/.env/openai.key) timeout 300 codex exec -C "$REPO_ROOT" \ + timeout 300 codex exec -C "$REPO_ROOT" \ --dangerously-bypass-approvals-and-sandbox \ - "Follow the review instructions below." \ - < "$WORK_DIR/prompt.txt" \ + "Run cat \"$WORK_DIR/prompt.txt\" and follow the instructions in the output." \ + < /dev/null \ > "$CODEX_OUT" 2>"$CODEX_ERR" & CODEX_PID=$! fi diff --git a/src/ai_rules/config/skills/crossfire/SKILL.md b/src/ai_rules/config/skills/crossfire/SKILL.md index ad0fc4c..de31f61 100644 --- a/src/ai_rules/config/skills/crossfire/SKILL.md +++ b/src/ai_rules/config/skills/crossfire/SKILL.md @@ -15,7 +15,9 @@ metadata: - Uncommitted changes: !`git status --porcelain 2>/dev/null | wc -l | xargs` - PLAN files: !`sh -c 'COMMON=$(git rev-parse --path-format=absolute --git-common-dir 2>/dev/null); if [ -z "$COMMON" ]; then exit 0; fi; PROJECT_ROOT=$(dirname "$COMMON"); cd "$PROJECT_ROOT" && for f in PLAN__*.md; do [ -f "$f" ] && echo "$f"; done' 2>/dev/null | head -5` -You are a crossfire review coordinator. Your job is to identify the artifact the user wants reviewed, then run independent reviews via Codex (GPT) and Gemini CLIs in parallel, and synthesize a consensus report. 
+# Run Crossfire Review + +Detect what to review from `${ARGS}` using the Artifact Detection rules below, then proceed immediately to Orchestration. ## Artifact Detection @@ -51,14 +53,14 @@ If no args provided: --- -After determining the artifact (and optional review focus), proceed to **Orchestration**. +Once you have the artifact and optional review focus, proceed immediately to **Orchestration** without waiting for further user input. ## Orchestration ### Step 1: Check CLI Availability ```bash -CODEX_AVAILABLE=$(command -v codex >/dev/null 2>&1 && [ -f ~/.env/openai.key ] && echo "yes" || echo "no") +CODEX_AVAILABLE=$(command -v codex >/dev/null 2>&1 && echo "yes" || echo "no") GEMINI_AVAILABLE=$(command -v gemini >/dev/null 2>&1 && [ -f ~/.env/gemini_cli.key ] && echo "yes" || echo "no") ``` @@ -118,7 +120,7 @@ Structure your response as: ### Step 3: Write Prompt and Launch CLIs -Write the full prompt to a temp file. Do NOT pass it as a command-line argument — large artifacts (diffs, plans) exceed the OS `ARG_MAX` limit (~256KB on macOS). By writing to a file and giving the CLI a short instruction to read it, the command line stays small. +Write the full prompt to a temp file. Do NOT pass it as a command-line argument or via stdin — large artifacts exceed the OS `ARG_MAX` limit, and stdin piping causes Codex to echo the full prompt to stderr. Instead, write to a file, close stdin with `< /dev/null` (prevents non-TTY hang), and instruct the CLI to `cat` the file. ```bash PROMPT_DIR=$(mktemp -d /tmp/crossfire-prompt-XXXXXX) @@ -137,10 +139,10 @@ trap 'rm -rf "$PROMPT_DIR" "$CODEX_OUT" "$CODEX_ERR" "$GEMINI_OUT"' EXIT INT TER # Codex (background) — only if available if [ "$CODEX_AVAILABLE" = "yes" ]; then - OPENAI_API_KEY=$(cat ~/.env/openai.key) timeout 300 codex exec -C "$REPO_ROOT" \ + timeout 300 codex exec -C "$REPO_ROOT" \ --dangerously-bypass-approvals-and-sandbox \ - "Follow the review instructions below." 
\ - < "$PROMPT_FILE" \ + "Run cat \"$PROMPT_FILE\" and follow the instructions in the output." \ + < /dev/null \ > "$CODEX_OUT" 2>"$CODEX_ERR" & CODEX_PID=$! fi diff --git a/src/ai_rules/config/skills/kb/SKILL.md b/src/ai_rules/config/skills/kb/SKILL.md new file mode 100644 index 0000000..3b59b4f --- /dev/null +++ b/src/ai_rules/config/skills/kb/SKILL.md @@ -0,0 +1,118 @@ +--- +name: kb +description: >- + This skill should be used when the user asks to "save to knowledge base", + "write a note", "persist this", "remember this pattern", "update the KB", + or when the Stop hook instructs the agent to persist session knowledge. + Also use when asking "search knowledge base", "what do we know about", + or needing cross-project context from basic-memory. +--- + +# Knowledge Base (basic-memory) + +A persistent markdown knowledge base at `~/basic-memory/` powered by the basic-memory MCP server. Knowledge persists across sessions, repos, and machines via git sync. Searchable with hybrid BM25 + vector search. + +## Workflow + +1. **Search first** to avoid duplicates: `search_notes(query="topic")` +2. **Read existing** if a related note exists: `read_note(identifier="note-title")` +3. **Write or update**: `write_note(title="...", directory="...", content="...")` +4. 
**Connect related notes** using `[[wikilinks]]` in the Relations section + +## Directory Guide + +| Directory | Use for | Example titles | +|-----------|---------|----------------| +| `repos/` | Per-repo commands, gotchas, patterns | "ai-rules", "goosed-slackbot" | +| `patterns/` | Reusable technical knowledge | "uv-run-not-direct-invocation" | +| `decisions/` | ADRs -- why something was chosen | "raw-sql-over-sqlalchemy" | +| `preferences/` | User working style, conventions | "test-docstring-conventions" | +| `references/` | External knowledge, company info | "block-ci-cd-pipeline" | +| `references/block/` | Block/Square-specific knowledge | "service-registry-conventions" | +| `people/` | Teammates, communication context | "tyler-sprout-expert" | +| `feedback/` | Corrections, lessons from mistakes | "no-assertions-without-verification" | +| `projects/` | Multi-repo initiative context | "slackbot-kotlin-migration" | + +## Note Format + +Every note uses YAML frontmatter + structured observations + relations: + +```markdown +--- +title: Note Title +type: note +tags: [tag1, tag2] +--- + +# Note Title + +## Observations +- [fact] Concrete, verified information +- [tip] Practical advice for future use +- [method] How to do something specific +- [preference] User's stated preference or convention +- [decision] A choice that was made and why + +## Relations +- related_to [[other-note-title]] +- depends_on [[prerequisite-note]] +``` + +### Observation Categories + +- **`[fact]`** -- verified, objective information (e.g., "Uses Gradle wrapper, not Maven") +- **`[tip]`** -- practical guidance (e.g., "Check Justfile before guessing build commands") +- **`[method]`** -- how to do something (e.g., "Run `just check-all` for full validation") +- **`[preference]`** -- user's stated preference (e.g., "Never add docstrings to test functions") +- **`[decision]`** -- a choice with rationale (e.g., "Chose raw SQL for performance over ORM convenience") + +### Tags + +Use lowercase, 
hyphenated tags in frontmatter. Common tags: `python`, `kotlin`, `rust`, `block`, `testing`, `ci-cd`, `architecture`, `gotcha`. + +### Wikilinks + +Connect related notes with `[[note-title]]` in the Relations section. Use typed relations: +- `related_to [[note]]` -- general connection +- `depends_on [[note]]` -- prerequisite +- `supersedes [[note]]` -- replaces older knowledge +- `contradicts [[note]]` -- conflicting information (flag for review) + +## When to Write vs Search + +**Write a note when:** +- A decision was made about architecture or approach +- A repo-specific gotcha or non-obvious command was discovered +- Company-specific knowledge was learned (Block internals, service names, team conventions) +- The user corrected a previous assumption +- A reusable pattern was identified + +**Search instead when:** +- Starting work in an unfamiliar repo -- `search_notes(query="repo-name")` +- Encountering a pattern already seen -- `search_notes(query="topic")` +- Needing cross-project context -- `build_context(url="memory://note-title")` + +**Do NOT write:** +- Session-specific ephemeral context (what files were edited this session) +- Information obvious from the codebase (README content, import paths) +- Speculative or unverified information + +## MCP Tools Quick Reference + +| Tool | Purpose | +|------|---------| +| `search_notes(query, tags?, note_types?)` | Hybrid search across all notes | +| `build_context(url)` | Graph traversal from a note via wikilinks | +| `read_note(identifier)` | Read a specific note by title or permalink | +| `write_note(title, directory, content)` | Create or update a note | +| `edit_note(identifier, find, replace?)` | Partial edit without full rewrite | +| `delete_note(identifier)` | Delete a note (ask user first) | +| `recent_activity(depth?)` | Recently modified notes | +| `list_directory(dir_name?, depth?)` | Browse the folder structure | + +## Additional Resources + +### Reference Files + +For complete note templates with 
realistic examples for each directory type: +- **`references/note-templates.md`** -- Full templates for repos/, patterns/, decisions/, preferences/, references/, people/, feedback/, projects/ diff --git a/src/ai_rules/config/skills/kb/references/note-templates.md b/src/ai_rules/config/skills/kb/references/note-templates.md new file mode 100644 index 0000000..bf1fb33 --- /dev/null +++ b/src/ai_rules/config/skills/kb/references/note-templates.md @@ -0,0 +1,267 @@ +# Note Templates by Directory + +Complete templates for each knowledge base directory. Copy the relevant template and fill in the content. + +## repos/ -- Repository Guide + +```markdown +--- +title: repo-name +type: note +tags: [language, framework, block] +--- + +# repo-name + +## Observations +- [fact] Brief description of what this repo does +- [method] Primary build/test command (e.g., `just test` or `./gradlew testUnit`) +- [fact] Language and framework details +- [tip] Non-obvious gotchas or conventions +- [fact] Key configuration files to know about + +## Relations +- related_to [[project-name-if-applicable]] +- related_to [[relevant-pattern]] +``` + +**Example:** +```markdown +--- +title: goosed-slackbot +type: note +tags: [kotlin, slackbot, block, migration] +--- + +# goosed-slackbot + +## Observations +- [fact] Slack bot migrating from Python to Kotlin (kgoose) +- [method] Run tests with `just test` (Justfile wraps Gradle) +- [fact] Has 60+ git worktrees for parallel development +- [tip] Check `Justfile` for available commands before guessing +- [tip] Worktree branch names sanitize `/` to `-` + +## Relations +- related_to [[cash-server]] +- related_to [[slackbot-kotlin-migration]] +- related_to [[kotlin-patterns]] +``` + +## patterns/ -- Reusable Technical Pattern + +```markdown +--- +title: pattern-name +type: note +tags: [language, domain] +--- + +# pattern-name + +## Observations +- [fact] What the pattern is +- [method] How to apply it +- [tip] When to use it vs alternatives +- [fact] Why it 
matters (consequence of ignoring) + +## Relations +- related_to [[relevant-repo]] +- related_to [[related-pattern]] +``` + +**Example:** +```markdown +--- +title: uv-run-not-direct-invocation +type: note +tags: [python, tooling, gotcha] +--- + +# uv-run-not-direct-invocation + +## Observations +- [fact] Always use `uv run pytest` not bare `pytest` in uv-managed projects +- [fact] Always use `uvx ruff check .` not bare `ruff` +- [method] Check for Justfile first -- `just test` wraps the correct invocation +- [tip] Direct tool invocation bypasses project configuration and venv +- [fact] This is the #1 mistake agents make in Python projects + +## Relations +- related_to [[ai-rules]] +- related_to [[python-tooling]] +``` + +## decisions/ -- Architecture Decision Record + +```markdown +--- +title: decision-name +type: note +tags: [domain, scope] +--- + +# decision-name + +## Observations +- [decision] What was decided and the chosen approach +- [fact] What alternatives were considered +- [fact] Why the chosen approach won (key tradeoff) +- [fact] What was explicitly rejected and why +- [tip] Constraints or assumptions this depends on + +## Relations +- related_to [[relevant-project]] +- supersedes [[older-decision-if-any]] +``` + +**Example:** +```markdown +--- +title: raw-sql-over-sqlalchemy +type: note +tags: [architecture, database, python] +--- + +# raw-sql-over-sqlalchemy + +## Observations +- [decision] Use raw SQL (psycopg3) instead of SQLAlchemy ORM for the data layer +- [fact] ORM adds 3-5ms per query, unacceptable for batch operations +- [fact] SQLAlchemy considered but rejected due to N+1 query risk +- [preference] User prefers explicit SQL over magic ORM behavior +- [tip] This decision holds for read-heavy services; write-heavy may revisit + +## Relations +- related_to [[python-patterns]] +- related_to [[data-layer-project]] +``` + +## preferences/ -- User Working Style + +```markdown +--- +title: preference-name +type: note +tags: [domain] +--- + +# 
preference-name + +## Observations +- [preference] The specific preference or convention +- [fact] Context for why this matters +- [tip] How to apply this in practice + +## Relations +- related_to [[relevant-pattern]] +``` + +**Example:** +```markdown +--- +title: test-docstring-conventions +type: note +tags: [testing, style, python] +--- + +# test-docstring-conventions + +## Observations +- [preference] Never add docstrings to individual test functions +- [preference] Only the test class itself gets a docstring +- [fact] Test function names should be descriptive enough on their own +- [method] Name tests as `test_<unit>__<expected_behavior>` + +## Relations +- related_to [[python-patterns]] +- related_to [[testing-conventions]] +``` + +## references/ -- External Knowledge + +```markdown +--- +title: reference-name +type: note +tags: [domain, source] +--- + +# reference-name + +## Observations +- [fact] Key information from the external source +- [method] How to apply this knowledge +- [tip] Gotchas or non-obvious details + +## Relations +- related_to [[relevant-project-or-pattern]] +``` + +For Block-specific knowledge, use `references/block/` as the directory. 
+ +## people/ -- Teammate Context + +```markdown +--- +title: person-name +type: note +tags: [team, domain] +--- + +# person-name + +## Observations +- [fact] Role and primary responsibilities +- [fact] Domain expertise areas +- [tip] Best way to collaborate or communicate +- [fact] Key projects they own or contribute to + +## Relations +- related_to [[project-they-own]] +``` + +## feedback/ -- Corrections and Lessons + +```markdown +--- +title: feedback-name +type: note +tags: [domain] +--- + +# feedback-name + +## Observations +- [fact] What went wrong or what was corrected +- [fact] Why the incorrect assumption was made +- [method] The correct approach going forward +- [tip] How to avoid this mistake in the future + +## Relations +- related_to [[relevant-pattern-or-repo]] +``` + +## projects/ -- Multi-repo Initiative + +```markdown +--- +title: project-name +type: note +tags: [initiative, status] +--- + +# project-name + +## Observations +- [fact] What the initiative is trying to accomplish +- [fact] Which repos are involved +- [fact] Current status and next steps +- [decision] Key architectural choices made +- [tip] Who to ask about specific aspects + +## Relations +- related_to [[repo-1]] +- related_to [[repo-2]] +- related_to [[key-person]] +``` diff --git a/tests/integration/test_status_command.py b/tests/integration/test_status_command.py index 1f1efdc..2f0cb8d 100644 --- a/tests/integration/test_status_command.py +++ b/tests/integration/test_status_command.py @@ -251,7 +251,6 @@ def test_status_passes_when_cache_fresh( yaml.dump(user_config, f) config = Config.load() - ClaudeAgent(test_repo, config).build_merged_settings() config.plugins = [] config.marketplaces = [] @@ -260,6 +259,8 @@ def test_status_passes_when_cache_fresh( gemini = GeminiAgent(test_repo, config) goose = GooseAgent(test_repo, config) shared = SharedAgent(test_repo, config) + for agent in [claude, codex, gemini, goose]: + agent.build_merged_settings() for agent in [claude, codex, gemini, 
goose, shared]: for target, source in agent.symlinks: target_path = Path(str(target).replace("~", str(mock_home))) @@ -293,6 +294,8 @@ def test_status_no_cache_warning_when_no_overrides( gemini = GeminiAgent(test_repo, config) goose = GooseAgent(test_repo, config) shared = SharedAgent(test_repo, config) + for agent in [claude, codex, gemini, goose]: + agent.build_merged_settings() for agent in [claude, codex, gemini, goose, shared]: for target, source in agent.symlinks: target_path = Path(str(target).replace("~", str(mock_home))) diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 6904cb0..fc0f088 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -266,13 +266,14 @@ def test_build_merged_settings_cache_creation(self, cache_setup): assert cached["model"] == "claude-sonnet-4-5-20250929" - def test_build_merged_settings_without_overrides_returns_none( + def test_build_merged_settings_without_overrides_creates_cache_for_preserved_fields( self, tmp_path, monkeypatch ): - """Test that no cache is created when there are no overrides.""" + """Test that cache is created for agents with preserved_fields even without overrides.""" home = tmp_path / "home" home.mkdir() monkeypatch.setenv("HOME", str(home)) + monkeypatch.setattr(Path, "home", staticmethod(lambda: home)) config_dir = tmp_path / "config" claude_dir = config_dir / "claude" @@ -283,7 +284,8 @@ def test_build_merged_settings_without_overrides_returns_none( agent = ClaudeAgent(config_dir, config) cache_path = agent.build_merged_settings() - assert cache_path is None + assert cache_path is not None + assert cache_path.exists() def test_cache_staleness_when_missing(self, cache_setup): """Test that cache is stale when it doesn't exist.""" @@ -711,22 +713,28 @@ class TestCacheCleanup: """Tests for orphaned cache cleanup functionality.""" def test_cleanup_orphaned_cache(self, tmp_path, monkeypatch): - """Test that orphaned cache files are removed.""" + """Test that orphaned 
cache files are removed but needed ones are kept.""" home = tmp_path / "home" home.mkdir() monkeypatch.setenv("HOME", str(home)) monkeypatch.setattr(Path, "home", staticmethod(lambda: home)) - cache_dir = home / ".ai-rules" / "cache" / "claude" - cache_dir.mkdir(parents=True) - (cache_dir / "settings.json").write_text('{"orphaned": true}') + claude_cache = home / ".ai-rules" / "cache" / "claude" + claude_cache.mkdir(parents=True) + (claude_cache / "settings.json").write_text('{"active": true}') + + stale_cache = home / ".ai-rules" / "cache" / "old_agent" + stale_cache.mkdir(parents=True) + (stale_cache / "config.json").write_text('{"stale": true}') config = Config(settings_overrides={}) - removed = config.cleanup_orphaned_cache() - assert "claude" in removed - assert not cache_dir.exists() + removed = config.cleanup_orphaned_cache(agents_needing_cache={"claude"}) + assert "old_agent" in removed + assert "claude" not in removed + assert claude_cache.exists() + assert not stale_cache.exists() def test_cleanup_preserves_cache_with_overrides(self, tmp_path, monkeypatch): """Test that cache files with active overrides are preserved.""" @@ -742,7 +750,7 @@ def test_cleanup_preserves_cache_with_overrides(self, tmp_path, monkeypatch): config = Config(settings_overrides={"claude": {"model": "test"}}) - removed = config.cleanup_orphaned_cache() + removed = config.cleanup_orphaned_cache(agents_needing_cache={"claude"}) assert removed == [] assert cache_dir.exists() @@ -755,7 +763,7 @@ def test_cleanup_when_no_cache_dir(self, tmp_path, monkeypatch): monkeypatch.setattr(Path, "home", staticmethod(lambda: home)) config = Config(settings_overrides={}) - removed = config.cleanup_orphaned_cache() + removed = config.cleanup_orphaned_cache(agents_needing_cache=set()) assert removed == [] @@ -894,7 +902,10 @@ def test_validate_codex_override_path_invalid(self, tmp_path): assert not is_valid assert "nonexistent_key" in error - def test_toml_settings_not_stale_without_overrides(self, 
tmp_path, monkeypatch): + def test_toml_settings_stale_without_cache_when_preserved_fields( + self, tmp_path, monkeypatch + ): + """Agents with preserved_fields report stale when no cache exists.""" home = tmp_path / "home" home.mkdir() monkeypatch.setenv("HOME", str(home)) @@ -903,8 +914,11 @@ def test_toml_settings_not_stale_without_overrides(self, tmp_path, monkeypatch): config_dir = tmp_path / "config" codex_dir = config_dir / "codex" codex_dir.mkdir(parents=True) - (codex_dir / "config.toml").write_text('model = "gpt-5.2-codex"\n') + (codex_dir / "config.toml").write_text('model = "gpt-5.4"\n') config = Config() agent = CodexAgent(config_dir, config) + assert agent.is_cache_stale() + + agent.build_merged_settings() assert not agent.is_cache_stale()