Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 2 additions & 6 deletions autocontext/src/autocontext/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -475,14 +475,10 @@ def list_runs(

settings = load_settings()
store = SQLiteStore(settings.db_path)
with store.connect() as conn:
rows = conn.execute(
"SELECT run_id, scenario, target_generations, executor_mode, status, created_at "
"FROM runs ORDER BY created_at DESC LIMIT 20"
).fetchall()
rows = store.list_runs(limit=20)

if json_output:
result = [dict(row) for row in rows]
result = rows
sys.stdout.write(json.dumps(result) + "\n")
else:
table = Table(title="Recent Runs")
Expand Down
7 changes: 1 addition & 6 deletions autocontext/src/autocontext/mcp/knowledge_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,12 +53,7 @@ def read_skills(ctx: MtsToolContext, scenario_name: str) -> str:

def list_runs(ctx: MtsToolContext) -> list[dict[str, Any]]:
"""List recent runs from SQLite."""
with ctx.sqlite.connect() as conn:
rows = conn.execute(
"SELECT run_id, scenario, target_generations, executor_mode, status, created_at "
"FROM runs ORDER BY created_at DESC LIMIT 20"
).fetchall()
return [dict(row) for row in rows]
return ctx.sqlite.list_runs(limit=20)


def run_status(ctx: MtsToolContext, run_id: str) -> list[dict[str, Any]]:
Expand Down
15 changes: 2 additions & 13 deletions autocontext/src/autocontext/server/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,22 +149,11 @@ def health() -> dict[str, str]:

@application.get("/api/runs")
def list_runs() -> list[dict[str, Any]]:
with store.connect() as conn:
rows = conn.execute(
"SELECT run_id, scenario, target_generations, executor_mode, status, created_at "
"FROM runs ORDER BY created_at DESC LIMIT 50"
).fetchall()
return [dict(row) for row in rows]
return store.list_runs(limit=50)

@application.get("/api/runs/{run_id}/status")
def run_status(run_id: str) -> list[dict[str, Any]]:
with store.connect() as conn:
rows = conn.execute(
"SELECT generation_index, mean_score, best_score, elo, wins, losses, gate_decision, status "
"FROM generations WHERE run_id = ? ORDER BY generation_index ASC",
(run_id,),
).fetchall()
return [dict(row) for row in rows]
return store.run_status(run_id)

@application.get("/api/runs/{run_id}/replay/{generation}")
def replay(run_id: str, generation: int) -> dict[str, Any]:
Expand Down
9 changes: 2 additions & 7 deletions autocontext/src/autocontext/server/cockpit_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,15 +193,10 @@ def cockpit_delete_notebook(session_id: str, request: Request) -> dict[str, str]
def list_runs(request: Request) -> list[dict[str, Any]]:
"""List recent runs with summary info."""
store = _get_store(request)
with store.connect() as conn:
runs = conn.execute(
"SELECT run_id, scenario, target_generations, status, created_at, updated_at "
"FROM runs ORDER BY created_at DESC LIMIT 50"
).fetchall()
runs = store.list_runs(limit=50)

result: list[dict[str, Any]] = []
for run in runs:
run_dict = dict(run)
for run_dict in runs:
run_id = run_dict["run_id"]
scenario = run_dict["scenario"]

Expand Down
44 changes: 44 additions & 0 deletions autocontext/src/autocontext/storage/sqlite_store.py
Original file line number Diff line number Diff line change
Expand Up @@ -691,6 +691,50 @@ def get_run(self, run_id: str) -> dict[str, Any] | None:
).fetchone()
return dict(row) if row else None

# -- Shared query services (AC-480) --
# These replace duplicated raw SQL in cli.py, mcp/knowledge_tools.py, and server/ endpoints.

def list_runs(self, *, limit: int = 50) -> list[dict[str, Any]]:
    """Return up to *limit* runs as plain dicts, newest ``created_at`` first."""
    query = (
        "SELECT run_id, scenario, target_generations, executor_mode, status, created_at "
        "FROM runs ORDER BY created_at DESC LIMIT ?"
    )
    with self.connect() as conn:
        fetched = conn.execute(query, (limit,)).fetchall()
    return list(map(dict, fetched))

def run_status(self, run_id: str) -> list[dict[str, Any]]:
    """Return the per-generation rows for *run_id*, ordered by generation index."""
    sql = """
        SELECT generation_index, mean_score, best_score, elo, wins, losses, gate_decision, status
        FROM generations
        WHERE run_id = ?
        ORDER BY generation_index
        """
    with self.connect() as conn:
        records = conn.execute(sql, (run_id,)).fetchall()
    return [dict(record) for record in records]

def list_solved(self) -> list[dict[str, Any]]:
    """Return the best knowledge snapshot per scenario.

    Rows arrive ordered by ``best_score`` DESC, so the first row seen for
    each scenario is already its best snapshot (SQLite sorts NULLs last in
    DESC order).  Relying on that ordering instead of re-comparing scores
    in Python also avoids a TypeError when a repeated scenario has a NULL
    ``best_score`` (``None > float`` is not comparable).
    """
    with self.connect() as conn:
        rows = conn.execute(
            "SELECT scenario, best_score, best_elo, run_id, created_at "
            "FROM knowledge_snapshots "
            "ORDER BY best_score DESC"
        ).fetchall()
    # First-wins dedup: keep only the top-ranked (best) row per scenario.
    best: dict[str, dict[str, Any]] = {}
    for row in rows:
        snapshot = dict(row)
        best.setdefault(snapshot["scenario"], snapshot)
    return list(best.values())

# -- Human feedback --

def insert_human_feedback(
Expand Down
2 changes: 1 addition & 1 deletion autocontext/tests/test_module_size_limits.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@

GRANDFATHERED: dict[str, int] = {
# These are large but not yet split — tracked for future refactoring
"storage/sqlite_store.py": 1600,
"storage/sqlite_store.py": 1650,
"storage/artifacts.py": 1300,
"cli.py": 1600,
"mcp/tools.py": 1500,
Expand Down
95 changes: 95 additions & 0 deletions autocontext/tests/test_service_layer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
"""Tests for shared service layer methods on SQLiteStore (AC-480).

Verifies that common query operations are available as methods on SQLiteStore,
so CLI/HTTP/MCP surfaces don't duplicate raw SQL.
"""

from __future__ import annotations

from pathlib import Path

import pytest

from autocontext.storage.sqlite_store import SQLiteStore


@pytest.fixture()
def store(tmp_path: Path) -> SQLiteStore:
    """Provide a fresh SQLiteStore on a temp DB, migrated when migrations exist."""
    instance = SQLiteStore(tmp_path / "test.sqlite3")
    migrations_dir = Path(__file__).resolve().parent.parent / "migrations"
    if migrations_dir.exists():
        instance.migrate(migrations_dir)
    return instance


class TestListRuns:
    """SQLiteStore.list_runs exposes recent run summaries without raw SQL."""

    def test_list_runs_empty(self, store: SQLiteStore) -> None:
        # No runs recorded yet -> empty list, not None or an error.
        assert store.list_runs() == []

    def test_list_runs_returns_recent(self, store: SQLiteStore) -> None:
        store.create_run("run-1", "grid_ctf", 5, "local")
        store.create_run("run-2", "othello", 3, "local")
        runs = store.list_runs()
        assert len(runs) == 2
        for entry in runs:
            assert "run_id" in entry
            assert "scenario" in entry

    def test_list_runs_respects_limit(self, store: SQLiteStore) -> None:
        for i in range(5):
            store.create_run(f"run-{i}", "grid_ctf", 1, "local")
        assert len(store.list_runs(limit=3)) == 3


class TestRunStatus:
    """SQLiteStore.run_status returns ordered per-generation rows."""

    def test_run_status_missing(self, store: SQLiteStore) -> None:
        # Unknown run ids yield an empty list rather than raising.
        assert store.run_status("nonexistent") == []

    def test_run_status_preserves_generation_status_fields(self, store: SQLiteStore) -> None:
        store.create_run("run-1", "grid_ctf", 3, "local")
        store.upsert_generation("run-1", 1, 0.40, 0.50, 1000.0, 2, 1, "advance", "completed")
        store.upsert_generation("run-1", 2, 0.45, 0.55, 1010.0, 3, 2, "retry", "running")
        fields = (
            "generation_index", "mean_score", "best_score", "elo",
            "wins", "losses", "gate_decision", "status",
        )
        expected = [
            dict(zip(fields, (1, 0.40, 0.50, 1000.0, 2, 1, "advance", "completed"))),
            dict(zip(fields, (2, 0.45, 0.55, 1010.0, 3, 2, "retry", "running"))),
        ]
        assert store.run_status("run-1") == expected


class TestListSolved:
    """SQLiteStore.list_solved surfaces best knowledge snapshots per scenario."""

    def test_list_solved_empty(self, store: SQLiteStore) -> None:
        assert store.list_solved() == []

    def test_list_solved_returns_best_snapshots(self, store: SQLiteStore) -> None:
        store.create_run("run-1", "grid_ctf", 1, "local")
        store.save_knowledge_snapshot(
            scenario="grid_ctf",
            run_id="run-1",
            best_score=0.9,
            best_elo=1500.0,
            playbook_hash="abc123",
        )
        snapshots = store.list_solved()
        assert snapshots
        assert snapshots[0]["scenario"] == "grid_ctf"