Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 11 additions & 8 deletions api/ai.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ async def generate_images(
identity = require_identity(authorization)
payload = body.model_dump(mode="python")
payload["base_url"] = resolve_image_base_url(request)
call = LoggedCall(identity, "/v1/images/generations", body.model, "文生图")
call = LoggedCall(identity, "/v1/images/generations", body.model, "文生图", request_text=body.prompt)
await filter_or_log(call, body.prompt)
return await call.run(openai_v1_image_generations.handle, payload)

Expand All @@ -100,7 +100,7 @@ async def edit_images(
stream: bool | None = Form(default=None),
):
identity = require_identity(authorization)
call = LoggedCall(identity, "/v1/images/edits", model, "图生图")
call = LoggedCall(identity, "/v1/images/edits", model, "图生图", request_text=prompt)
if n < 1 or n > 4:
raise HTTPException(status_code=400, detail={"error": "n must be between 1 and 4"})
await filter_or_log(call, prompt)
Expand Down Expand Up @@ -130,17 +130,19 @@ async def create_chat_completion(body: ChatCompletionRequest, authorization: str
identity = require_identity(authorization)
payload = body.model_dump(mode="python")
model = str(payload.get("model") or "auto")
call = LoggedCall(identity, "/v1/chat/completions", model, "文本生成")
await filter_or_log(call, request_text(payload.get("prompt"), payload.get("messages")))
request_preview = request_text(payload.get("prompt"), payload.get("messages"))
call = LoggedCall(identity, "/v1/chat/completions", model, "文本生成", request_text=request_preview)
await filter_or_log(call, request_preview)
return await call.run(openai_v1_chat_complete.handle, payload)

@router.post("/v1/responses")
async def create_response(body: ResponseCreateRequest, authorization: str | None = Header(default=None)):
    """Proxy an OpenAI-style /v1/responses call: authenticate, run the content
    filter on the request text, then forward to the responses handler with
    call logging attached."""
    identity = require_identity(authorization)
    payload = body.model_dump(mode="python")
    preview = request_text(payload.get("input"), payload.get("instructions"))
    call = LoggedCall(
        identity,
        "/v1/responses",
        str(payload.get("model") or "auto"),
        "Responses",
        request_text=preview,
    )
    await filter_or_log(call, preview)
    return await call.run(openai_v1_response.handle, payload)

@router.post("/v1/messages")
Expand All @@ -153,8 +155,9 @@ async def create_message(
identity = require_identity(authorization or (f"Bearer {x_api_key}" if x_api_key else None))
payload = body.model_dump(mode="python")
model = str(payload.get("model") or "auto")
call = LoggedCall(identity, "/v1/messages", model, "Messages")
await filter_or_log(call, request_text(payload.get("system"), payload.get("messages"), payload.get("tools")))
request_preview = request_text(payload.get("system"), payload.get("messages"), payload.get("tools"))
call = LoggedCall(identity, "/v1/messages", model, "Messages", request_text=request_preview)
await filter_or_log(call, request_preview)
return await call.run(anthropic_v1_messages.handle, payload, sse="anthropic")

return router
4 changes: 2 additions & 2 deletions api/image_tasks.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ async def create_generation_task(
authorization: str | None = Header(default=None),
):
identity = require_identity(authorization)
await filter_or_log(LoggedCall(identity, "/api/image-tasks/generations", body.model, "文生图任务"), body.prompt)
await filter_or_log(LoggedCall(identity, "/api/image-tasks/generations", body.model, "文生图任务", request_text=body.prompt), body.prompt)
try:
return await run_in_threadpool(
image_task_service.submit_generation,
Expand All @@ -73,7 +73,7 @@ async def create_edit_task(
size: str | None = Form(default=None),
):
identity = require_identity(authorization)
await filter_or_log(LoggedCall(identity, "/api/image-tasks/edits", model, "图生图任务"), prompt)
await filter_or_log(LoggedCall(identity, "/api/image-tasks/edits", model, "图生图任务", request_text=prompt), prompt)
uploads = [*(image or []), *(image_list or [])]
if not uploads:
raise HTTPException(status_code=400, detail={"error": "image file is required"})
Expand Down
9 changes: 9 additions & 0 deletions api/system.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,10 @@ class ImageDeleteRequest(BaseModel):
all_matching: bool = False


class LogDeleteRequest(BaseModel):
    # Ids of log records to delete; empty list means "delete nothing".
    # NOTE(review): presumably a pydantic BaseModel, which copies this mutable
    # default per instance — confirm BaseModel's origin at the file's imports.
    ids: list[str] = []


def create_router(app_version: str) -> APIRouter:
router = APIRouter()

Expand Down Expand Up @@ -73,6 +77,11 @@ async def get_logs(type: str = "", start_date: str = "", end_date: str = "", aut
require_admin(authorization)
return {"items": log_service.list(type=type.strip(), start_date=start_date.strip(), end_date=end_date.strip())}

    @router.post("/api/logs/delete")
    async def delete_logs(body: LogDeleteRequest, authorization: str | None = Header(default=None)):
        """Admin-only: delete log records by id; returns the service's removal summary."""
        require_admin(authorization)
        return log_service.delete(body.ids)

@router.post("/api/proxy/test")
async def test_proxy_endpoint(body: ProxyTestRequest, authorization: str | None = Header(default=None)):
require_admin(authorization)
Expand Down
25 changes: 23 additions & 2 deletions services/image_task_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
from typing import Any

from services.config import DATA_DIR, config
from services.content_filter import request_text
from services.log_service import LOG_TYPE_CALL, log_service
from services.protocol import openai_v1_image_edit, openai_v1_image_generations

Expand Down Expand Up @@ -232,11 +233,28 @@ def _run_task(
message = _clean(result.get("message")) or "image task returned no image data"
raise RuntimeError(message)
self._update_task(key, status=TASK_STATUS_SUCCESS, data=data, error="")
self._log_call(identity, mode, model, started, "调用完成", urls=_collect_image_urls(data))
self._log_call(
identity,
mode,
model,
started,
"调用完成",
request_preview=request_text(payload.get("prompt")),
urls=_collect_image_urls(data),
)
except Exception as exc:
error_message = str(exc) or "image task failed"
self._update_task(key, status=TASK_STATUS_ERROR, error=error_message, data=[])
self._log_call(identity, mode, model, started, "调用失败", status="failed", error=error_message)
self._log_call(
identity,
mode,
model,
started,
"调用失败",
request_preview=request_text(payload.get("prompt")),
status="failed",
error=error_message,
)

def _log_call(
self,
Expand All @@ -246,6 +264,7 @@ def _log_call(
started: float,
suffix: str,
*,
request_preview: str = "",
status: str = "success",
error: str = "",
urls: list[str] | None = None,
Expand All @@ -263,6 +282,8 @@ def _log_call(
"duration_ms": int((time.time() - started) * 1000),
"status": status,
}
if request_preview:
detail["request_text"] = request_preview
if error:
detail["error"] = error
if urls:
Expand Down
89 changes: 77 additions & 12 deletions services/log_service.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
from __future__ import annotations

import hashlib
import json
import itertools
import time
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Any
from uuid import uuid4

from fastapi import HTTPException
from fastapi.concurrency import run_in_threadpool
Expand All @@ -24,38 +26,87 @@ def __init__(self, path: Path):
self.path = path
self.path.parent.mkdir(parents=True, exist_ok=True)

@staticmethod
def _legacy_id(raw_line: str, line_number: int) -> str:
payload = f"{line_number}:{raw_line}".encode("utf-8", errors="ignore")
return hashlib.sha1(payload).hexdigest()[:24]

def _parse_line(self, raw_line: str, line_number: int) -> dict[str, Any] | None:
try:
item = json.loads(raw_line)
except Exception:
return None
if not isinstance(item, dict):
return None
parsed = dict(item)
parsed["id"] = str(parsed.get("id") or self._legacy_id(raw_line, line_number))
return parsed

@staticmethod
def _serialize_item(item: dict[str, Any]) -> str:
return json.dumps(item, ensure_ascii=False, separators=(",", ":"))

@staticmethod
def _matches_filters(item: dict[str, Any], *, type: str = "", start_date: str = "", end_date: str = "") -> bool:
t = str(item.get("time") or "")
day = t[:10]
if type and item.get("type") != type:
return False
if start_date and day < start_date:
return False
if end_date and day > end_date:
return False
return True

def add(self, type: str, summary: str = "", detail: dict[str, Any] | None = None, **data: Any) -> None:
item = {
"id": uuid4().hex,
"time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"type": type,
"summary": summary,
"detail": detail or data,
}
with self.path.open("a", encoding="utf-8") as file:
file.write(json.dumps(item, ensure_ascii=False, separators=(",", ":")) + "\n")
file.write(self._serialize_item(item) + "\n")

def list(self, type: str = "", start_date: str = "", end_date: str = "", limit: int = 200) -> list[dict[str, Any]]:
if not self.path.exists():
return []
items: list[dict[str, Any]] = []
for line in reversed(self.path.read_text(encoding="utf-8").splitlines()):
try:
item = json.loads(line)
except Exception:
continue
t = str(item.get("time") or "")
day = t[:10]
if type and item.get("type") != type:
lines = self.path.read_text(encoding="utf-8").splitlines()
for line_number in range(len(lines) - 1, -1, -1):
item = self._parse_line(lines[line_number], line_number)
if item is None:
continue
if start_date and day < start_date:
continue
if end_date and day > end_date:
if not self._matches_filters(item, type=type, start_date=start_date, end_date=end_date):
continue
items.append(item)
if len(items) >= limit:
break
return items

def delete(self, ids: list[str]) -> dict[str, int]:
target_ids = {str(item or "").strip() for item in ids if str(item or "").strip()}
if not self.path.exists() or not target_ids:
return {"removed": 0}
lines = self.path.read_text(encoding="utf-8").splitlines()
kept_lines: list[str] = []
removed = 0
for line_number, raw_line in enumerate(lines):
item = self._parse_line(raw_line, line_number)
if item is None:
kept_lines.append(raw_line)
continue
if str(item.get("id") or "") in target_ids:
removed += 1
continue
kept_lines.append(self._serialize_item(item))
content = "\n".join(kept_lines)
if content:
content += "\n"
self.path.write_text(content, encoding="utf-8")
return {"removed": removed}


log_service = LogService(DATA_DIR / "logs.jsonl")

Expand All @@ -76,6 +127,16 @@ def _collect_urls(value: object) -> list[str]:
return urls


def _request_excerpt(text: object, limit: int = 1000) -> str:
value = str(text or "").strip()
if not value:
return ""
normalized = " ".join(value.split())
if len(normalized) <= limit:
return normalized
return normalized[: limit - 1].rstrip() + "…"


def _image_error_response(exc: Exception) -> JSONResponse:
message = str(exc)
if "no available image quota" in message.lower():
Expand Down Expand Up @@ -119,6 +180,7 @@ class LoggedCall:
model: str
summary: str
started: float = field(default_factory=time.time)
request_text: str = ""

async def run(self, handler, *args, sse: str = "openai"):
from services.protocol.conversation import ImageGenerationError
Expand Down Expand Up @@ -184,6 +246,9 @@ def log(self, suffix: str, result: object = None, status: str = "success", error
"duration_ms": int((time.time() - self.started) * 1000),
"status": status,
}
request_excerpt = _request_excerpt(self.request_text)
if request_excerpt:
detail["request_text"] = request_excerpt
if error:
detail["error"] = error
collected_urls = [*(urls or []), *_collect_urls(result)]
Expand Down
Loading