diff --git a/.github/workflows/desktop-build.yml b/.github/workflows/desktop-build.yml new file mode 100644 index 0000000..27136a7 --- /dev/null +++ b/.github/workflows/desktop-build.yml @@ -0,0 +1,165 @@ +name: Desktop Build + +on: + push: + branches: [ main, master, feat/desktop-branch ] + tags: + - 'v*.*.*-*' # prerelease: v0.1.0-beta.1 + - 'v[0-9]*.[0-9]*.[0-9]*' # release: v0.1.0 (纯版本号) + paths: + - 'desktop/**' + - '.github/workflows/desktop-build.yml' + pull_request: + branches: [ main, master, feat/desktop-branch ] + paths: + - 'desktop/**' + - '.github/workflows/desktop-build.yml' + workflow_dispatch: + +concurrency: + group: desktop-${{ github.ref }} + cancel-in-progress: true + +jobs: + build: + name: Desktop ${{ matrix.platform }} (${{ matrix.arch }}) + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - os: macos-14 + platform: mac + arch: arm64 + python: python3.10 + asr: funasr-onnx + artifact_path: | + desktop/release/*.dmg + - os: windows-latest + platform: win + arch: x64 + python: python + asr: funasr-onnx + artifact_path: | + desktop/release/*.exe + + defaults: + run: + shell: bash + working-directory: desktop + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 9 + run_install: false + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 20 + cache: pnpm + cache-dependency-path: desktop/pnpm-lock.yaml + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.10' + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Prepare Python env for ASR backend + working-directory: desktop + run: pnpm run prepare:python + + - name: Print app version + id: meta + run: | + node -e "const v=require('./package.json').version; console.log('app_version='+v)" >> "$GITHUB_OUTPUT" + + + - name: Build desktop package + env: + PYTHON: ${{ 
matrix.python }} + ASR_IMPL: ${{ matrix.asr }} + ASR_PYTHON_PATH: ${{ matrix.platform == 'win' && format('{0}/desktop/python-env/Scripts/python.exe', github.workspace) || format('{0}/desktop/python-env/bin/python3', github.workspace) }} + run: | + if [ "${{ matrix.platform }}" = "mac" ]; then + pnpm run build:mac -- --${{ matrix.arch }} + else + pnpm run build:win -- --${{ matrix.arch }} + fi + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: livegalgame-desktop-${{ matrix.platform }}-${{ matrix.arch }}-v${{ steps.meta.outputs.app_version }} + path: ${{ matrix.artifact_path }} + if-no-files-found: error + retention-days: 14 + + prerelease: + name: Publish prerelease + runs-on: ubuntu-latest + needs: build + if: | + startsWith(github.ref, 'refs/tags/v') && + contains(github.ref_name, '-') + permissions: + contents: write + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Download artifacts + uses: actions/download-artifact@v4 + with: + path: dist + + - name: Create prerelease + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh release create "${GITHUB_REF_NAME}" \ + --prerelease \ + --title "${GITHUB_REF_NAME}" \ + --notes "Prerelease ${GITHUB_REF_NAME}" + # 处理带空格的文件名 + find dist -type f -print0 | xargs -0 -I {} gh release upload "${GITHUB_REF_NAME}" "{}" --clobber + + release: + name: Publish release + runs-on: ubuntu-latest + needs: build + if: | + startsWith(github.ref, 'refs/tags/v') && + !contains(github.ref_name, '-') + permissions: + contents: write + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download artifacts + uses: actions/download-artifact@v4 + with: + path: dist + + - name: Create release + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh release create "${GITHUB_REF_NAME}" \ + --title "${GITHUB_REF_NAME}" \ + --notes "Release ${GITHUB_REF_NAME}" + # 处理带空格的文件名 + find dist -type f -print0 | xargs -0 -I {} gh release 
upload "${GITHUB_REF_NAME}" "{}" --clobber diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f37f00a..c0d93d3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -4,7 +4,7 @@ on: push: branches: [ main, master ] tags: - - 'v*' + - 'android-v*' pull_request: branches: [ main, master ] workflow_dispatch: diff --git a/.gitignore b/.gitignore index a942b21..1a745b7 100644 --- a/.gitignore +++ b/.gitignore @@ -27,4 +27,36 @@ signing.properties /desktop/data /desktop/.temp /desktop/tests -.cursor \ No newline at end of file +desktop/src/data +desktop/.venv310 +/desktop/release +/desktop/backend/build/ +/desktop/backend/dist/ +.venv +/desktop/memory-service/.venv +/desktop/memory-service/__pycache__ +__pycache__/ +# 忽略所有项目里的 python 环境目录(venv/虚拟环境/bootstrap python等) +**/python-env +**/python-bootstrap +**/.venv*/ +**/venv/ +**/.venv +# Python 缓存文件 +**/__pycache__/ +*.py[cod] +*.pyo +.cursor + +# Test media files +test.mp4 +*.mp4 +*.wav + +# Desktop docs (local only) +desktop/docs/ +**/node_modules + +# Environment variables +.env +.env.* \ No newline at end of file diff --git a/README.md b/README.md index a04ee63..1e09250 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,8 @@ preview +recent preview + --- ## 核心功能 @@ -63,10 +65,6 @@ ### 下载 -[Get it on F-Droid](https://f-droid.org/packages/com.jstone.livegalgame/) - - **GitHub Release**: [点击下载](https://github.com/JStone2934/LiveGalGame/tags) - **夸克网盘**: https://pan.quark.cn/s/1000136902b5 - **百度网盘**: https://pan.baidu.com/s/1Bpt2DZNvjzT6BpKr8RyG-A?pwd=94g6 @@ -77,6 +75,13 @@ **Windows / macOS 应用** - 专为语音聊天场景设计的智能助手 +### macOS 安装提示(未签名) + +- 当前开源版本未做 Apple 官方签名/公证,直接双击 DMG 可能提示“应用已损坏/来自未被认可的开发者”。 +- 推荐做法:在 Finder 中控制键+点按 DMG 内的 `LiveGalGame.app`,选择“打开”,在弹窗中再次“打开”,系统将对本次豁免。(参考 [macguide.leavelet.io](https://macguide.leavelet.io/Bike/appnotopen.html)) +- 如仍被拦截,可在终端移除隔离属性:`sudo xattr -r -d com.apple.quarantine "/Applications/LiveGalGame.app"`(请替换为实际路径)。 +- 临时放宽 
Gatekeeper(风险较高,装完建议恢复):`sudo spctl --master-disable`,安装后 `sudo spctl --master-enable`。 + ### 使用流程 1. **创建对话对象** diff --git a/desktop/README.md b/desktop/README.md index b0bfb8a..6e0aa46 100644 --- a/desktop/README.md +++ b/desktop/README.md @@ -78,9 +78,10 @@ pnpm install ### 配置语音识别 ```bash -# 安装 FunASR(推荐,中文识别效果最好) +# 安装 FunASR(推荐,中文识别效果最好,macOS 默认) npm run setup-funasr ``` +- Windows 也使用 FunASR ONNX,无需额外安装 faster-whisper。 ### 启动 @@ -115,6 +116,70 @@ pnpm dev --- +## 📦 桌面端发布规则(GitHub Actions) + +桌面端发布使用 tag 触发,并区分 **预发布** 和 **正式发布**: + +- **预发布(Prerelease)**:`vX.Y.Z-xxx` + 例:`v0.1.0-beta.1` +- **正式发布(Release)**:`vX.Y.Z` + 例:`v0.1.0` + +Android 的发布已独立为 `android-v*` 标签,避免干扰桌面端发布。 + +示例: + +```bash +# 预发布(会生成 GitHub Prerelease) +git tag v0.1.0-beta.1 +git push origin v0.1.0-beta.1 + +# 正式发布(会生成 GitHub Release) +git tag v0.1.0 +git push origin v0.1.0 + +# Android 发布(仅 Android 流程触发) +git tag android-v0.1.0 +git push origin android-v0.1.0 +``` + +> 说明:桌面端产物为 macOS `.dmg` 与 Windows `.exe`,不会生成 APK。 + +--- + +## 🧰 模型下载与缓存目录(HF / ModelScope) + +应用内的语音识别模型(尤其是 FunASR ONNX)会在首次使用/点击下载时自动拉取,并缓存到本机磁盘。为了方便管理、并兼容 Windows / macOS 的默认目录差异,项目默认把缓存放到 Electron 的 `userData` 目录下(不同系统会自动选择合适位置)。 + +如果你希望把模型统一下载到自己指定的盘符/目录(例如放到大硬盘、NAS 挂载目录等),推荐通过环境变量覆盖: + +- `ASR_CACHE_BASE`:ASR 缓存根目录(推荐只改这个) +- `HF_HOME`:HuggingFace 缓存根目录(高级用法) +- `ASR_CACHE_DIR`:HuggingFace hub 目录(高级用法) +- `MODELSCOPE_CACHE`:ModelScope 缓存根目录(注意:实际会写到 `/hub`) + +示例(macOS/Linux): + +```bash +ASR_CACHE_BASE=/data/livegalgame/asr-cache pnpm dev +``` + +示例(Windows PowerShell): + +```powershell +$env:ASR_CACHE_BASE="D:\\LiveGalGame\\asr-cache"; pnpm dev +``` + +如果你想手动使用 ModelScope CLI 把某个模型下载到指定位置(不走应用内下载),确实可以用: + +```bash +modelscope download --model 'Qwen/Qwen2-7B' --local_dir /data/models/Qwen2-7B +``` + +但应用内的 FunASR 模型下载是由 `funasr_onnx` 触发的(不是直接下载单个 Qwen 模型),因此更推荐用上面的环境变量来统一管理缓存位置。 + +--- + ## 🔧 开发者指南 如果你想参与开发或了解技术细节,请查看项目源码: @@ -125,4 +190,3 @@ pnpm dev - `src/db/` - 本地数据存储 欢迎提交 PR!有问题请加 QQ 群:**1074602400** - diff 
--git a/desktop/backend/asr/asr_baidu_worker.py b/desktop/backend/asr/asr_baidu_worker.py new file mode 100644 index 0000000..4d1a125 --- /dev/null +++ b/desktop/backend/asr/asr_baidu_worker.py @@ -0,0 +1,333 @@ +#!/usr/bin/env python3 +# coding: utf-8 +""" +Baidu Cloud ASR Worker - WebSocket Streaming Mode +实现百度实时语音识别,支持“字随声出”流式反馈。 +""" + +import asyncio +import base64 +import json +import os +import platform +import sys +import time +import uuid +import wave +import io +import threading +from dataclasses import dataclass, field +from typing import Dict, List, Optional + +import numpy as np +import requests +import websockets + +# ============================================================================== +# IPC 通道重定向 +# ============================================================================== +# 保持与 main.py 的 IPC 通信一致 +ipc_fd = os.dup(sys.stdout.fileno()) +ipc_channel = os.fdopen(ipc_fd, "w", buffering=1, encoding="utf-8") +os.dup2(sys.stderr.fileno(), sys.stdout.fileno()) +sys.stdout = sys.stderr + +def send_ipc_message(data: dict): + try: + ipc_channel.write(json.dumps(data, ensure_ascii=False) + "\n") + ipc_channel.flush() + except Exception as exc: + sys.stderr.write(f"[IPC Error] {exc}\n") + sys.stderr.flush() + +# ============================================================================== +# 配置 +# ============================================================================== +BAIDU_APP_ID = os.environ.get("BAIDU_APP_ID", "").strip() +BAIDU_API_KEY = os.environ.get("BAIDU_API_KEY", "").strip() +BAIDU_SECRET_KEY = os.environ.get("BAIDU_SECRET_KEY", "").strip() + +if not BAIDU_APP_ID or not BAIDU_API_KEY or not BAIDU_SECRET_KEY: + sys.stderr.write("[Baidu Worker] WARNING: BAIDU_APP_ID, BAIDU_API_KEY or BAIDU_SECRET_KEY not set in environment variables.\n") + sys.stderr.flush() + +BAIDU_WS_URL = "wss://vop.baidu.com/realtime_asr" +SAMPLE_RATE = int(os.environ.get("ASR_SAMPLE_RATE", "16000")) + +# 能量检测阈值 (RMS),用于给 UI 反馈“正在说话” +# 300/32768 约等于 0.009 
+SPEECH_THRESHOLD = float(os.environ.get("ASR_RMS_THRESHOLD", "0.009")) + +def decode_audio_chunk(audio_b64: str) -> np.ndarray: + audio_bytes = base64.b64decode(audio_b64) + audio_int16 = np.frombuffer(audio_bytes, dtype=np.int16) + return audio_int16.astype(np.float32) + +def float_to_int16(audio_f32: np.ndarray) -> bytes: + if np.max(np.abs(audio_f32)) <= 1.0: + return (audio_f32 * 32767).astype(np.int16).tobytes() + return audio_f32.astype(np.int16).tobytes() + +class BaiduSession: + def __init__(self, session_id: str, worker: 'BaiduWorker'): + self.session_id = session_id + self.worker = worker + self.ws: Optional[websockets.WebSocketClientProtocol] = None + self.audio_queue = asyncio.Queue() + self.is_running = False + self.task_send: Optional[asyncio.Task] = None + self.task_recv: Optional[asyncio.Task] = None + self.last_final_text = "" + self.segment_seq = 0 + + async def start(self): + if self.is_running: + return + self.is_running = True + token = await self.worker.get_token_async() + if not token: + sys.stderr.write(f"[{self.session_id}] Failed to get Baidu token\n") + self.is_running = False + return + + try: + # 强化 1:使用规范的、不带特殊字符的 sn 和 cuid + sn = str(uuid.uuid4()).replace("-", "") + cuid = "livegal_desktop_client" + # 强化 2:在 URL 握手阶段就带上 token (某些百度集群的要求) + url = f"{BAIDU_WS_URL}?sn={sn}&token={token}" + + sys.stderr.write(f"[{self.session_id}] Connecting to Baidu WS (AppID: {BAIDU_APP_ID})...\n") + self.ws = await websockets.connect(url) + + # 1. 
发送 START 帧 + start_frame = { + "type": "START", + "data": { + "appid": int(BAIDU_APP_ID), + "appkey": BAIDU_API_KEY, + "appname": "livegal", # 强化 3:加入应用名称,对应控制台,解决 -3004 错误 + "dev_pid": 1537, # 普通话 + "cuid": cuid, + "format": "pcm", + "sample": SAMPLE_RATE, + "token": token + } + } + await self.ws.send(json.dumps(start_frame)) + + self.task_send = asyncio.create_task(self._send_loop()) + self.task_recv = asyncio.create_task(self._recv_loop()) + sys.stderr.write(f"[{self.session_id}] Baidu WS Connected & Started\n") + except Exception as e: + sys.stderr.write(f"[{self.session_id}] WS Connection error: {e}\n") + self.is_running = False + + async def stop(self): + self.is_running = False + if self.ws: + try: + # 发送 FINISH 帧 + await self.ws.send(json.dumps({"type": "FINISH"})) + await asyncio.sleep(0.5) + await self.ws.close() + except: pass + if self.task_send: self.task_send.cancel() + if self.task_recv: self.task_recv.cancel() + self.ws = None + + async def _send_loop(self): + try: + while self.is_running: + chunk = await self.audio_queue.get() + if chunk is None: break + if self.ws: + await self.ws.send(chunk) + except asyncio.CancelledError: pass + except Exception as e: + # 这里的 1005 或 1006 错误通常是由于 FINISH 导致的正常关闭 + if "1005" not in str(e) and "1006" not in str(e): + sys.stderr.write(f"[{self.session_id}] WS Send error: {e}\n") + finally: + self.is_running = False + + async def _recv_loop(self): + try: + async for message in self.ws: + resp = json.loads(message) + err_no = resp.get("err_no", 0) + if err_no != 0: + sys.stderr.write(f"[{self.session_id}] Baidu Error: {resp.get('err_msg')} (code={err_no})\n") + continue + + msg_type = resp.get("type") + text = resp.get("result") + if isinstance(text, list): text = "".join(text) + + if not text: continue + + if msg_type == "MID_TEXT": + # 流式中间结果 + send_ipc_message({ + "session_id": self.session_id, + "type": "partial_result", + "partialText": text, + "timestamp": int(time.time() * 1000), + "engine": "baidu" + }) + elif 
msg_type == "FIN_TEXT": + # 一句话最终结果 + self.segment_seq += 1 + send_ipc_message({ + "session_id": self.session_id, + "type": "sentence_complete", + "text": text, + "timestamp": int(time.time() * 1000), + "status": "success", + "engine": "baidu", + "is_segment_end": True, # 百度 FIN_TEXT 代表一句话结束,强制分句 + "segment_seq": self.segment_seq + }) + self.last_final_text = text + except asyncio.CancelledError: pass + except Exception as e: + if "1005" not in str(e) and "1006" not in str(e): + sys.stderr.write(f"[{self.session_id}] WS Recv error: {e}\n") + finally: + self.is_running = False + sys.stderr.write(f"[{self.session_id}] Baidu WS Session closed\n") + + def add_audio(self, audio_bytes: bytes): + if self.is_running: + self.audio_queue.put_nowait(audio_bytes) + +class BaiduWorker: + def __init__(self): + self.sessions: Dict[str, BaiduSession] = {} + self._token = None + self._token_expires = 0 + self._token_lock = asyncio.Lock() + + sys.stderr.write(f"[Baidu Worker] WebSocket Mode Initialized (Lightweight RMS VAD)\n") + + async def get_token_async(self): + async with self._token_lock: + if self._token and time.time() < self._token_expires: + return self._token + + url = "https://aip.baidubce.com/oauth/2.0/token" + params = { + "grant_type": "client_credentials", + "client_id": BAIDU_API_KEY, + "client_secret": BAIDU_SECRET_KEY + } + try: + # 使用 loop.run_in_executor 运行同步请求 + loop = asyncio.get_running_loop() + resp = await loop.run_in_executor(None, lambda: requests.get(url, params=params, timeout=10)) + data = resp.json() + if "access_token" in data: + self._token = data["access_token"] + self._token_expires = time.time() + data.get("expires_in", 2592000) - 3600 + return self._token + except Exception as e: + sys.stderr.write(f"[Baidu Worker] Token error: {e}\n") + return None + + def _is_speech(self, chunk_f32: np.ndarray) -> bool: + # 使用简单的 RMS 能量检测 + if chunk_f32.size == 0: + return False + rms = float(np.sqrt(np.mean(chunk_f32 ** 2))) + return rms >= SPEECH_THRESHOLD + 
+ async def handle_streaming_chunk(self, data: dict): + session_id = data.get("session_id") or "default" + audio_b64 = data.get("audio_data") + if not audio_b64: return + + if session_id not in self.sessions: + self.sessions[session_id] = BaiduSession(session_id, self) + + session = self.sessions[session_id] + if not session.is_running: + await session.start() + + chunk_f32 = decode_audio_chunk(audio_b64) + has_voice = self._is_speech(chunk_f32) + + audio_bytes = float_to_int16(chunk_f32) + session.add_audio(audio_bytes) + + # 通知 UI 说话状态 (is_speaking) + if has_voice: + send_ipc_message({ + "session_id": session_id, + "type": "is_speaking", + "isSpeaking": True + }) + + async def handle_reset_session(self, data: dict): + session_id = data.get("session_id") + if session_id in self.sessions: + await self.sessions[session_id].stop() + del self.sessions[session_id] + + async def handle_force_commit(self, data: dict): + # 对于 WebSocket 模式,force_commit 不应该简单的重启, + # 因为这会导致正在处理的语音丢失。 + # 我们发送 FINISH 帧让百度完成当前识别即可。 + session_id = data.get("session_id") + if session_id in self.sessions: + session = self.sessions[session_id] + if session.ws and session.is_running: + try: + sys.stderr.write(f"[{session_id}] Force commit: sending FINISH frame\n") + await session.ws.send(json.dumps({"type": "FINISH"})) + # 百度收到 FINISH 后会返回 FIN_TEXT,recv_loop 会处理存库 + except Exception as e: + sys.stderr.write(f"[{session_id}] Force commit error: {e}\n") + +async def read_stdin(queue): + loop = asyncio.get_event_loop() + while True: + # sys.stdin.readline 是阻塞的,在执行器中运行以避免卡死主循环 + line = await loop.run_in_executor(None, sys.stdin.readline) + if not line: + break + stripped = line.strip() + if stripped: + await queue.put(stripped) + +async def main(): + # Windows 下 connect_read_pipe 在 ProactorEventLoop 中不稳定 (WinError 6) + # 我们改用 SelectorEventLoop 或者直接使用线程读取 stdin + if platform.system() == "Windows": + try: + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + except: + pass + + 
worker = BaiduWorker() + send_ipc_message({"status": "ready"}) + + queue = asyncio.Queue() + asyncio.create_task(read_stdin(queue)) + + while True: + line = await queue.get() + if not line: break + try: + data = json.loads(line) + rtype = data.get("type") + if rtype == "streaming_chunk": + await worker.handle_streaming_chunk(data) + elif rtype == "reset_session": + await worker.handle_reset_session(data) + elif rtype == "force_commit": + await worker.handle_force_commit(data) + except Exception as e: + sys.stderr.write(f"[Baidu Worker] Error processing line: {e}\n") + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/desktop/backend/asr/asr_funasr_worker.py b/desktop/backend/asr/asr_funasr_worker.py new file mode 100644 index 0000000..5f7d99f --- /dev/null +++ b/desktop/backend/asr/asr_funasr_worker.py @@ -0,0 +1,1019 @@ +#!/usr/bin/env python3 +""" +FunASR 2-Pass Worker: 基于 funasr_onnx 的流式/离线混合语音识别 + +参照 RealtimeMicPipeline demo 设计: +- Pass 1 (流式): ParaformerOnline 快速出字,用于实时显示 +- Pass 2 (离线): ParaformerOffline + 标点模型,用于最终修正 + +分句策略: +- VAD 检测语音边界 +- 静音累积达到阈值触发 Pass 2 修正 +- 支持强制提交 (force_commit) +""" + +import json +import os +import platform +import sys +import time +import traceback +import base64 +from dataclasses import dataclass, field +from typing import Dict, List, Optional + +import numpy as np + +# ============================================================================== +# OS 级别的文件描述符重定向 +# ============================================================================== +ipc_fd = os.dup(sys.stdout.fileno()) +ipc_channel = os.fdopen(ipc_fd, "w", buffering=1, encoding="utf-8") +os.dup2(sys.stderr.fileno(), sys.stdout.fileno()) +sys.stdout = sys.stderr + + +def send_ipc_message(data): + """发送 JSON 消息到 Node.js""" + try: + json_str = json.dumps(data, ensure_ascii=False) + ipc_channel.write(json_str + "\n") + ipc_channel.flush() + except Exception as exc: + sys.stderr.write(f"[IPC Error] Failed to send: {exc}\n") + sys.stderr.flush() + + +# 
============================================================================== +# 环境变量配置 +# ============================================================================== +os.environ.setdefault("TQDM_DISABLE", "1") + +MODELSCOPE_CACHE = os.environ.get("MODELSCOPE_CACHE") or os.environ.get("ASR_CACHE_DIR") +if MODELSCOPE_CACHE: + os.environ.setdefault("MODELSCOPE_CACHE", MODELSCOPE_CACHE) + os.environ.setdefault("MODELSCOPE_CACHE_HOME", MODELSCOPE_CACHE) + +# 离线模式:如果设置了 MODELSCOPE_OFFLINE=1,则跳过网络请求,直接使用本地缓存 +OFFLINE_MODE = os.environ.get("MODELSCOPE_OFFLINE", "").lower() in ("1", "true", "yes") +if OFFLINE_MODE: + sys.stderr.write("[FunASR Worker] Offline mode enabled: using local cache only\n") + sys.stderr.flush() + # 设置 modelscope 离线模式相关环境变量 + os.environ["MODELSCOPE_OFFLINE"] = "1" + os.environ["HF_HUB_OFFLINE"] = "1" + # 尝试配置 modelscope 库的离线模式 + try: + from modelscope.hub.snapshot_download import snapshot_download + from modelscope.hub.file_download import model_file_download + # Monkey-patch: 让 modelscope 跳过版本检查 + import modelscope.hub.api as ms_api + if hasattr(ms_api, 'HubApi'): + _original_get_model_files = getattr(ms_api.HubApi, 'get_model_files', None) + if _original_get_model_files: + def _patched_get_model_files(self, model_id, revision=None, *args, **kwargs): + # 离线模式下直接返回空,让库使用本地缓存 + return [] + ms_api.HubApi.get_model_files = _patched_get_model_files + except Exception as e: + sys.stderr.write(f"[FunASR Worker] Warning: Could not configure modelscope offline mode: {e}\n") + sys.stderr.flush() + +# ============================================================================== +# FunASR 配置 +# ============================================================================== +SAMPLE_RATE = int(os.environ.get("ASR_SAMPLE_RATE", "16000")) +CHUNK_MS = int(os.environ.get("ASR_CHUNK_MS", "200")) # 每次读取的音频块时长 (毫秒) +CHUNK_SAMPLES = int(SAMPLE_RATE * CHUNK_MS / 1000) + +# 静音检测配置 +SILENCE_THRESHOLD_CHUNKS = int(os.environ.get("ASR_SILENCE_CHUNKS", "3")) # 连续静音块数触发句尾 
+SILENCE_BUFFER_KEEP = 2 # 保留多少个静音块让音频更自然 + +# 分句配置 +SENTENCE_END_PUNCTUATION = set("。!?!?.;;") +MIN_SENTENCE_CHARS = int(os.environ.get("MIN_SENTENCE_CHARS", "2")) + +# 推理设备选择(影响本地 FunASR ONNX 模型:VAD/Online/Offline/Punc) +# - auto: 自动选择(优先 CUDA,其次 ROCm,其次 DirectML,最后 CPU) +# - cpu/cuda/rocm/dml: 强制指定 +ASR_DEVICE = os.environ.get("ASR_DEVICE", "auto").strip().lower() +ASR_DEVICE_ID = int(os.environ.get("ASR_DEVICE_ID", "0")) + + +@dataclass +class GPUConfig: + """ + 兼容历史测试脚本的 GPU 配置对象。 + + - device_type: cpu/cuda/rocm/dml + - provider_name: onnxruntime provider 名称(如 DmlExecutionProvider) + - available: 是否启用 GPU + - device_id: GPU 设备 id(CPU 时为 -1) + - providers: 可用 providers 列表(调试用) + """ + + device_type: str = "cpu" + provider_name: str = "CPUExecutionProvider" + available: bool = False + device_id: int = -1 + providers: List[str] = field(default_factory=list) + + +def detect_onnx_device() -> dict: + """ + 检测 onnxruntime 可用 provider,并选择推理设备。 + + 说明: + - funasr_onnx 的模型构造函数一般通过 device_id 控制:-1 为 CPU;>=0 尝试使用 GPU。 + - 实际走哪种 GPU 取决于安装的 onnxruntime 版本提供的 provider: + * CUDAExecutionProvider (onnxruntime-gpu) -> NVIDIA + * ROCMExecutionProvider (onnxruntime-rocm) -> AMD/ROCm + * DmlExecutionProvider (onnxruntime-directml) -> Windows 上 AMD/NVIDIA/Intel + """ + forced = ASR_DEVICE + device_id = ASR_DEVICE_ID + + try: + import onnxruntime as ort # type: ignore + + providers = ort.get_available_providers() or [] + except Exception: + providers = [] + + providers_set = {p.lower(): p for p in providers} + has_cuda = "cudaexecutionprovider" in providers_set + has_rocm = "rocmexecutionprovider" in providers_set + has_dml = "dmlexecutionprovider" in providers_set + + def _cpu(): + return { + "device": "cpu", + "device_id": -1, + "provider": "CPUExecutionProvider", + "providers": providers, + } + + def _gpu(provider_key: str, device: str): + return { + "device": device, + "device_id": device_id, + "provider": providers_set.get(provider_key, provider_key), + "providers": providers, 
+ } + + if forced in ("cpu", "none", "off", "-1"): + return _cpu() + if forced in ("cuda", "nvidia"): + return _gpu("cudaexecutionprovider", "cuda") if has_cuda else _cpu() + if forced in ("rocm", "amd"): + return _gpu("rocmexecutionprovider", "rocm") if has_rocm else _cpu() + if forced in ("dml", "directml"): + return _gpu("dmlexecutionprovider", "dml") if has_dml else _cpu() + + # auto:按优先级选择(CUDA > ROCm > DirectML > CPU) + if has_cuda: + return _gpu("cudaexecutionprovider", "cuda") + if has_rocm: + return _gpu("rocmexecutionprovider", "rocm") + if has_dml: + return _gpu("dmlexecutionprovider", "dml") + return _cpu() + + +def detect_gpu() -> GPUConfig: + """ + 兼容接口:返回 GPUConfig,供 test_funasr_gpu.py 等脚本调用。 + """ + info = detect_onnx_device() + device = str(info.get("device", "cpu")) + device_id = int(info.get("device_id", -1)) + provider = str(info.get("provider", "CPUExecutionProvider")) + providers = list(info.get("providers") or []) + available = device_id >= 0 and provider != "CPUExecutionProvider" + return GPUConfig( + device_type=device, + provider_name=provider, + available=available, + device_id=device_id, + providers=providers, + ) + + +def smart_concat(history: str, new_text: str) -> str: + """ + 智能拼接流式文本:处理增量、全量、重叠等情况。 + """ + if not new_text: + return history + if not history: + return new_text + + # 1. 检查 new_text 是否完全包含 history (说明 new_text 是全量更新) + if new_text.startswith(history): + return new_text + + # 2. 检查 history 是否完全包含 new_text (说明 new_text 是旧的全量或者是重复输出) + if history.endswith(new_text): + return history + + # 3. 检查重叠 (history后缀 与 new_text前缀) + overlap_len = min(len(history), len(new_text)) + for i in range(overlap_len, 0, -1): + if history.endswith(new_text[:i]): + return history + new_text[i:] + + # 4. 
无重叠,直接拼接 + return history + new_text + + +def decode_audio_chunk(audio_b64: str) -> np.ndarray: + """Base64 音频转 float32 numpy array(范围 -1~1)。""" + audio_bytes = base64.b64decode(audio_b64) + audio_int16 = np.frombuffer(audio_bytes, dtype=np.int16) + return audio_int16.astype(np.float32) # funasr_onnx 接受 float32,不除以 32768 + + +def smart_split_sentences(text: str) -> List[str]: + """ + 智能分句:基于标点符号将长文本切分成自然的句子。 + + 策略: + 1. 优先按句末标点(。!?!?.)分割 + 2. 如果分隔后的句子太短,考虑合并 + 3. 如果没有句末标点,返回原文 + """ + if not text or len(text) < MIN_SENTENCE_CHARS: + return [text] if text else [] + + # 定义句末标点 + sentence_endings = "。!?!?." + + sentences = [] + current_sentence = "" + + for char in text: + current_sentence += char + if char in sentence_endings: + trimmed = current_sentence.strip() + if trimmed and len(trimmed) >= MIN_SENTENCE_CHARS: + sentences.append(trimmed) + elif trimmed and sentences: + # 太短的句子合并到上一句 + sentences[-1] += trimmed + elif trimmed: + sentences.append(trimmed) + current_sentence = "" + + # 处理剩余的文本 + remaining = current_sentence.strip() + if remaining: + if len(remaining) < MIN_SENTENCE_CHARS and sentences: + # 太短就合并到上一句 + sentences[-1] += remaining + else: + sentences.append(remaining) + + return sentences if sentences else [text] + + + +@dataclass +class SessionState: + """ + FunASR 2-Pass 会话状态 + """ + # 音频缓冲区 (给 Pass 2 用) + full_sentence_buffer: List[np.ndarray] = field(default_factory=list) + + # Pass 1 流式模型的上下文缓存 + online_cache: Dict = field(default_factory=dict) + + # 静音检测 + silence_counter: int = 0 + is_speaking: bool = False + + # 累积的流式文本 + streaming_text: str = "" + last_sent_text: str = "" + + # 时间戳 + start_time: float = 0.0 + + def reset(self): + """重置会话状态""" + self.full_sentence_buffer.clear() + self.online_cache.clear() + self.silence_counter = 0 + self.is_speaking = False + self.streaming_text = "" + self.last_sent_text = "" + self.start_time = 0.0 + + +def resolve_local_model_path(model_id: str) -> Optional[str]: + """ + 在离线模式下,解析本地模型路径。 + 检查 
MODELSCOPE_CACHE 和默认缓存目录下是否存在模型。 + """ + if not OFFLINE_MODE: + return None + + import os.path + cache_dirs = [ + os.environ.get("MODELSCOPE_CACHE"), + os.environ.get("ASR_CACHE_DIR"), + os.path.join(os.path.expanduser("~"), ".cache", "modelscope", "hub"), + ] + + for cache_dir in cache_dirs: + if not cache_dir: + continue + # ModelScope 缓存结构: hub/models// + candidates = [ + os.path.join(cache_dir, model_id), + os.path.join(cache_dir, "models", model_id), + ] + for candidate in candidates: + if os.path.isdir(candidate): + # 检查是否有模型文件 + files = os.listdir(candidate) + if any(f.endswith(('.onnx', '.bin', '.json')) for f in files): + sys.stderr.write(f"[FunASR Worker] Found local model: {candidate}\n") + sys.stderr.flush() + return candidate + + return None + + +def load_funasr_onnx_models(gpu_config: Optional[GPUConfig] = None): + """ + 加载 funasr_onnx 模型 (VAD + 流式ASR + 离线ASR + 标点) + + 支持的环境变量: + - ASR_MODEL: 模型 ID (funasr-paraformer / funasr-paraformer-large) + * funasr-paraformer: INT8 量化版,包体约 0.76GB(online/offline/punc/vad),速度更快 + * funasr-paraformer-large: FP32 未量化,约 2.1GB(按 INT8→FP32 体积估算),精度更高 + - ASR_QUANTIZE: 是否使用量化 (true/false),默认根据模型类型自动选择 + - MODELSCOPE_OFFLINE: 离线模式,跳过网络请求直接使用本地缓存 + """ + try: + from funasr_onnx.vad_bin import Fsmn_vad + from funasr_onnx.paraformer_online_bin import Paraformer as ParaformerOnline + from funasr_onnx.paraformer_bin import Paraformer as ParaformerOffline + from funasr_onnx.punc_bin import CT_Transformer + except ImportError as e: + sys.stderr.write(f"[FunASR Worker] Import error: {e}\n") + sys.stderr.write("[FunASR Worker] Please install: pip install funasr_onnx\n") + sys.stderr.flush() + raise + + # 读取模型配置 + model_id = os.environ.get("ASR_MODEL", "funasr-paraformer") + is_large = "large" in model_id.lower() + + device_info = detect_onnx_device() + if gpu_config is not None: + # 兼容:允许外部显式传入 device_id(例如 test_funasr_gpu.py) + try: + device_info = { + "device": getattr(gpu_config, "device_type", "cpu"), + "device_id": 
int(getattr(gpu_config, "device_id", -1)), + "provider": getattr(gpu_config, "provider_name", "CPUExecutionProvider"), + "providers": list(getattr(gpu_config, "providers", []) or []), + } + except Exception: + device_info = detect_onnx_device() + + # Large 版本默认不使用量化,精度更高 + quantize_env = os.environ.get("ASR_QUANTIZE", "").lower() + if quantize_env in ("true", "1", "yes"): + use_quantize = True + elif quantize_env in ("false", "0", "no"): + use_quantize = False + else: + # 默认: 普通版量化,Large版不量化 + use_quantize = not is_large + + sys.stderr.write(f"[FunASR Worker] Model ID: {model_id}\n") + sys.stderr.write(f"[FunASR Worker] Is Large model: {is_large}\n") + sys.stderr.write(f"[FunASR Worker] Use Quantize: {use_quantize}\n") + sys.stderr.write(f"[FunASR Worker] Offline mode: {OFFLINE_MODE}\n") + sys.stderr.write(f"[FunASR Worker] Host: {platform.system()} {platform.release()} ({platform.machine()})\n") + sys.stderr.write(f"[FunASR Worker] ASR_DEVICE={ASR_DEVICE}, ASR_DEVICE_ID={ASR_DEVICE_ID}\n") + sys.stderr.write(f"[FunASR Worker] ONNX Runtime providers: {device_info.get('providers')}\n") + sys.stderr.write( + "[FunASR Worker] Inference device selection: " + f"device={device_info.get('device')}, device_id={device_info.get('device_id')}, provider={device_info.get('provider')}\n" + ) + sys.stderr.write(f"[FunASR Worker] Preset size hint: {'~0.76GB INT8 (default)' if use_quantize else '~2.1GB FP32 (higher accuracy)'}\n") + if OFFLINE_MODE: + sys.stderr.write("[FunASR Worker] Loading ONNX models from local cache (offline mode)...\n") + else: + sys.stderr.write("[FunASR Worker] Loading ONNX models (first run will download)...\n") + sys.stderr.flush() + + # ONNX 模型配置 + # 可以通过环境变量覆盖默认模型 + vad_model_id = os.environ.get( + "FUNASR_VAD_MODEL", + "damo/speech_fsmn_vad_zh-cn-16k-common-onnx" + ) + online_model_id = os.environ.get( + "FUNASR_ONLINE_MODEL", + "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online-onnx" + ) + offline_model_id = os.environ.get( + 
"FUNASR_OFFLINE_MODEL", + "damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-onnx" + ) + punc_model_id = os.environ.get( + "FUNASR_PUNC_MODEL", + "damo/punc_ct-transformer_zh-cn-common-vocab272727-onnx" + ) + + def _normalize_model_id(value: str, label: str) -> str: + """ + 兼容历史/外部配置:有些环境可能会把 FUNASR_* 变量设置为本地缓存目录路径, + 但 funasr_onnx 内部会将该值传给 funasr.AutoModel(model=...)。 + AutoModel 需要 registry 模型 ID(如 "damo/xxx"),而不是 "C:\\...\\damo\\xxx"。 + """ + if not value: + return value + + # 已经是 registry 形式 + if "/" in value and not (":" in value or value.startswith("\\") or value.startswith("/")): + return value + + # 如果是本地路径(win/mac/linux),尝试从路径中提取 "org/model" + try: + norm = os.path.normpath(value) + parts = [p for p in norm.split(os.sep) if p] + # 常见结构: .../hub/models/damo/ 或 .../hub/damo/ + if "models" in parts: + idx = parts.index("models") + if idx + 2 < len(parts): + org = parts[idx + 1] + model = parts[idx + 2] + inferred = f"{org}/{model}" + sys.stderr.write(f"[FunASR Worker] Normalized {label} from local path to model id: {inferred}\n") + sys.stderr.flush() + return inferred + # 兜底:直接在路径中找 "damo/" + if "damo" in parts: + idx = parts.index("damo") + if idx + 1 < len(parts): + inferred = f"damo/{parts[idx + 1]}" + sys.stderr.write(f"[FunASR Worker] Normalized {label} from local path to model id: {inferred}\n") + sys.stderr.flush() + return inferred + except Exception: + pass + + # 无法识别时原样返回(让后续报错更明确) + return value + + vad_model_id = _normalize_model_id(vad_model_id, "VAD") + online_model_id = _normalize_model_id(online_model_id, "Streaming ASR (Pass 1)") + offline_model_id = _normalize_model_id(offline_model_id, "Offline ASR (Pass 2)") + punc_model_id = _normalize_model_id(punc_model_id, "Punctuation") + + def _ensure_cached(model_id: str, label: str) -> Optional[str]: + """ + 离线模式下仅用于校验本地缓存是否存在,并返回找到的目录路径(用于日志/提示)。 + + 重要:funasr_onnx 内部会将 model_dir 传给 funasr.AutoModel, + 这里必须传 registry 模型 ID(如 "damo/xxx"),不能传本地目录路径, + 否则会触发 AutoModel 的 "is not 
registered" 断言错误。 + """ + if not OFFLINE_MODE: + return None + found = resolve_local_model_path(model_id) + if not found: + raise RuntimeError( + f"Offline mode enabled (MODELSCOPE_OFFLINE=1) but required {label} model is not cached: {model_id}. " + f"Please download the model first, or disable offline mode." + ) + return found + + # 离线模式:只校验缓存是否存在(不把本地路径传给 funasr_onnx) + vad_cached = _ensure_cached(vad_model_id, "VAD") + online_cached = _ensure_cached(online_model_id, "Streaming ASR (Pass 1)") + offline_cached = _ensure_cached(offline_model_id, "Offline ASR (Pass 2)") + punc_cached = _ensure_cached(punc_model_id, "Punctuation") + + # 1. VAD 模型: 检测语音活动 + sys.stderr.write( + f"[FunASR Worker] Loading VAD model: {vad_model_id}" + + (f" (cached at {vad_cached})" if vad_cached else "") + + "...\n" + ) + sys.stderr.flush() + vad_model = Fsmn_vad( + model_dir=vad_model_id, + quantize=use_quantize, + device_id=int(device_info.get("device_id", -1)), + ) + + # 2. Pass 1 流式模型: 快速出字 + sys.stderr.write( + f"[FunASR Worker] Loading streaming ASR model (Pass 1): {online_model_id}" + + (f" (cached at {online_cached})" if online_cached else "") + + "...\n" + ) + sys.stderr.flush() + asr_online_model = ParaformerOnline( + model_dir=online_model_id, + batch_size=1, + device_id=int(device_info.get("device_id", -1)), + quantize=use_quantize, + intra_op_num_threads=4 + ) + + # 3. Pass 2 非流式模型: 高精度识别 + sys.stderr.write( + f"[FunASR Worker] Loading offline ASR model (Pass 2): {offline_model_id}" + + (f" (cached at {offline_cached})" if offline_cached else "") + + "...\n" + ) + sys.stderr.flush() + asr_offline_model = ParaformerOffline( + model_dir=offline_model_id, + batch_size=1, + device_id=int(device_info.get("device_id", -1)), + quantize=use_quantize, + intra_op_num_threads=4 + ) + + # 4. 
标点模型: 给 Pass 2 结果加标点 + sys.stderr.write( + f"[FunASR Worker] Loading punctuation model: {punc_model_id}" + + (f" (cached at {punc_cached})" if punc_cached else "") + + "...\n" + ) + sys.stderr.flush() + punc_model = CT_Transformer( + model_dir=punc_model_id, + quantize=use_quantize, + device_id=int(device_info.get("device_id", -1)), + intra_op_num_threads=2 + ) + + sys.stderr.write("[FunASR Worker] All models loaded successfully!\n") + sys.stderr.write(f"[FunASR Worker] Configuration: model={model_id}, quantize={use_quantize}\n") + sys.stderr.flush() + + return vad_model, asr_online_model, asr_offline_model, punc_model + + +def handle_streaming_chunk( + vad_model, + asr_online_model, + asr_offline_model, + punc_model, + data: dict, + sessions_cache: Dict[str, SessionState], +): + """ + 处理流式音频块 - 2-Pass 架构 + + Pass 1: 实时流式识别,快速返回 partial 结果 + Pass 2: 检测到句尾后,使用离线模型 + 标点进行高精度修正 + """ + request_id = data.get("request_id", "default") + session_id = data.get("session_id", request_id) + audio_data_b64 = data.get("audio_data") + is_final = bool(data.get("is_final", False)) + timestamp_ms = data.get("timestamp", int(time.time() * 1000)) + + if not audio_data_b64: + send_ipc_message({"request_id": request_id, "error": "No audio_data provided"}) + return + + state = sessions_cache.setdefault(session_id, SessionState()) + audio_chunk = decode_audio_chunk(audio_data_b64) + + if audio_chunk.size == 0: + return + + # 记录开始时间 + if not state.is_speaking and state.start_time == 0: + state.start_time = time.time() + + # ==== VAD 检测 ==== + try: + vad_segments = vad_model(audio_chunk) + current_chunk_has_speech = len(vad_segments) > 0 + except Exception as e: + sys.stderr.write(f"[FunASR Worker] VAD error: {e}\n") + sys.stderr.flush() + current_chunk_has_speech = True # 出错时保守处理 + + # ==== 状态管理 ==== + if current_chunk_has_speech: + state.silence_counter = 0 + state.is_speaking = True + state.full_sentence_buffer.append(audio_chunk) + else: + if state.is_speaking: + state.silence_counter 
+= 1 + # 保留一点静音段让音频更自然 + if state.silence_counter < SILENCE_BUFFER_KEEP: + state.full_sentence_buffer.append(audio_chunk) + + # ==== Pass 1: 实时流式识别 ==== + if state.is_speaking: + try: + partial_res = asr_online_model( + audio_chunk, + param_dict={"cache": state.online_cache, "is_final": False}, + ) + + if partial_res: + # 调试日志:查看实际返回的格式 + sys.stderr.write(f"[FunASR Worker] DEBUG partial_res type={type(partial_res).__name__}, value={str(partial_res)[:100]}\n") + sys.stderr.flush() + + # funasr_onnx 返回格式可能是: + # 1. [('text', ['chars'])] - 列表包含 tuple + # 2. [{'preds': 'text'}] - 列表包含字典 + # 3. ('text', ['chars']) - 直接是 tuple + text = "" + + # 先解包列表 + item = partial_res + while isinstance(item, list) and len(item) > 0: + item = item[0] + + # 现在 item 应该是 tuple 或 dict 或 str + if isinstance(item, dict): + preds_value = item.get("preds") or item.get("text") or "" + # 如果 preds 是 tuple,需要提取字符串 + if isinstance(preds_value, tuple) and len(preds_value) > 0: + text = preds_value[0] if isinstance(preds_value[0], str) else str(preds_value[0]) + elif isinstance(preds_value, str): + text = preds_value + else: + text = str(preds_value) if preds_value else "" + elif isinstance(item, tuple) and len(item) > 0: + # Tuple 格式: ('text', ['chars']) - 取第一个元素 + first_elem = item[0] + text = first_elem if isinstance(first_elem, str) else str(first_elem) + elif isinstance(item, str): + text = item + else: + text = str(item) if item else "" + + sys.stderr.write(f"[FunASR Worker] DEBUG extracted text=\"{text[:50]}...\"\n") + sys.stderr.flush() + + if text: + # 使用智能拼接更新 streaming_text,解决流式输出不连续问题 + new_streaming = smart_concat(state.streaming_text, text) + + if new_streaming != state.streaming_text: + state.streaming_text = new_streaming + send_ipc_message({ + "request_id": request_id, + "session_id": session_id, + "type": "partial", + "text": state.streaming_text, + "full_text": state.streaming_text, + "timestamp": timestamp_ms, + "is_final": False, + "status": "success", + "language": "zh", + }) + 
state.last_sent_text = text + sys.stderr.write(f"[FunASR Worker] 📝 PARTIAL: \"{state.streaming_text[-50:]}...\"\n") + sys.stderr.flush() + except Exception as e: + sys.stderr.write(f"[FunASR Worker] Pass 1 error: {e}\n") + sys.stderr.flush() + + # ==== Pass 2: 检测到句尾,触发高精度修正 ==== + if state.is_speaking and state.silence_counter >= SILENCE_THRESHOLD_CHUNKS: + _trigger_pass2( + asr_offline_model, + punc_model, + state, + request_id, + session_id, + timestamp_ms, + trigger="silence", + ) + + # ==== 处理 is_final 标记 ==== + if is_final and state.full_sentence_buffer: + _trigger_pass2( + asr_offline_model, + punc_model, + state, + request_id, + session_id, + timestamp_ms, + trigger="final", + ) + + +def _trigger_pass2( + asr_offline_model, + punc_model, + state: SessionState, + request_id: str, + session_id: str, + timestamp_ms: int, + trigger: str, +): + """ + 触发 Pass 2: 离线高精度识别 + 标点 + 智能分句 + + 改进:使用标点模型结果进行智能分句,将长文本拆分成多个自然句子分别发送。 + """ + if not state.full_sentence_buffer: + return + + sys.stderr.write(f"[FunASR Worker] Triggering Pass 2 ({trigger})...\n") + sys.stderr.flush() + + try: + # 合并音频片段 + complete_audio = np.concatenate(state.full_sentence_buffer) + audio_duration = len(complete_audio) / SAMPLE_RATE + + # A. 非流式高精度识别 + offline_res = asr_offline_model(complete_audio) + raw_text = "" + if offline_res: + # 解析返回值(可能是 tuple 或 dict) + item = offline_res[0] if isinstance(offline_res, list) else offline_res + if isinstance(item, dict): + raw_text = item.get("preds") or item.get("text") or "" + elif isinstance(item, (tuple, list)) and len(item) > 0: + raw_text = item[0] if isinstance(item[0], str) else str(item[0]) + elif isinstance(item, str): + raw_text = item + else: + raw_text = str(item) if item else "" + + if raw_text and len(raw_text) >= MIN_SENTENCE_CHARS: + # B. 
标点预测 + try: + punc_res = punc_model(raw_text) + # 解析标点模型返回值 + if punc_res: + punc_item = punc_res[0] if isinstance(punc_res, list) else punc_res + if isinstance(punc_item, str): + punctuated_text = punc_item + elif isinstance(punc_item, (tuple, list)) and len(punc_item) > 0: + punctuated_text = punc_item[0] if isinstance(punc_item[0], str) else str(punc_item[0]) + else: + punctuated_text = str(punc_item) if punc_item else raw_text + else: + punctuated_text = raw_text + except Exception as e: + sys.stderr.write(f"[FunASR Worker] Punctuation error: {e}\n") + sys.stderr.flush() + punctuated_text = raw_text + + sys.stderr.write(f"[FunASR Worker] Raw: \"{raw_text}\"\n") + sys.stderr.write(f"[FunASR Worker] With punc: \"{punctuated_text}\"\n") + sys.stderr.flush() + + # C. 智能分句:将长文本拆分成多个自然句子 + sentences = smart_split_sentences(punctuated_text) + + # 计算每个句子的大致时间分布 + total_chars = sum(len(s) for s in sentences) + current_time = state.start_time * 1000 if state.start_time else timestamp_ms - (audio_duration * 1000) + + for i, sentence in enumerate(sentences): + # 估算这个句子的时间范围 + sentence_ratio = len(sentence) / max(total_chars, 1) + sentence_duration = audio_duration * sentence_ratio + sentence_end_time = current_time + (sentence_duration * 1000) + + is_last = (i == len(sentences) - 1) + + sys.stderr.write(f"[FunASR Worker] 🎯 SENTENCE [{i+1}/{len(sentences)}]: \"{sentence[:50]}...\"\n") + sys.stderr.flush() + + send_ipc_message({ + "request_id": request_id, + "session_id": session_id, + "type": "sentence_complete", + "text": sentence, + "raw_text": raw_text if i == 0 else "", # 只在第一句附带原始文本 + "timestamp": int(sentence_end_time), + "is_final": is_last, + "status": "success", + "language": "zh", + "audio_duration": sentence_duration, + "trigger": trigger, + "start_time": int(current_time), + "end_time": int(sentence_end_time), + "sentence_index": i, + "total_sentences": len(sentences), + }) + + current_time = sentence_end_time + + except Exception as e: + 
def handle_force_commit(
    asr_offline_model,
    punc_model,
    data: dict,
    sessions_cache: Dict[str, SessionState],
):
    """Force-flush the current sentence for a session.

    Preference order: run a full Pass-2 decode on any buffered audio;
    otherwise commit the accumulated Pass-1 streaming text verbatim;
    otherwise just log that there was nothing to commit.
    """
    req_id = data.get("request_id", "default")
    sess_id = data.get("session_id", req_id)
    now_ms = int(time.time() * 1000)

    def _log(message: str) -> None:
        # All diagnostics go to stderr; stdout is reserved for IPC.
        sys.stderr.write(message)
        sys.stderr.flush()

    _log(f"[FunASR Worker] force_commit received for session={sess_id}\n")

    state = sessions_cache.get(sess_id)
    if state is None:
        _log(f"[FunASR Worker] No session state found for session={sess_id}\n")
        return

    if state.full_sentence_buffer:
        # Buffered audio exists: run the high-accuracy offline pass on it.
        _trigger_pass2(
            asr_offline_model,
            punc_model,
            state,
            req_id,
            sess_id,
            now_ms,
            trigger="force_commit",
        )
        return

    if state.streaming_text and len(state.streaming_text) >= MIN_SENTENCE_CHARS:
        # No audio buffered, but Pass-1 text is long enough to commit as-is.
        send_ipc_message({
            "request_id": req_id,
            "session_id": sess_id,
            "type": "sentence_complete",
            "text": state.streaming_text,
            "timestamp": now_ms,
            "is_final": True,
            "status": "success",
            "trigger": "force_commit_text_only",
            "language": "zh",
            "audio_duration": 0,
        })
        state.reset()
        return

    _log("[FunASR Worker] force_commit: no content to commit\n")
def handle_batch_file(asr_offline_model, punc_model, data: dict):
    """Transcribe a whole WAV file with the offline model + punctuation.

    IPC contract: emits {"request_id", "text", "raw_text", "language",
    "status"} on success, or {"request_id", "error", "traceback"} on failure.

    Fixes over the previous revision (now consistent with the SiliconFlow
    worker's batch handler):
      * rejects non-16-bit WAVs instead of silently misdecoding them as int16;
      * keeps only channel 0 of interleaved multi-channel audio instead of
        feeding the interleaved stream straight to the recognizer.
    """
    request_id = data.get("request_id", "unknown")
    audio_path = data.get("audio_path")

    if not audio_path:
        send_ipc_message({"request_id": request_id, "error": "No audio_path provided"})
        return
    if not os.path.exists(audio_path):
        send_ipc_message({"request_id": request_id, "error": f"File not found: {audio_path}"})
        return

    def _first_text(result) -> str:
        """Extract a text string from funasr_onnx's polymorphic return value.

        Observed shapes: [('text', [...])], [{'preds': ...}], bare tuples,
        bare strings.
        """
        if not result:
            return ""
        item = result[0] if isinstance(result, list) else result
        if isinstance(item, dict):
            return item.get("preds") or item.get("text") or ""
        if isinstance(item, (tuple, list)) and len(item) > 0:
            return item[0] if isinstance(item[0], str) else str(item[0])
        if isinstance(item, str):
            return item
        return str(item) if item else ""

    try:
        import wave
        with wave.open(audio_path, 'rb') as wf:
            # Anything but 16-bit PCM would be garbage after the int16 cast.
            if wf.getsampwidth() != 2:
                raise ValueError("Only 16-bit PCM supported")
            channels = wf.getnchannels()
            frames = wf.readframes(wf.getnframes())

        audio_data = np.frombuffer(frames, dtype=np.int16)
        if channels > 1:
            # Interleaved layout: take the first channel only.
            audio_data = audio_data.reshape(-1, channels)[:, 0]
        audio_float = audio_data.astype(np.float32)

        # Offline high-accuracy recognition (Pass 2 model).
        raw_text = _first_text(asr_offline_model(audio_float))

        # Punctuation restoration; fall back to the raw transcript on any
        # punctuation failure or empty punctuation output (previously an
        # empty punctuation result could silently drop the transcript).
        final_text = ""
        if raw_text:
            try:
                final_text = _first_text(punc_model(raw_text)) or raw_text
            except Exception:
                final_text = raw_text

        send_ipc_message({
            "request_id": request_id,
            "text": final_text,
            "raw_text": raw_text,
            "language": "zh",
            "status": "success",
        })

    except Exception as exc:
        send_ipc_message({
            "request_id": request_id,
            "error": str(exc),
            "traceback": traceback.format_exc(),
        })
2-Pass mode enabled.\n") + sys.stderr.flush() + + while True: + line = sys.stdin.readline() + if not line: + break + + try: + data = json.loads(line) + except json.JSONDecodeError as exc: + send_ipc_message({"request_id": "unknown", "error": f"Invalid JSON: {exc}"}) + continue + + request_type = data.get("type") + request_id = data.get("request_id", "default") + session_id = data.get("session_id", request_id) + + if request_type == "reset_session": + sys.stderr.write(f"[FunASR Worker] Resetting session: {session_id}\n") + sys.stderr.flush() + sessions_cache.pop(session_id, None) + continue + + if request_type == "force_commit": + handle_force_commit(asr_offline_model, punc_model, data, sessions_cache) + continue + + if request_type == "streaming_chunk": + handle_streaming_chunk( + vad_model, + asr_online_model, + asr_offline_model, + punc_model, + data, + sessions_cache, + ) + continue + + if request_type == "batch_file" or "audio_path" in data: + handle_batch_file(asr_offline_model, punc_model, data) + continue + + send_ipc_message({ + "request_id": request_id, + "error": f"Unknown request type: {request_type}", + }) + + except Exception as exc: + sys.stderr.write(f"[FunASR Worker] Fatal error: {exc}\n") + sys.stderr.write(traceback.format_exc()) + sys.stderr.flush() + send_ipc_message({"status": "fatal", "error": str(exc)}) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/desktop/backend/asr/asr_siliconflow_worker.py b/desktop/backend/asr/asr_siliconflow_worker.py new file mode 100644 index 0000000..b524969 --- /dev/null +++ b/desktop/backend/asr/asr_siliconflow_worker.py @@ -0,0 +1,573 @@ +#!/usr/bin/env python3 +# coding: utf-8 +""" +SiliconFlow ASR Worker - Parallel Redundant Architecture (并行冗余架构) + +策略: +- VAD 精准断句:使用 FunASR 轻量级 FSMN-VAD 模型(本地推理,延迟低) +- 并行冗余请求:每段音频同时发送 N 个(默认2个)请求到云端 API +- Race 机制:只接受最先返回的结果,其他自动取消 +- 段落独立:每段音频独立处理,不等待前一段完成 + +优势: +- 高可靠性:单个请求失败不影响结果 +- 低延迟:总是取最快返回的那个 +- 简化逻辑:无需复杂的重试和补偿机制 +""" + +import base64 +import 
concurrent.futures +import io +import json +import os +import platform +import sys +import time +import traceback +import wave +from dataclasses import dataclass, field +from typing import Dict, List, Optional + +import numpy as np + +# ============================================================================== +# IPC 通道重定向 +# ============================================================================== +ipc_fd = os.dup(sys.stdout.fileno()) +ipc_channel = os.fdopen(ipc_fd, "w", buffering=1, encoding="utf-8") +os.dup2(sys.stderr.fileno(), sys.stdout.fileno()) +sys.stdout = sys.stderr + + +def send_ipc_message(data: dict): + try: + ipc_channel.write(json.dumps(data, ensure_ascii=False) + "\n") + ipc_channel.flush() + except Exception as exc: + sys.stderr.write(f"[IPC Error] {exc}\n") + sys.stderr.flush() + + +# ============================================================================== +# 配置 +# ============================================================================== +API_URL = "https://api.siliconflow.cn/v1/audio/transcriptions" +_SF_API_KEY_OBFUSCATED = "c2staWJndG9zZmhuYmZxbmlueWVtYnRvY3B2eGJ2aG1qb3JuemJsZWZteWxlamd2a2xr" +API_KEY = os.environ.get("SILICONFLOW_API_KEY", base64.b64decode(_SF_API_KEY_OBFUSCATED).decode()).strip() +MODEL_NAME = os.environ.get("SILICONFLOW_MODEL", "TeleAI/TeleSpeechASR").strip() + +SAMPLE_RATE = int(os.environ.get("ASR_SAMPLE_RATE", "16000")) +CHUNK_MS = 200 # VAD 输入块大小 +MAX_BUFFER_SEC = float(os.environ.get("SF_MAX_BUFFER_SEC", "5.0")) # 降低到5秒,避免单句过长 +REQUEST_TIMEOUT = float(os.environ.get("SF_REQUEST_TIMEOUT", "25.0")) + +# 并行冗余配置 +PARALLEL_REQUESTS = int(os.environ.get("SF_PARALLEL_REQUESTS", "2")) # 每段发送的并行请求数 + +# VAD 配置 +SILENCE_THRESHOLD_CHUNKS = int(os.environ.get("SF_SILENCE_CHUNKS", "2")) # 降低到2,更快断句(原3) +USE_FUNASR_VAD = os.environ.get("SF_USE_FUNASR_VAD", "1") in ("1", "true", "yes") + +# VAD 推理设备选择(仅影响本地 VAD;云端 SiliconFlow ASR 不受影响) +# - auto: 自动选择(优先 CUDA,其次 ROCm,其次 DirectML,最后 CPU) +# - cpu/cuda/rocm/dml: 
强制指定 +SF_VAD_DEVICE = os.environ.get("SF_VAD_DEVICE", "auto").strip().lower() +SF_VAD_DEVICE_ID = int(os.environ.get("SF_VAD_DEVICE_ID", "0")) + +MIN_SENT_CHARS = 2 +SENTENCE_END_PUNCT = set("。!?!?.;;") + + +def decode_audio_chunk(audio_b64: str) -> np.ndarray: + """Base64 -> float32 PCM""" + audio_bytes = base64.b64decode(audio_b64) + audio_int16 = np.frombuffer(audio_bytes, dtype=np.int16) + return audio_int16.astype(np.float32) + + +def pcm_to_wav_bytes(pcm: np.ndarray, sample_rate: int) -> bytes: + """float32/int16 -> wav bytes""" + if pcm.dtype != np.int16: + if np.max(np.abs(pcm)) <= 1.0: + pcm = (pcm * 32767).astype(np.int16) + else: + pcm = pcm.astype(np.int16) + buf = io.BytesIO() + with wave.open(buf, "wb") as wf: + wf.setnchannels(1) + wf.setsampwidth(2) + wf.setframerate(sample_rate) + wf.writeframes(pcm.tobytes()) + return buf.getvalue() + + +def smart_concat(history: str, new_text: str) -> str: + """智能拼接文本""" + if not new_text: + return history + if not history: + return new_text + if new_text.startswith(history): + return new_text + if history.endswith(new_text): + return history + # 检查重叠 + overlap_len = min(len(history), len(new_text)) + for i in range(overlap_len, 0, -1): + if history.endswith(new_text[:i]): + return history + new_text[i:] + # 无重叠,添加空格 + if history and not history.endswith(tuple(SENTENCE_END_PUNCT)) and not history.endswith((" ", "\n")): + return history + " " + new_text + return history + new_text + + +@dataclass +class SessionState: + audio_buffer: List[np.ndarray] = field(default_factory=list) + silence_counter: int = 0 + is_speaking: bool = False + start_time_ms: int = 0 + # 移除 committed_text,每段独立返回 + # committed_text: str = "" + segment_seq: int = 0 + + def reset(self): + self.audio_buffer.clear() + self.silence_counter = 0 + self.is_speaking = False + self.start_time_ms = 0 + + def reset_all(self): + self.reset() + self.segment_seq = 0 + + +class SiliconFlowWorker: + def __init__(self): + self.sessions: Dict[str, 
SessionState] = {} + self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=8) + self.vad_model = None + self._vad_device_info = {"device": "cpu", "device_id": -1, "provider": "CPUExecutionProvider", "providers": []} + + # 加载轻量级 VAD 模型 + if USE_FUNASR_VAD: + self._load_vad_model() + + sys.stderr.write(f"[SF Worker] Parallel Redundant Mode\n") + sys.stderr.write(f"[SF Worker] - Model: {MODEL_NAME}\n") + sys.stderr.write(f"[SF Worker] - Parallel requests: {PARALLEL_REQUESTS}\n") + if self.vad_model: + sys.stderr.write( + "[SF Worker] - VAD: FunASR FSMN-VAD" + f" (device={self._vad_device_info.get('device')}, device_id={self._vad_device_info.get('device_id')}, " + f"provider={self._vad_device_info.get('provider')})\n" + ) + else: + sys.stderr.write(f"[SF Worker] - VAD: Simple RMS\n") + sys.stderr.write(f"[SF Worker] - Max buffer: {MAX_BUFFER_SEC}s\n") + sys.stderr.flush() + + def _detect_onnx_vad_device(self) -> dict: + """ + 自动检测 onnxruntime 可用的执行后端,并选择 VAD 使用的设备。 + + 注意: + - 这里只能控制「本地 VAD」的推理设备;SiliconFlow 云端 ASR 不会使用本机 GPU。 + - funasr_onnx 的 Fsmn_vad 接口通常通过 device_id 控制是否走 GPU(>=0)或 CPU(-1)。 + - Provider 选择受安装的 onnxruntime 版本影响: + * NVIDIA:onnxruntime-gpu -> CUDAExecutionProvider + * AMD/Win:onnxruntime-directml -> DmlExecutionProvider(适配 A/N/Intel) + * AMD/Linux:onnxruntime-rocm -> ROCMExecutionProvider + """ + forced = SF_VAD_DEVICE + device_id = SF_VAD_DEVICE_ID + + try: + import onnxruntime as ort # type: ignore + + providers = ort.get_available_providers() or [] + except Exception: + providers = [] + + providers_set = {p.lower(): p for p in providers} + has_cuda = "cudaexecutionprovider" in providers_set + has_rocm = "rocmexecutionprovider" in providers_set + has_dml = "dmlexecutionprovider" in providers_set + + def _cpu(): + return { + "device": "cpu", + "device_id": -1, + "provider": "CPUExecutionProvider", + "providers": providers, + } + + def _gpu(provider_key: str, device: str): + return { + "device": device, + "device_id": device_id, + 
"provider": providers_set.get(provider_key, provider_key), + "providers": providers, + } + + # 强制模式 + if forced in ("cpu", "none", "off", "-1"): + return _cpu() + if forced in ("cuda", "nvidia"): + return _gpu("cudaexecutionprovider", "cuda") if has_cuda else _cpu() + if forced in ("rocm", "amd"): + return _gpu("rocmexecutionprovider", "rocm") if has_rocm else _cpu() + if forced in ("dml", "directml"): + return _gpu("dmlexecutionprovider", "dml") if has_dml else _cpu() + + # auto:按优先级选择(CUDA > ROCm > DirectML > CPU) + if has_cuda: + return _gpu("cudaexecutionprovider", "cuda") + if has_rocm: + return _gpu("rocmexecutionprovider", "rocm") + # Windows 下 AMD/NVIDIA 通常走 DirectML + if has_dml: + return _gpu("dmlexecutionprovider", "dml") + return _cpu() + + def _load_vad_model(self): + """加载 FunASR 轻量级 VAD 模型(约 100MB,比完整 ASR 模型小得多)""" + try: + from funasr_onnx.vad_bin import Fsmn_vad + vad_model_id = "damo/speech_fsmn_vad_zh-cn-16k-common-onnx" + + self._vad_device_info = self._detect_onnx_vad_device() + sys.stderr.write(f"[SF Worker] Host: {platform.system()} {platform.release()} ({platform.machine()})\n") + sys.stderr.write(f"[SF Worker] SF_VAD_DEVICE={SF_VAD_DEVICE}, SF_VAD_DEVICE_ID={SF_VAD_DEVICE_ID}\n") + sys.stderr.write(f"[SF Worker] ONNX Runtime providers: {self._vad_device_info.get('providers')}\n") + sys.stderr.write( + f"[SF Worker] Loading VAD model: {vad_model_id} " + f"(device={self._vad_device_info.get('device')}, device_id={self._vad_device_info.get('device_id')}, " + f"provider={self._vad_device_info.get('provider')})...\n" + ) + sys.stderr.flush() + + # funasr_onnx:device_id=-1 表示 CPU;>=0 尝试使用 GPU(由安装的 onnxruntime provider 决定) + self.vad_model = Fsmn_vad( + model_dir=vad_model_id, + quantize=True, + device_id=int(self._vad_device_info.get("device_id", -1)), + ) + sys.stderr.write("[SF Worker] VAD model loaded successfully!\n") + sys.stderr.flush() + except Exception as e: + sys.stderr.write(f"[SF Worker] VAD loading failed: {e}, fallback to RMS\n") + 
sys.stderr.flush() + self.vad_model = None + + def _is_speech(self, chunk_f32: np.ndarray) -> bool: + """VAD 检测:优先用 FunASR 模型,回退到简单 RMS""" + if chunk_f32.size == 0: + return False + + if self.vad_model: + try: + # FunASR VAD 实际上接受 float32 格式(范围在 -32768 到 32768) + # 输入的 chunk_f32 已经是正确格式了,直接传入 + segments = self.vad_model(chunk_f32) + return len(segments) > 0 + except Exception as e: + sys.stderr.write(f"[SF Worker] VAD error: {e}, using RMS fallback\n") + sys.stderr.flush() + + # RMS 回退方案 + rms = float(np.sqrt(np.mean(chunk_f32 ** 2))) + threshold = 300 / 32768.0 + return rms >= threshold + + def _get_state(self, session_id: str) -> SessionState: + if session_id not in self.sessions: + self.sessions[session_id] = SessionState() + return self.sessions[session_id] + + def reset_session(self, session_id: str): + self.sessions.pop(session_id, None) + sys.stderr.write(f"[SF Worker] Session reset: {session_id}\n") + sys.stderr.flush() + + def handle_force_commit(self, data: dict): + session_id = data.get("session_id") + if not session_id: + return + state = self.sessions.get(session_id) + if not state or not state.audio_buffer: + return + self._commit_segment(state, data.get("request_id", "default"), session_id, "force_commit") + + def handle_streaming_chunk(self, data: dict): + session_id = data.get("session_id") or data.get("request_id") or "default" + request_id = data.get("request_id", "default") + audio_b64 = data.get("audio_data") + timestamp_ms = int(data.get("timestamp", int(time.time() * 1000))) + is_final = bool(data.get("is_final", False)) + + if not audio_b64: + return + + state = self._get_state(session_id) + if state.start_time_ms == 0: + state.start_time_ms = timestamp_ms + + chunk = decode_audio_chunk(audio_b64) + if chunk.size == 0: + return + + # VAD 检测 + has_voice = self._is_speech(chunk) + + if has_voice: + state.is_speaking = True + state.silence_counter = 0 + state.audio_buffer.append(chunk) + else: + if state.is_speaking: + state.silence_counter += 
1 + # 保留少量尾部静音 + if state.silence_counter <= 2: + state.audio_buffer.append(chunk) + + # 检查是否应该提交 + buffered_samples = sum(c.size for c in state.audio_buffer) + buffered_sec = buffered_samples / float(SAMPLE_RATE) + + should_commit = state.is_speaking and ( + state.silence_counter >= SILENCE_THRESHOLD_CHUNKS or + buffered_sec >= MAX_BUFFER_SEC or + is_final + ) + + if should_commit and state.audio_buffer: + trigger = "final" if is_final else ("max_buffer" if buffered_sec >= MAX_BUFFER_SEC else "silence") + self._commit_segment(state, request_id, session_id, trigger) + + def handle_batch_file(self, data: dict): + request_id = data.get("request_id", "unknown") + audio_path = data.get("audio_path") + + if not audio_path or not os.path.exists(audio_path): + send_ipc_message({"request_id": request_id, "status": "error", "error": f"File not found: {audio_path}"}) + return + + try: + with wave.open(audio_path, "rb") as wf: + if wf.getsampwidth() != 2: + raise ValueError("Only 16-bit PCM supported") + ch = wf.getnchannels() + sr = wf.getframerate() + raw = wf.readframes(wf.getnframes()) + + audio = np.frombuffer(raw, dtype=np.int16) + if ch > 1: + audio = audio.reshape(-1, ch)[:, 0] + audio_f32 = audio.astype(np.float32) + + # 批量文件使用单请求即可 + self._parallel_transcribe_and_send(audio_f32, sr, request_id, "batch_file", None, None) + except Exception as exc: + send_ipc_message({ + "request_id": request_id, + "status": "error", + "error": str(exc), + "traceback": traceback.format_exc() + }) + + def _commit_segment(self, state: SessionState, request_id: str, session_id: str, trigger: str): + """提交音频段""" + merged = np.concatenate(state.audio_buffer) + sr = SAMPLE_RATE + state.reset() + state.segment_seq += 1 + seg_seq = state.segment_seq + + duration_sec = len(merged) / float(sr) + sys.stderr.write(f"[SF Worker] 📤 Committing segment #{seg_seq} ({duration_sec:.1f}s, trigger={trigger})\n") + sys.stderr.flush() + + # 并行冗余请求 + self._parallel_transcribe_and_send(merged, sr, request_id, 
trigger, session_id, seg_seq) + + def _parallel_transcribe_and_send( + self, + audio_f32: np.ndarray, + sample_rate: int, + request_id: str, + trigger: str, + session_id: Optional[str], + seg_seq: Optional[int], + ): + """并行发送多个冗余请求,取最快返回的结果""" + import requests + + t0 = time.time() + wav_bytes = pcm_to_wav_bytes(audio_f32, sample_rate) + + def single_request(replica_id: int): + """单个 API 请求""" + try: + files = {"file": ("chunk.wav", wav_bytes, "audio/wav")} + data = {"model": MODEL_NAME} + headers = {"Authorization": f"Bearer {API_KEY}"} if API_KEY else {} + + sys.stderr.write(f"[SF Worker] - Request #{replica_id} started\n") + sys.stderr.flush() + + resp = requests.post( + API_URL, + headers=headers, + data=data, + files=files, + timeout=(3, REQUEST_TIMEOUT), + ) + resp.raise_for_status() + + j = resp.json() + text = (j.get("text") or "").strip() + latency = time.time() - t0 + + sys.stderr.write(f"[SF Worker] ✓ Request #{replica_id} returned in {latency:.2f}s: \"{text[:30]}...\"\n") + sys.stderr.flush() + + return {"text": text, "replica_id": replica_id, "latency": latency} + except Exception as exc: + sys.stderr.write(f"[SF Worker] ✗ Request #{replica_id} failed: {exc}\n") + sys.stderr.flush() + raise + + # 并行发送 N 个请求 + futures = [] + for i in range(PARALLEL_REQUESTS): + future = self.executor.submit(single_request, i) + futures.append(future) + + # 等待第一个完成的请求(Race) + result = None + try: + done, pending = concurrent.futures.wait( + futures, + timeout=REQUEST_TIMEOUT + 5, + return_when=concurrent.futures.FIRST_COMPLETED + ) + + # 取第一个成功的结果 + for future in done: + try: + result = future.result() + break + except Exception: + continue + + # 取消其他未完成的请求 + for future in pending: + future.cancel() + + except Exception as exc: + sys.stderr.write(f"[SF Worker] Parallel request failed: {exc}\n") + sys.stderr.flush() + + # 如果所有请求都失败 + if result is None: + send_ipc_message({ + "request_id": request_id, + "session_id": session_id or request_id, + "status": "error", + 
"error": "All parallel requests failed", + "trigger": trigger, + "engine": "siliconflow", + }) + return + + # 处理成功的结果 + text = result["text"] + latency_ms = int(result["latency"] * 1000) + now_ms = int(time.time() * 1000) + + # batch_file:直接返回 + if session_id is None: + send_ipc_message({ + "request_id": request_id, + "session_id": request_id, + "type": "sentence_complete", + "text": text, + "timestamp": now_ms, + "is_final": True, + "status": "success", + "language": "zh", + "trigger": trigger, + "latency_ms": latency_ms, + "engine": "siliconflow", + "replica_id": result["replica_id"], + }) + return + + # streaming:每段独立返回(不再累积) + # 这样避免了重复保存问题,由前端/Node.js端决定如何处理多段文本 + if not text: + return + + send_ipc_message({ + "request_id": request_id, + "session_id": session_id, + "type": "sentence_complete", + "text": text, # 直接返回本段文本,不累积 + "timestamp": now_ms, + "is_final": True, + "status": "success", + "language": "zh", + "trigger": trigger, + "latency_ms": latency_ms, + "engine": "siliconflow", + "segment_seq": seg_seq, + "replica_id": result["replica_id"], + }) + + +def main(): + try: + worker = SiliconFlowWorker() + send_ipc_message({"status": "ready"}) + sys.stderr.write("[SF Worker] READY - Parallel Redundant Mode Enabled\n") + sys.stderr.flush() + + while True: + line = sys.stdin.readline() + if not line: + break + + try: + data = json.loads(line) + except json.JSONDecodeError: + continue + + req_type = data.get("type") + if req_type == "reset_session": + worker.reset_session(data.get("session_id", "")) + elif req_type == "force_commit": + worker.handle_force_commit(data) + elif req_type == "streaming_chunk": + worker.handle_streaming_chunk(data) + elif req_type == "batch_file" or "audio_path" in data: + worker.handle_batch_file(data) + else: + send_ipc_message({ + "request_id": data.get("request_id", "unknown"), + "status": "error", + "error": f"Unknown request type: {req_type}" + }) + + except Exception as exc: + sys.stderr.write(f"[SF Worker] Fatal: {exc}\n") + 
sys.stderr.write(traceback.format_exc()) + sys.stderr.flush() + send_ipc_message({"status": "fatal", "error": str(exc)}) + sys.exit(1) + + +if __name__ == "__main__": + main() + diff --git a/desktop/backend/main.py b/desktop/backend/main.py new file mode 100644 index 0000000..e30a8cc --- /dev/null +++ b/desktop/backend/main.py @@ -0,0 +1,466 @@ +import asyncio +import base64 +import json +import os +import sys +import tempfile +from pathlib import Path +from typing import Dict, Optional +from uuid import uuid4 +import time + +from fastapi import FastAPI, WebSocket, WebSocketDisconnect, UploadFile, File, HTTPException +from fastapi.responses import JSONResponse +import uvicorn +from starlette.websockets import WebSocketState + + +# --------------------------------------------------------------------------- +# Environment & paths +# --------------------------------------------------------------------------- +BASE_DIR = Path(__file__).resolve().parent +PROJECT_ROOT = BASE_DIR.parent +MEIPASS_DIR = Path(getattr(sys, "_MEIPASS", BASE_DIR)) +ASSETS_ROOT = MEIPASS_DIR if MEIPASS_DIR.exists() else PROJECT_ROOT +# Worker 脚本在 backend/asr/ 目录下 +ASR_DIR = BASE_DIR / "asr" +if not ASR_DIR.exists(): + # 回退到打包环境或其他位置 + ASR_DIR = (ASSETS_ROOT / "asr") if (ASSETS_ROOT / "asr").exists() else (PROJECT_ROOT / "asr") + +DEFAULT_ENGINE = os.environ.get("ASR_ENGINE", "funasr").lower() +DEFAULT_MODEL = os.environ.get("ASR_MODEL", "funasr-paraformer") + +# 支持的引擎列表 +SUPPORTED_ENGINES = {"funasr", "siliconflow", "baidu"} + + +def _print_debug_info(): + """打印调试信息,帮助排查打包后路径问题""" + print("=" * 60, file=sys.stderr) + print("[ASR Backend] DEBUG INFO", file=sys.stderr) + print("=" * 60, file=sys.stderr) + print(f" sys.executable: {sys.executable}", file=sys.stderr) + print(f" sys.argv: {sys.argv}", file=sys.stderr) + print(f" cwd: {os.getcwd()}", file=sys.stderr) + print(f" __file__: {__file__}", file=sys.stderr) + print(f" BASE_DIR: {BASE_DIR} (exists={BASE_DIR.exists()})", file=sys.stderr) + 
print(f" PROJECT_ROOT: {PROJECT_ROOT} (exists={PROJECT_ROOT.exists()})", file=sys.stderr) + print(f" has _MEIPASS: {hasattr(sys, '_MEIPASS')}", file=sys.stderr) + if hasattr(sys, "_MEIPASS"): + print(f" sys._MEIPASS: {sys._MEIPASS}", file=sys.stderr) + print(f" MEIPASS_DIR: {MEIPASS_DIR} (exists={MEIPASS_DIR.exists()})", file=sys.stderr) + print(f" ASSETS_ROOT: {ASSETS_ROOT} (exists={ASSETS_ROOT.exists()})", file=sys.stderr) + print(f" ASR_DIR: {ASR_DIR} (exists={ASR_DIR.exists()})", file=sys.stderr) + + # 列出 ASR_DIR 内容 + if ASR_DIR.exists(): + try: + files = list(ASR_DIR.iterdir()) + print(f" ASR_DIR contents: {[f.name for f in files]}", file=sys.stderr) + except Exception as e: + print(f" ASR_DIR list error: {e}", file=sys.stderr) + + # 检查 worker 脚本 + for worker_name in ["asr_funasr_worker.py", "asr_siliconflow_worker.py", "asr_worker.py"]: + worker_path = ASR_DIR / worker_name + print(f" {worker_name}: {worker_path} (exists={worker_path.exists()})", file=sys.stderr) + + # 关键环境变量 + env_keys = ["ASR_ENGINE", "ASR_MODEL", "ASR_HOST", "ASR_PORT", "ASR_CACHE_DIR", + "HF_HOME", "MODELSCOPE_CACHE", "PYTHONPATH"] + print(" Environment variables:", file=sys.stderr) + for key in env_keys: + val = os.environ.get(key, "") + print(f" {key}={val}", file=sys.stderr) + print("=" * 60, file=sys.stderr) + sys.stderr.flush() + + +# 启动时打印调试信息 +_print_debug_info() + + +def resolve_python_cmd() -> str: + """解析 Python 命令(仅在非打包环境下使用)""" + env_py = os.environ.get("ASR_PYTHON_PATH") + if env_py and Path(env_py).exists(): + return env_py + + # Prefer the current interpreter if available + if sys.executable and Path(sys.executable).exists(): + return sys.executable + + return "python.exe" if sys.platform.startswith("win") else "python3" + + +def is_packaged() -> bool: + """检测是否在 PyInstaller 打包环境中运行""" + return hasattr(sys, "_MEIPASS") + + +class WorkerBridge: + """ + Thin bridge that keeps the existing stdin/stdout workers (asr_worker.py / asr_funasr_worker.py) + and exposes them over 
WebSocket via FastAPI. + """ + + def __init__(self, engine: str, model: str): + self.engine = engine + self.model = model + self.process: Optional[asyncio.subprocess.Process] = None + self.stdout_task: Optional[asyncio.Task] = None + self.ready_event = asyncio.Event() + self.ws_clients: Dict[str, WebSocket] = {} + self.pending_requests: Dict[str, asyncio.Future] = {} + + def _worker_script_path(self, packaged: bool) -> Path: + """获取 worker 脚本路径(统一使用 Python 解释器启动,而非独立可执行文件)。""" + # 无论是否打包,都使用 ASR_DIR,因为打包时文件在 _internal/asr/ 目录下 + base_dir = ASR_DIR + if self.engine == "funasr": + return base_dir / "asr_funasr_worker.py" + if self.engine == "siliconflow": + return base_dir / "asr_siliconflow_worker.py" + if self.engine == "baidu": + return base_dir / "asr_baidu_worker.py" + # Fallback to generic worker + return base_dir / "asr_worker.py" + + async def start(self): + if self.process: + return + + print(f"[WorkerBridge] engine={self.engine}, model={self.model}", file=sys.stderr) + print(f"[WorkerBridge] is_packaged={is_packaged()}", file=sys.stderr) + + packaged = is_packaged() + worker_path = self._worker_script_path(packaged) + python_cmd = sys.executable if packaged else resolve_python_cmd() + + print(f"[WorkerBridge] worker_path={worker_path} (exists={worker_path.exists()})", file=sys.stderr) + print(f"[WorkerBridge] python_cmd={python_cmd}", file=sys.stderr) + + env = os.environ.copy() + + # 判断是否使用 Large 模型(Large 版本默认不量化) + is_large_model = "large" in self.model.lower() + + env.update( + { + "PYTHONUNBUFFERED": "1", + "ASR_MODEL": self.model, + "ASR_ENGINE": self.engine, + # Large 模型默认不使用量化,精度更高 + "ASR_QUANTIZE": "false" if is_large_model else "true", + # align ModelScope cache with ASR cache to avoid global locks + "MODELSCOPE_CACHE": os.environ.get("MODELSCOPE_CACHE") or "", + "MODELSCOPE_CACHE_HOME": os.environ.get("MODELSCOPE_CACHE") or "", + } + ) + + if not worker_path.exists(): + parent = worker_path.parent + print(f"[WorkerBridge] ERROR: Worker script not 
found!", file=sys.stderr) + print(f"[WorkerBridge] Parent dir: {parent} (exists={parent.exists()})", file=sys.stderr) + if parent.exists(): + try: + files = list(parent.iterdir()) + print(f"[WorkerBridge] Parent contents: {[f.name for f in files]}", file=sys.stderr) + except Exception as e: + print(f"[WorkerBridge] Cannot list parent: {e}", file=sys.stderr) + sys.stderr.flush() + raise FileNotFoundError(f"Worker script not found: {worker_path}") + + print(f"[WorkerBridge] Spawning worker subprocess...", file=sys.stderr) + sys.stderr.flush() + + self.process = await asyncio.create_subprocess_exec( + python_cmd, + str(worker_path), + stdin=asyncio.subprocess.PIPE, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + env=env, + ) + + print(f"[WorkerBridge] Worker process spawned, pid={self.process.pid}", file=sys.stderr) + sys.stderr.flush() + + self.stdout_task = asyncio.create_task(self._consume_output()) + asyncio.create_task(self._consume_stderr()) + + async def _consume_output(self): + assert self.process and self.process.stdout + print(f"[WorkerBridge] _consume_output started, reading worker stdout...", file=sys.stderr) + sys.stderr.flush() + async for line in self.process.stdout: + line = line.decode("utf-8", errors="ignore").strip() + if not line: + continue + try: + payload = json.loads(line) + except json.JSONDecodeError: + # Not a JSON line, print for debugging + print(f"[WorkerBridge][stdout] (non-JSON): {line[:200]}", file=sys.stderr) + sys.stderr.flush() + continue + + if payload.get("status") == "ready": + print(f"[WorkerBridge] Received READY signal from worker!", file=sys.stderr) + sys.stderr.flush() + self.ready_event.set() + continue + + request_id = payload.get("request_id") + session_id = payload.get("session_id") + + # Resolve pending HTTP requests + if request_id and request_id in self.pending_requests: + fut = self.pending_requests.pop(request_id) + if not fut.done(): + fut.set_result(payload) + + # Fan-out to websocket clients 
+ if session_id and session_id in self.ws_clients: + ws = self.ws_clients[session_id] + try: + await ws.send_json(payload) + except RuntimeError: + # websocket already closed + pass + + # stdout 结束,说明进程已退出 + print(f"[WorkerBridge] Worker stdout closed (process exited)", file=sys.stderr) + if self.process: + print(f"[WorkerBridge] Process returncode={self.process.returncode}", file=sys.stderr) + sys.stderr.flush() + + async def _consume_stderr(self): + if not self.process or not self.process.stderr: + return + async for line in self.process.stderr: + sys.stderr.write(line.decode("utf-8", errors="ignore")) + sys.stderr.flush() + + async def ensure_ready(self): + print(f"[WorkerBridge] ensure_ready() called, starting worker...", file=sys.stderr) + sys.stderr.flush() + await self.start() + print(f"[WorkerBridge] Worker started, waiting for ready signal (timeout=300s)...", file=sys.stderr) + sys.stderr.flush() + try: + await asyncio.wait_for(self.ready_event.wait(), timeout=300) # allow slower first-time downloads + print(f"[WorkerBridge] Worker is READY!", file=sys.stderr) + sys.stderr.flush() + except asyncio.TimeoutError as exc: + print(f"[WorkerBridge] TIMEOUT waiting for worker ready signal!", file=sys.stderr) + # 检查进程是否还活着 + if self.process: + print(f"[WorkerBridge] Process returncode={self.process.returncode}", file=sys.stderr) + sys.stderr.flush() + raise RuntimeError("ASR worker did not become ready in time") from exc + + async def stop(self): + if self.process: + # 进程可能已提前退出,先检查 returncode,避免重复 terminate 触发 ProcessLookupError + if self.process.returncode is None: + try: + self.process.terminate() + except ProcessLookupError: + pass + try: + await asyncio.wait_for(self.process.wait(), timeout=10) + except asyncio.TimeoutError: + try: + self.process.kill() + except ProcessLookupError: + pass + if self.stdout_task: + self.stdout_task.cancel() + self.process = None + self.stdout_task = None + self.ready_event.clear() + + async def send(self, payload: dict): + if 
not self.process or not self.process.stdin: + raise RuntimeError("Worker process is not running") + data = json.dumps(payload, ensure_ascii=False) + "\n" + self.process.stdin.write(data.encode("utf-8")) + await self.process.stdin.drain() + + async def force_commit(self, session_id: str): + await self.send({"type": "force_commit", "session_id": session_id}) + + async def reset_session(self, session_id: str): + await self.send({"type": "reset_session", "session_id": session_id}) + + def bind_ws(self, session_id: str, ws: WebSocket): + self.ws_clients[session_id] = ws + + def unbind_ws(self, session_id: str): + self.ws_clients.pop(session_id, None) + + async def request_transcribe(self, audio_path: str, timeout: float = 300.0): + request_id = str(uuid4()) + fut: asyncio.Future = asyncio.get_event_loop().create_future() + self.pending_requests[request_id] = fut + + await self.send( + { + "type": "batch_file", + "request_id": request_id, + "audio_path": audio_path, + } + ) + return await asyncio.wait_for(fut, timeout=timeout) + + +app = FastAPI() +bridge: Optional[WorkerBridge] = None + + +@app.on_event("startup") +async def startup(): + global bridge + bridge = WorkerBridge(engine=DEFAULT_ENGINE, model=DEFAULT_MODEL) + await bridge.ensure_ready() + + +@app.on_event("shutdown") +async def shutdown(): + if bridge: + await bridge.stop() + + +@app.get("/health") +async def health(): + return {"status": "ok", "engine": DEFAULT_ENGINE, "model": DEFAULT_MODEL} + + +@app.post("/transcribe") +async def transcribe(file: UploadFile = File(...)): + if not bridge: + raise HTTPException(status_code=500, detail="ASR bridge not initialized") + + suffix = Path(file.filename).suffix or ".wav" + with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp: + content = await file.read() + tmp.write(content) + tmp_path = tmp.name + + try: + result = await bridge.request_transcribe(tmp_path) + return JSONResponse(result) + finally: + try: + os.remove(tmp_path) + except OSError: + 
pass + + +@app.websocket("/ws/transcribe") +async def ws_transcribe(websocket: WebSocket, session_id: str): + if not bridge: + await websocket.close(code=1011) + return + + await bridge.ensure_ready() + await websocket.accept() + bridge.bind_ws(session_id, websocket) + + try: + while True: + message = await websocket.receive() + + if message["type"] == "websocket.disconnect": + break + + if message.get("bytes") is not None: + audio_bytes: bytes = message["bytes"] + audio_b64 = base64.b64encode(audio_bytes).decode("ascii") + await bridge.send( + { + "type": "streaming_chunk", + "session_id": session_id, + "audio_data": audio_b64, + "timestamp": int(time.time() * 1000), + "is_final": False, + } + ) + elif message.get("text"): + try: + payload = json.loads(message["text"]) + except json.JSONDecodeError: + continue + + msg_type = payload.get("type") + if msg_type == "force_commit": + await bridge.force_commit(session_id) + elif msg_type == "reset_session": + await bridge.reset_session(session_id) + except WebSocketDisconnect: + pass + finally: + bridge.unbind_ws(session_id) + try: + await bridge.reset_session(session_id) + except Exception: + pass + # 客户端已断开时避免重复发送 close 触发 RuntimeError + if websocket.application_state != WebSocketState.DISCONNECTED: + try: + await websocket.close() + except RuntimeError: + pass + + +def main(): + host = os.environ.get("ASR_HOST", "127.0.0.1") + port = int(os.environ.get("ASR_PORT", "0") or 0) + if port == 0: + # pick random free port if not set + import socket + + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind((host, 0)) + port = s.getsockname()[1] + + print(f"[ASR API] Starting FastAPI server on {host}:{port} (engine={DEFAULT_ENGINE}, model={DEFAULT_MODEL})") + uvicorn.run(app, host=host, port=port, log_level="info") + + +if __name__ == "__main__": + import runpy + + # Simple argument parsing to support running worker scripts in packaged environment + if len(sys.argv) > 1 and sys.argv[1].endswith(".py"): + 
script_path = Path(sys.argv[1]) + # Security check: only allow running scripts from ASR_DIR + # NOTE: Path.is_relative_to was introduced in Python 3.9. PyInstaller builds may use older versions. + # Use Path.relative_to for broad compatibility. + try: + script_path.resolve().relative_to(ASR_DIR.resolve()) + is_relative = True + except Exception: + is_relative = False + + if script_path.exists() and (is_relative or os.environ.get("ALLOW_ARBITRARY_SCRIPTS") == "1"): + # Adjust sys.argv so the script sees itself as argv[0] + sys.argv = sys.argv[1:] + print(f"[ASR Launcher] Running script: {script_path}", file=sys.stderr) + try: + runpy.run_path(str(script_path), run_name="__main__") + except Exception as e: + print(f"[ASR Launcher] Error running script: {e}", file=sys.stderr) + import traceback + traceback.print_exc() + sys.exit(1) + sys.exit(0) + else: + print(f"[ASR Launcher] Script not found or not allowed: {script_path}", file=sys.stderr) + if not is_relative: + print(f"[ASR Launcher] Script must be in {ASR_DIR}", file=sys.stderr) + + main() + diff --git a/desktop/backend/pyinstaller/asr-backend.spec b/desktop/backend/pyinstaller/asr-backend.spec new file mode 100644 index 0000000..95558fb --- /dev/null +++ b/desktop/backend/pyinstaller/asr-backend.spec @@ -0,0 +1,63 @@ +# -*- mode: python ; coding: utf-8 -*- +import os +from PyInstaller.utils.hooks import collect_data_files, collect_submodules + +project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) +asr_dir = os.path.join(project_root, 'asr') + +datas = [ + (asr_dir, 'asr'), +] + +datas += collect_data_files('onnxruntime', include_py_files=False) +datas += collect_data_files('ctranslate2', include_py_files=False) + +hiddenimports = [ + 'uvicorn.lifespan', + 'uvicorn.loops.auto', + 'uvicorn.protocols.http.auto', + 'uvicorn.protocols.websockets.auto', + 'fastapi', + 'fastapi.applications', + 'starlette.websockets', + 'ctranslate2', + 'onnxruntime.capi.onnxruntime_pybind11_state', +] + 
collect_submodules('onnxruntime.capi') + collect_submodules('funasr') + +a = Analysis( + ['../main.py'], + pathex=[project_root], + binaries=[], + datas=datas, + hiddenimports=hiddenimports, + hookspath=[], + runtime_hooks=[], + excludes=[], + noarchive=False, +) +pyz = PYZ(a.pure, a.zipped_data) + +exe = EXE( + pyz, + a.scripts, + [], + name='asr-backend', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + console=True, +) + +coll = COLLECT( + exe, + a.binaries, + a.zipfiles, + a.datas, + strip=False, + upx=True, + upx_exclude=[], + name='asr-backend', + distpath=os.path.join(project_root, 'backend', 'dist'), +) + diff --git a/desktop/docs/git-log-hhh2210.md b/desktop/docs/git-log-hhh2210.md new file mode 100644 index 0000000..70ece6c --- /dev/null +++ b/desktop/docs/git-log-hhh2210.md @@ -0,0 +1,23 @@ +# hhh2210 近期改动(高层概要) + +时间范围:2025-12-06 ~ 2025-12-08 + +## 1) 建议/对话相关 +- 新增 “场景判断 LLM” 配置,完全由专用模型决定是否生成选项,移除关键词启发式,支持单独模型名与开关。 +- 前端表单和 Hook 同步加载/保存该配置,默认沿用 gpt-4o-mini;架构图补充字段与流程。 + +## 2) 结构化记忆侧车 +- 引入独立 FastAPI 侧车(profiles/events 查询与写入,SQLite 存储),支持 add/update/append/replace 合并模式与时间/标签过滤。 +- Electron 主进程增加 MemoryService 客户端与 IPC,渲染层可通过 preload 调用;未配置 baseUrl 时安全降级为空结果。 +- README 补充发布建议:PyInstaller 打包随 Electron 分发、启动探活、端口选择、macOS 签名注意事项。 + +## 3) ASR 下载与体验 +- 下载缓存改为应用级共享(HF/MS 双源,自动创建 HF_HOME / MODELSCOPE_CACHE),可解析 ModelScope 实际落盘路径。 +- UI 支持断点续传与错误提示,下载源可选;状态更新更及时。 + +## 4) 稳定性修复 +- HUD 创建增加防抖并在加载中提示。 +- 数据库路径优先写入 userData,回退检测避免打包后权限问题。 +- DB 模块改用预生成文本 ID,避免 lastInsertRowid 失配。 +- ASR 设置页的“测试 ASR”按钮现执行真实测试并展示实时结果/错误。 + diff --git a/desktop/docs/llm-architecture.puml b/desktop/docs/llm-architecture.puml new file mode 100644 index 0000000..197df14 --- /dev/null +++ b/desktop/docs/llm-architecture.puml @@ -0,0 +1,162 @@ +@startuml LiveGalGame LLM Architecture + +skinparam backgroundColor #FEFEFE +skinparam componentStyle rectangle + +title LiveGalGame - LLM 集成架构图 + +' ========== 角色定义 ========== +actor "用户" as User +actor "算法" as 
AlgoEngineer #LightBlue +actor "开发" as DevEngineer #LightGreen + +' ========== 前端层 ========== +package "前端层 Renderer Process" as Frontend #E8F5E9 { + component "Settings.jsx" as SettingsUI + component "useLLMConfig.js" as LLMConfigHook + component "useSuggestions.js" as SuggestionHook + component "useSuggestionConfig.js" as SuggestionCfgHook + component "LLMConfigForm.jsx" as ConfigForm + component "CompactHud.jsx" as HudUI +} + +' ========== IPC 桥接层 ========== +package "IPC 桥接层" as IPCBridge #FFF3E0 { + component "preload.js" as Preload +} + +note right of Preload + 暴露安全的 API: + saveLLMConfig() + testLLMConnection() + generateLLMSuggestions() + detectTopicShift() / startSuggestionStream() + memory.queryProfiles()/queryEvents() +end note + +' ========== 主进程层 ========== +package "主进程层 Main Process" as MainProcess #E3F2FD { + component "llm-handlers.js" as LLMHandlers + component "suggestion-handlers.js" as SuggestionHandlers + component "rate-limiter.js" as RateLimiter + + package "核心服务" as CoreServices #BBDEFB { + component "LLMSuggestionService (单主循环)" as LLMService + component "context-builder.js" as ContextBuilder + component "toon-parser.js" as ToonParser + component "situation-llm (fast 判定)" as SituationLLM + component "auth-manager.js" as AuthManager + component "model-router.js" as ModelRouter + component "llm-resilience.js" as ResiliencePolicy + component "llm-telemetry.js" as Telemetry + } + + package "数据层" as DataLayer #B3E5FC { + component "llm-config.js" as LLMConfigDB + component "suggestion-config.js" as SuggestionConfigDB + note right of SuggestionConfigDB + 新增: + situation_llm_enabled + situation_model_name + end note + database "SQLite" as SQLite + } +} + +' ========== 外部 LLM 服务 ========== +cloud "LLM Provider" as LLMCloud #F3E5F5 { + component "OpenAI API" as OpenAI + component "Azure OpenAI" as AzureOpenAI + component "Ollama" as Ollama + component "vLLM" as vLLM + component "OpenRouter" as OpenRouter + component "Low-latency Model" as FastModel 
+} + +cloud "Memory Service (Sidecar)" as MemorySidecar #F0F4C3 { + component "Profile/Event API (结构化存储)" as MemoryAPI + component "profile_extractor (Model-First)" as ExtractTopics + component "profile_merger (ADD/UPDATE/APPEND/ABORT)" as MergeYolo + component "event_tracker" as EventTracker + note right of MemoryAPI + 仅结构化过滤: topic/sub_topic/tag/time + 无默认向量召回 + 长程摘要按需写入 + end note +} + +cloud "Observability" as Observability #FFFDE7 { + component "日志/指标/追踪" as LogsMetrics +} + +note bottom of LLMCloud + 统一契约: OpenAI Compatible API + POST /chat/completions + 支持 SSE 流式输出 +end note + +' ========== 连接关系 ========== +User --> SettingsUI : 配置 LLM +User --> HudUI : 查看建议 + +SettingsUI --> LLMConfigHook +LLMConfigHook --> ConfigForm +LLMConfigHook --> Preload : IPC invoke + +SuggestionHook --> Preload : IPC send +HudUI --> SuggestionHook +SuggestionCfgHook --> Preload : IPC invoke + +Preload --> RateLimiter +RateLimiter --> LLMHandlers : 调用限流 +RateLimiter --> SuggestionHandlers : 调用限流 + +LLMHandlers --> LLMConfigDB +SuggestionHandlers --> LLMService +SuggestionHandlers --> SuggestionConfigDB + +LLMService --> ContextBuilder : 构建上下文 +LLMService --> ToonParser : 解析响应 +LLMService --> LLMConfigDB : 获取配置 +LLMService --> SituationLLM : 话题/介入判定 +LLMService --> ResiliencePolicy : 超时/重试/熔断 +LLMService --> Telemetry : 记录日志与耗时 +LLMService --> ModelRouter : 模型路由 +LLMService --> AuthManager : 安全管理密钥 +LLMService --> MemoryAPI : 推送/获取画像与事件 +ContextBuilder --> MemoryAPI : 获取 Profile/Event(结构化过滤) + +LLMConfigDB --> SQLite +SuggestionConfigDB --> SQLite + +ModelRouter --> OpenAI : OpenAI SDK +ModelRouter --> AzureOpenAI +ModelRouter --> Ollama +ModelRouter --> vLLM +ModelRouter --> OpenRouter +SituationLLM --> FastModel +AuthManager --> OpenAI +AuthManager --> AzureOpenAI +AuthManager --> OpenRouter +Telemetry --> LogsMetrics + +' ========== 团队职责 ========== +DevEngineer --> Frontend : 负责 +DevEngineer --> IPCBridge : 负责 +DevEngineer --> MainProcess : 负责 + +AlgoEngineer ..> LLMCloud : 部署调优 
+AlgoEngineer ..> LLMService : Prompt设计 + +' ========== 图例 ========== +legend right + |= 颜色 |= 含义 | + |<#E8F5E9>| 前端层 Renderer | + |<#FFF3E0>| IPC 桥接 | + |<#E3F2FD>| 主进程 Main | + |<#F3E5F5>| 外部 LLM 服务 | + |<#F0F4C3>| 外部记忆 Sidecar | + |<#FFFDE7>| 可观测性 | +endlegend + +@enduml diff --git a/desktop/memory-service/README.md b/desktop/memory-service/README.md new file mode 100644 index 0000000..660545b --- /dev/null +++ b/desktop/memory-service/README.md @@ -0,0 +1,77 @@ +# Structured Memory Service (Sidecar) + +面向 LiveGalGame 的轻量级画像 / 事件侧车服务,结构化存储、无默认向量召回,供桌面端通过 `MEMORY_API_BASE_URL` 访问,遵从 Model First 原则。 + +## 功能 +- `GET /profiles`:按 user/project/topic/sub_topic/tag/time 过滤画像 +- `POST /profiles`:新增/更新/追加画像(mode: add|update|append|replace) +- `GET /events`:按 user/project/tag/time 查询事件 +- `POST /events`:写入事件(含 profile_delta) +- `GET /health`:健康检查 + +## 目录 +- `pyproject.toml`:依赖由 uv 管理 +- `main.py`:入口,启动 uvicorn 加载 app +- `app/`:模块化代码 + - `db.py`:引擎 & session + - `models.py`:SQLModel 定义(Profile/Event) + - `schemas.py`:Pydantic I/O 模型 + - `crud/`:业务操作拆分 + - `routes/`:FastAPI 路由 + - `__init__.py`:create_app 注册路由 + +## 快速启动(使用 uv) +```bash +cd desktop/memory-service +uv venv +source .venv/bin/activate # Windows: .\.venv\Scripts\activate +uv sync +uv run main.py # 默认 0.0.0.0:8000 +``` + +可通过环境变量调整: +- `PORT`:端口,默认 8000 +- `MEMORY_DB_PATH`:SQLite 路径,默认 ./memory.db + +## 与桌面端联调 +1) 启动本服务:`uv run main.py` +2) 启动 Electron 应用前设置:`export MEMORY_API_BASE_URL=http://127.0.0.1:8000/` +3) 渲染层可调用: + - `window.electronAPI.memoryQueryProfiles({ userId, projectId, topic, sub_topic, tag, time_from, time_to, limit })` + - `window.electronAPI.memoryQueryEvents({ userId, projectId, tag, time_from, time_to, limit })` + +## 发行版建议(自动配置最佳实践) +首选方案:将 Memory Service 打成独立可执行文件并随 Electron 分发,启动时自动托管,用户无需安装 Python。 + +推荐步骤: +1. 
使用 PyInstaller 生成单文件可执行(示例): + ```bash + cd desktop/memory-service + uv venv && source .venv/bin/activate + uv sync + pyinstaller --onefile --name memory-service main.py + ``` + 产物位于 `dist/memory-service`(macOS arm64 约 20–40 MB)。 + +2. 在 Electron 主进程启动时: + - 检测 `resources/memory-service/memory-service` 是否存在;若无则解压/复制。 + - 选取空闲端口(如 18000+随机),`spawn` 可执行并设置 `MEMORY_API_BASE_URL=http://127.0.0.1:<PORT>/` 后再创建窗口。 + - 调用 `/health` 探活,进程退出时清理。 + +3. macOS 发布需 codesign/notarize;未签名时用户需“右键打开”绕过 Gatekeeper。 + +4. 开发者仍可用 uv 启动,保持兼容。 + +## 设计要点 +- 结构化过滤为主,不依赖向量检索;需要向量召回时可另行扩展。 +- 简单 SQLite 持久化,便于本地开发;如需多实例或持久化升级,可替换为 Postgres,修改 `create_engine` 连接串即可。 +- 默认 80% 逻辑交给模型:侧车只提供 CRUD 与过滤,不做复杂合并;合并策略留给上层模型或额外的 profile_merger 服务。 + +## 开发/测试便捷命令(可选) +```bash +# 本地运行 +uv run main.py + +# 调试请求 +curl 'http://127.0.0.1:8000/health' +``` diff --git a/desktop/memory-service/app/__init__.py b/desktop/memory-service/app/__init__.py new file mode 100644 index 0000000..0d3bd5b --- /dev/null +++ b/desktop/memory-service/app/__init__.py @@ -0,0 +1,23 @@ +from fastapi import FastAPI + +from .routes.event_routes import router as event_router +from .routes.profile_routes import router as profile_router +from .routes.health import router as health_router +from .db import init_db + + +def create_app() -> FastAPI: + app = FastAPI(title="Structured Memory Service", version="0.1.0") + + # 初始化数据库(建表) + init_db() + + app.include_router(health_router) + app.include_router(profile_router, prefix="") + app.include_router(event_router, prefix="") + + return app + + +app = create_app() + diff --git a/desktop/memory-service/app/crud/__init__.py b/desktop/memory-service/app/crud/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/desktop/memory-service/app/crud/events.py b/desktop/memory-service/app/crud/events.py new file mode 100644 index 0000000..1071351 --- /dev/null +++ b/desktop/memory-service/app/crud/events.py @@ -0,0 +1,57 @@ +from typing import List, Optional + +from sqlmodel import Session, 
select + +from ..models import Event +from .profiles import list_to_csv + + +def csv_to_list(text: Optional[str]) -> List[str]: + if not text: + return [] + return [i.strip() for i in text.split(",") if i.strip()] + + +def query_events( + session: Session, + *, + user_id: str, + project_id: Optional[str] = None, + topics: Optional[List[str]] = None, + tags: Optional[List[str]] = None, + time_from: Optional[int] = None, + time_to: Optional[int] = None, + limit: int = 50, +): + stmt = select(Event) + if user_id: + stmt = stmt.where(Event.user_id == user_id) + if project_id: + stmt = stmt.where(Event.project_id == project_id) + if topics: + for t in topics: + stmt = stmt.where(Event.profile_delta.contains(t)) + if tags: + for t in tags: + stmt = stmt.where(Event.event_tags.contains(t)) + if time_from: + stmt = stmt.where(Event.timestamp >= time_from) + if time_to: + stmt = stmt.where(Event.timestamp <= time_to) + stmt = stmt.order_by(Event.timestamp.desc()).limit(limit) + return session.exec(stmt).all() + + +def create_event(session: Session, payload, now_ms_fn) -> Event: + event = Event( + user_id=payload.user_id, + project_id=payload.project_id, + event_tip=payload.event_tip, + event_tags=list_to_csv(payload.event_tags), + profile_delta=(payload.profile_delta and str(payload.profile_delta)) or None, + timestamp=payload.timestamp or now_ms_fn(), + ) + session.add(event) + session.commit() + session.refresh(event) + return event diff --git a/desktop/memory-service/app/crud/profiles.py b/desktop/memory-service/app/crud/profiles.py new file mode 100644 index 0000000..367d94d --- /dev/null +++ b/desktop/memory-service/app/crud/profiles.py @@ -0,0 +1,105 @@ +from typing import List, Optional + +from sqlmodel import Session, select + +from ..models import Profile, now_ms + + +def list_to_csv(items: Optional[List[str]]) -> Optional[str]: + if not items: + return None + return ",".join([i.strip() for i in items if i and i.strip()]) or None + + +def csv_to_list(text: 
Optional[str]) -> List[str]: + if not text: + return [] + return [i.strip() for i in text.split(",") if i.strip()] + + +def query_profiles( + session: Session, + *, + user_id: str, + project_id: Optional[str] = None, + topics: Optional[List[str]] = None, + sub_topics: Optional[List[str]] = None, + tags: Optional[List[str]] = None, + time_from: Optional[int] = None, + time_to: Optional[int] = None, + limit: int = 50, +): + stmt = select(Profile) + if user_id: + stmt = stmt.where(Profile.user_id == user_id) + if project_id: + stmt = stmt.where(Profile.project_id == project_id) + if topics: + stmt = stmt.where(Profile.topic.in_(topics)) + if sub_topics: + stmt = stmt.where(Profile.sub_topic.in_(sub_topics)) + if tags: + for t in tags: + stmt = stmt.where(Profile.tags.contains(t)) + if time_from: + stmt = stmt.where(Profile.updated_at >= time_from) + if time_to: + stmt = stmt.where(Profile.updated_at <= time_to) + stmt = stmt.order_by(Profile.updated_at.desc()).limit(limit) + return session.exec(stmt).all() + + +def upsert_profile(session: Session, payload) -> Profile: + stmt = ( + select(Profile) + .where(Profile.user_id == payload.user_id) + .where(Profile.topic == payload.topic) + .where(Profile.sub_topic == payload.sub_topic) + ) + if payload.project_id: + stmt = stmt.where(Profile.project_id == payload.project_id) + + existing = session.exec(stmt).first() + tags_csv = list_to_csv(payload.tags) + + if existing: + if payload.mode == "add": + new_profile = Profile( + user_id=payload.user_id, + project_id=payload.project_id, + topic=payload.topic, + sub_topic=payload.sub_topic, + content=payload.content, + tags=tags_csv, + ) + session.add(new_profile) + session.commit() + session.refresh(new_profile) + return new_profile + + if payload.mode in {"update", "replace"}: + existing.content = payload.content + elif payload.mode == "append": + sep = "; " if existing.content else "" + existing.content = f"{existing.content}{sep}{payload.content}" if existing.content else 
payload.content + + if tags_csv: + existing.tags = tags_csv + existing.updated_at = now_ms() + session.add(existing) + session.commit() + session.refresh(existing) + return existing + + new_profile = Profile( + user_id=payload.user_id, + project_id=payload.project_id, + topic=payload.topic, + sub_topic=payload.sub_topic, + content=payload.content, + tags=tags_csv, + ) + session.add(new_profile) + session.commit() + session.refresh(new_profile) + return new_profile diff --git a/desktop/memory-service/app/db.py b/desktop/memory-service/app/db.py new file mode 100644 index 0000000..942b676 --- /dev/null +++ b/desktop/memory-service/app/db.py @@ -0,0 +1,20 @@ +import os +from sqlmodel import SQLModel, create_engine, Session + + +DB_PATH = os.environ.get( + "MEMORY_DB_PATH", + os.path.abspath(os.path.join(os.path.dirname(__file__), "../memory.db")) +) +DATABASE_URL = f"sqlite:///{DB_PATH}" + +engine = create_engine(DATABASE_URL) + + +def init_db(): + SQLModel.metadata.create_all(engine) + + +def get_session(): + with Session(engine) as session: + yield session diff --git a/desktop/memory-service/app/models.py b/desktop/memory-service/app/models.py new file mode 100644 index 0000000..78fa68a --- /dev/null +++ b/desktop/memory-service/app/models.py @@ -0,0 +1,33 @@ +import time +import uuid +from typing import Optional + +from sqlmodel import Field, SQLModel + + +def now_ms() -> int: + return int(time.time() * 1000) + + +class Profile(SQLModel, table=True): + id: str = Field(default_factory=lambda: str(uuid.uuid4()), primary_key=True) + user_id: str = Field(index=True) + project_id: Optional[str] = Field(default=None, index=True) + topic: str = Field(index=True) + sub_topic: Optional[str] = Field(default=None, index=True) + content: str + tags: Optional[str] = Field(default=None, index=True) # comma-separated + updated_at: int = Field(default_factory=now_ms, index=True) + created_at: int = Field(default_factory=now_ms) + + +class Event(SQLModel, table=True): + id: str = 
Field(default_factory=lambda: str(uuid.uuid4()), primary_key=True) + user_id: str = Field(index=True) + project_id: Optional[str] = Field(default=None, index=True) + event_tip: str + event_tags: Optional[str] = Field(default=None, index=True) # comma-separated + profile_delta: Optional[str] = None # JSON string + timestamp: int = Field(default_factory=now_ms, index=True) + created_at: int = Field(default_factory=now_ms) + diff --git a/desktop/memory-service/app/routes/__init__.py b/desktop/memory-service/app/routes/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/desktop/memory-service/app/routes/event_routes.py b/desktop/memory-service/app/routes/event_routes.py new file mode 100644 index 0000000..5006506 --- /dev/null +++ b/desktop/memory-service/app/routes/event_routes.py @@ -0,0 +1,63 @@ +from typing import List, Optional + +from fastapi import APIRouter, Depends, Query +from sqlmodel import Session + +from ..crud import events as event_crud +from ..db import get_session +from ..models import now_ms +from ..schemas import EventCreate, EventRead + + +router = APIRouter() + + +@router.get("/events", response_model=List[EventRead]) +async def query_events( + user_id: str = Query(...), + project_id: Optional[str] = Query(None), + topic: Optional[List[str]] = Query(None), + tag: Optional[List[str]] = Query(None), + time_from: Optional[int] = Query(None), + time_to: Optional[int] = Query(None), + limit: int = Query(50, ge=1, le=500), + session: Session = Depends(get_session), +): + rows = event_crud.query_events( + session, + user_id=user_id, + project_id=project_id, + topics=topic, + tags=tag, + time_from=time_from, + time_to=time_to, + limit=limit, + ) + return [ + EventRead( + id=e.id, + user_id=e.user_id, + project_id=e.project_id, + event_tip=e.event_tip, + event_tags=event_crud.csv_to_list(e.event_tags), + profile_delta=e.profile_delta, + timestamp=e.timestamp, + created_at=e.created_at, + ) + for e in rows + ] + + +@router.post("/events", 
response_model=EventRead) +async def create_event(payload: EventCreate, session: Session = Depends(get_session)): + e = event_crud.create_event(session, payload, now_ms_fn=now_ms) + return EventRead( + id=e.id, + user_id=e.user_id, + project_id=e.project_id, + event_tip=e.event_tip, + event_tags=event_crud.csv_to_list(e.event_tags), + profile_delta=e.profile_delta, + timestamp=e.timestamp, + created_at=e.created_at, + ) diff --git a/desktop/memory-service/app/routes/health.py b/desktop/memory-service/app/routes/health.py new file mode 100644 index 0000000..be0c6c9 --- /dev/null +++ b/desktop/memory-service/app/routes/health.py @@ -0,0 +1,12 @@ +from fastapi import APIRouter + +from ..db import DB_PATH + + +router = APIRouter() + + +@router.get("/health") +async def health(): + return {"status": "ok", "db": DB_PATH} + diff --git a/desktop/memory-service/app/routes/profile_routes.py b/desktop/memory-service/app/routes/profile_routes.py new file mode 100644 index 0000000..b10d060 --- /dev/null +++ b/desktop/memory-service/app/routes/profile_routes.py @@ -0,0 +1,67 @@ +from typing import List, Optional + +from fastapi import APIRouter, Depends, Query +from sqlmodel import Session + +from ..crud import profiles as profile_crud +from ..db import get_session +from ..schemas import ProfileRead, ProfileUpsert + + +router = APIRouter() + + +@router.get("/profiles", response_model=List[ProfileRead]) +async def query_profiles( + user_id: str = Query(..., description="User ID"), + project_id: Optional[str] = Query(None), + topic: Optional[List[str]] = Query(None), + sub_topic: Optional[List[str]] = Query(None), + tag: Optional[List[str]] = Query(None), + time_from: Optional[int] = Query(None), + time_to: Optional[int] = Query(None), + limit: int = Query(50, ge=1, le=200), + session: Session = Depends(get_session), +): + rows = profile_crud.query_profiles( + session, + user_id=user_id, + project_id=project_id, + topics=topic, + sub_topics=sub_topic, + tags=tag, + 
time_from=time_from, + time_to=time_to, + limit=limit, + ) + return [ + ProfileRead( + id=p.id, + user_id=p.user_id, + project_id=p.project_id, + topic=p.topic, + sub_topic=p.sub_topic, + content=p.content, + tags=profile_crud.csv_to_list(p.tags), + updated_at=p.updated_at, + created_at=p.created_at, + ) + for p in rows + ] + + +@router.post("/profiles", response_model=ProfileRead) +async def upsert_profile(payload: ProfileUpsert, session: Session = Depends(get_session)): + p = profile_crud.upsert_profile(session, payload) + return ProfileRead( + id=p.id, + user_id=p.user_id, + project_id=p.project_id, + topic=p.topic, + sub_topic=p.sub_topic, + content=p.content, + tags=profile_crud.csv_to_list(p.tags), + updated_at=p.updated_at, + created_at=p.created_at, + ) + diff --git a/desktop/memory-service/app/schemas.py b/desktop/memory-service/app/schemas.py new file mode 100644 index 0000000..ca1c182 --- /dev/null +++ b/desktop/memory-service/app/schemas.py @@ -0,0 +1,54 @@ +from typing import List, Optional + +from pydantic import BaseModel, field_validator + + +class ProfileUpsert(BaseModel): + user_id: str + project_id: Optional[str] = None + topic: str + sub_topic: Optional[str] = None + content: str + tags: Optional[List[str]] = None + mode: str = "append" # add|update|append|replace + + @field_validator("mode") + @classmethod + def validate_mode(cls, v): + allowed = {"add", "update", "append", "replace"} + if v not in allowed: + raise ValueError(f"mode must be one of {allowed}") + return v + + +class EventCreate(BaseModel): + user_id: str + project_id: Optional[str] = None + event_tip: str + event_tags: Optional[List[str]] = None + profile_delta: Optional[dict] = None + timestamp: Optional[int] = None + + +class ProfileRead(BaseModel): + id: str + user_id: str + project_id: Optional[str] + topic: str + sub_topic: Optional[str] + content: str + tags: Optional[List[str]] + updated_at: int + created_at: int + + +class EventRead(BaseModel): + id: str + user_id: str + 
project_id: Optional[str] + event_tip: str + event_tags: Optional[List[str]] + profile_delta: Optional[str] + timestamp: int + created_at: int + diff --git a/desktop/memory-service/main.py b/desktop/memory-service/main.py new file mode 100644 index 0000000..8b8bf70 --- /dev/null +++ b/desktop/memory-service/main.py @@ -0,0 +1,19 @@ +import os + +import uvicorn + +from app import app + + +def run(): + uvicorn.run( + "app:app", + host=os.environ.get("HOST", "0.0.0.0"), + port=int(os.environ.get("PORT", 8000)), + reload=os.environ.get("RELOAD", "false").lower() == "true", + ) + + +if __name__ == "__main__": + run() + diff --git a/desktop/memory-service/pyproject.toml b/desktop/memory-service/pyproject.toml new file mode 100644 index 0000000..11f50be --- /dev/null +++ b/desktop/memory-service/pyproject.toml @@ -0,0 +1,18 @@ +[project] +name = "structured-memory-service" +version = "0.1.0" +description = "Lightweight structured profile/event memory sidecar for LiveGalGame" +requires-python = ">=3.10" +dependencies = [ + "fastapi==0.115.5", + "uvicorn==0.32.1", + "sqlmodel==0.0.22", + "python-multipart==0.0.9" +] + +[tool.uv] +dev-dependencies = [] + +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" diff --git a/desktop/package.json b/desktop/package.json index 1880f71..f68d49b 100644 --- a/desktop/package.json +++ b/desktop/package.json @@ -6,24 +6,24 @@ "main": "src/main.js", "scripts": { "predev": "node scripts/ensure-port-free.js", - "dev": "pnpm run predev && concurrently -k \"vite\" \"npm run dev:electron\"", - "dev:electron": "cross-env NODE_ENV=development electron . --enable-logging", + "dev": "chcp 65001 > nul && pnpm run predev && concurrently -k \"vite\" \"npm run dev:electron\"", + "dev:log": "node scripts/dev-log.js", + "dev:electron": "cross-env NODE_ENV=development PYTHONIOENCODING=utf-8 electron . 
--enable-logging", "dev:vite": "vite", "download-models": "node scripts/download-models.js", - "download-ggml-models": "node scripts/download-ggml-models.js", - "download-belle-whisper": "node scripts/download-belle-whisper-model.js", "update-asr-model": "node scripts/update-asr-model.js", - "setup-whisper-cpp": "bash scripts/setup-whisper-cpp.sh", "setup-funasr": "bash scripts/setup-funasr.sh", "build:native": "node-gyp rebuild --directory=src/native/system-audio-capture", "rebuild:better-sqlite3": "electron-rebuild -f -w better-sqlite3", "rebuild:native": "electron-rebuild -f -w system_audio_capture", "postinstall": "pnpm run rebuild:better-sqlite3", - "prebuild": "npm run download-ggml-models", - "build": "vite build && electron-builder", + "prepare:python": "node scripts/prepare-python-env.js", + "build:backend": "node scripts/build-backend.js", + "prebuild": "node scripts/prebuild.js", + "build": "npm run prebuild && vite build && electron-builder", "build:vite": "vite build", - "build:win": "vite build && electron-builder --win", - "build:mac": "vite build && electron-builder --mac", + "build:win": "npm run prebuild && vite build && electron-builder --win", + "build:mac": "npm run prebuild && vite build && electron-builder --mac", "preview": "vite preview", "test:asr": "cross-env NODE_ENV=test ALL_PROXY=socks5h://127.0.0.1:13659 LIVEGALGAME_DB_PATH=./data/test-livegalgame.db electron tests/asr-pipeline.test.js", "test:settings-logs": "electron tests/test-settings-system-audio-logs.js", @@ -48,7 +48,6 @@ "autoprefixer": "^10.4.22", "concurrently": "^9.1.0", "cross-env": "^10.1.0", - "kill-port": "^2.0.1", "electron": "^35.7.5", "electron-builder": "^25.1.8", "postcss": "^8.5.6", @@ -58,40 +57,71 @@ }, "dependencies": { "@ricky0123/vad-web": "^0.0.29", + "@toon-format/toon": "^2.0.1", "@xenova/transformers": "^2.17.2", "better-sqlite3": "^12.4.1", "electron-audio-loopback": "^1.0.6", "electron-store": "^8.2.0", "node-addon-api": "^7.0.0", "openai": "^6.9.0", + 
"kill-port": "^2.0.1", + "portfinder": "^1.0.32", "react": "^18.3.1", "react-dom": "^18.3.1", "react-router-dom": "^6.28.0", "sharp": "0.34.5", + "tree-kill": "^1.2.2", "ws": "^8.18.3" }, "build": { "appId": "com.livegalgame.desktop", "productName": "LiveGalGame Desktop", "directories": { - "output": "dist" + "output": "release" }, "files": [ "dist/renderer/**/*", + "data/**/*", "src/main.js", "src/preload.js", - "src/db/**/*", + "src/core/**/*", "src/asr/**/*", + "src/shared/**/*", + "src/utils/**/*", + "src/db/**/*", "src/native/**/*", + "backend/**/*", "models/**/*", - "third_party/whisper.cpp/examples/addon.node/build/**/*", "package.json" ], + "asarUnpack": [ + "backend/asr/**/*.py", + "backend/asr/**/*.json", + "backend/asr/**/*.bin" + ], + "extraResources": [ + { + "from": "data", + "to": "data" + }, + { + "from": "backend/dist/asr-backend", + "to": "backend/asr-backend", + "filter": [ + "**/*" + ] + } + ], "mac": { - "category": "public.app-category.productivity" + "category": "public.app-category.productivity", + "target": "dmg" }, "win": { "target": "nsis" + }, + "nsis": { + "oneClick": false, + "allowToChangeInstallationDirectory": true } } } \ No newline at end of file diff --git a/desktop/pnpm-lock.yaml b/desktop/pnpm-lock.yaml index 098973d..4c3b171 100644 --- a/desktop/pnpm-lock.yaml +++ b/desktop/pnpm-lock.yaml @@ -11,6 +11,9 @@ importers: '@ricky0123/vad-web': specifier: ^0.0.29 version: 0.0.29 + '@toon-format/toon': + specifier: ^2.0.1 + version: 2.1.0 '@xenova/transformers': specifier: ^2.17.2 version: 2.17.2 @@ -23,12 +26,18 @@ importers: electron-store: specifier: ^8.2.0 version: 8.2.0 + kill-port: + specifier: ^2.0.1 + version: 2.0.1 node-addon-api: specifier: ^7.0.0 version: 7.1.1 openai: specifier: ^6.9.0 version: 6.9.1(ws@8.18.3) + portfinder: + specifier: ^1.0.32 + version: 1.0.38 react: specifier: ^18.3.1 version: 18.3.1 @@ -41,6 +50,9 @@ importers: sharp: specifier: 0.34.5 version: 0.34.5 + tree-kill: + specifier: ^1.2.2 + version: 1.2.2 
ws: specifier: ^8.18.3 version: 8.18.3 @@ -75,9 +87,6 @@ importers: electron-builder: specifier: ^25.1.8 version: 25.1.8(electron-builder-squirrel-windows@25.1.8) - kill-port: - specifier: ^2.0.1 - version: 2.0.1 postcss: specifier: ^8.5.6 version: 8.5.6 @@ -784,6 +793,9 @@ packages: resolution: {integrity: sha512-4BAffykYOgO+5nzBWYwE3W90sBgLJoUPRWWcL8wlyiM8IB8ipJz3UMJ9KXQd1RKQXpKp8Tutn80HZtWsu2u76w==} engines: {node: '>=10'} + '@toon-format/toon@2.1.0': + resolution: {integrity: sha512-JwWptdF5eOA0HaQxbKAzkpQtR4wSWTEfDlEy/y3/4okmOAX1qwnpLZMmtEWr+ncAhTTY1raCKH0kteHhSXnQqg==} + '@tootallnate/once@2.0.0': resolution: {integrity: sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==} engines: {node: '>= 10'} @@ -2310,6 +2322,10 @@ packages: resolution: {integrity: sha512-uysumyrvkUX0rX/dEVqt8gC3sTBzd4zoWfLeS29nb53imdaXVvLINYXTI2GNqzaMuvacNx4uJQ8+b3zXR0pkgQ==} engines: {node: '>=10.4.0'} + portfinder@1.0.38: + resolution: {integrity: sha512-rEwq/ZHlJIKw++XtLAO8PPuOQA/zaPJOZJ37BVuN97nLpMJeuDVLVGRwbFoBgLudgdTMP2hdRJP++H+8QOA3vg==} + engines: {node: '>= 10.12'} + postcss-import@15.1.0: resolution: {integrity: sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==} engines: {node: '>=14.0.0'} @@ -3558,6 +3574,8 @@ snapshots: dependencies: defer-to-connect: 2.0.1 + '@toon-format/toon@2.1.0': {} + '@tootallnate/once@2.0.0': {} '@types/babel__core@7.20.5': @@ -5288,6 +5306,13 @@ snapshots: base64-js: 1.5.1 xmlbuilder: 15.1.1 + portfinder@1.0.38: + dependencies: + async: 3.2.6 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + postcss-import@15.1.0(postcss@8.5.6): dependencies: postcss: 8.5.6 diff --git a/desktop/pnpm-workspace.yaml b/desktop/pnpm-workspace.yaml index 6bc03d6..ad058b4 100644 --- a/desktop/pnpm-workspace.yaml +++ b/desktop/pnpm-workspace.yaml @@ -1,3 +1,5 @@ +packages: + - . 
ignoredBuiltDependencies: - better-sqlite3 - electron diff --git a/desktop/requirements.txt b/desktop/requirements.txt new file mode 100644 index 0000000..9181e86 --- /dev/null +++ b/desktop/requirements.txt @@ -0,0 +1,18 @@ +funasr-onnx==0.4.1 +soundfile>=0.12.1 +numpy>=1.26.4,<2 +huggingface_hub>=0.20.0 +requests[socks]>=2.31.0 +httpx[socks]>=0.27.0 +fastapi>=0.115.0 +uvicorn[standard]>=0.30.0 +websockets>=12.0 +pyinstaller>=6.3.0 +python-multipart>=0.0.9 +onnxruntime==1.21.1 + +# FunASR ONNX 运行时依赖(funasr_onnx 内部会 import) +jieba>=0.42.1 +torch +modelscope + diff --git a/desktop/scripts/build-backend.js b/desktop/scripts/build-backend.js new file mode 100644 index 0000000..3bba8d3 --- /dev/null +++ b/desktop/scripts/build-backend.js @@ -0,0 +1,115 @@ +/** + * Build Python backend (FastAPI + workers) via PyInstaller. + * - 打包 main.py 为主入口 (asr-backend) + * - 同时打包每个 worker 为独立可执行文件 (asr-funasr-worker) + * - Windows: onefile exe + * - macOS/Linux: onedir + */ +import fs from 'fs'; +import { execSync } from 'child_process'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); +const projectRoot = path.resolve(__dirname, '..'); + +function resolvePython() { + // 优先显式传入的 ASR_PYTHON_PATH(CI 已指向 python-env) + if (process.env.ASR_PYTHON_PATH) { + return process.env.ASR_PYTHON_PATH; + } + if (process.env.PYTHON) { + return process.env.PYTHON; + } + // 尝试使用项目内的 python-env + const venvPy = process.platform === 'win32' + ? path.join(projectRoot, 'python-env', 'Scripts', 'python.exe') + : path.join(projectRoot, 'python-env', 'bin', 'python3'); + if (fs.existsSync(venvPy)) { + return venvPy; + } + return process.platform === 'win32' ? 
'python' : 'python3'; +} + +const pythonCmd = resolvePython(); + +const backendDir = path.join(projectRoot, 'backend'); +const asrDir = path.join(backendDir, 'asr'); +const distDir = path.join(backendDir, 'dist'); +const buildDir = path.join(backendDir, 'build'); +const entryFile = path.join(backendDir, 'main.py'); +const isWin = process.platform === 'win32'; + +function run(cmd) { + execSync(cmd, { + stdio: 'inherit', + cwd: projectRoot, + env: { + ...process.env, + // 解决 macOS 上 PyInstaller 分析 torch 时 OpenMP 库冲突问题 + // OMP: Error #15: Initializing libomp.dylib, but found libomp.dylib already initialized. + KMP_DUPLICATE_LIB_OK: 'TRUE', + }, + }); +} + +function ensureDirs() { + [backendDir, distDir, buildDir].forEach((dir) => fs.mkdirSync(dir, { recursive: true })); +} + +function main() { + console.log(`[build-backend] using python: ${pythonCmd}`); + console.log(`[build-backend] entry: ${entryFile}`); + ensureDirs(); + + // 单入口打包:仅打包 main.py,强制使用 onedir(避免 onefile 重复解包体积) + console.log('[build-backend] Step 1: Building asr-backend (single onedir) ...'); + + const dataSep = isWin ? 
';' : ':'; // PyInstaller add-data 分隔符 + const mainArgs = [ + `"${pythonCmd}"`, + '-m PyInstaller', + '--clean', + '-y', + '--name asr-backend', + `--distpath "${distDir}"`, + `--workpath "${buildDir}"`, + // 打包 asr 目录,便于运行时子进程直接调用 python 脚本(不再构建独立 worker 可执行文件) + `--add-data "${asrDir}${dataSep}asr"`, + // 隐式依赖收集:确保 funasr_onnx 等在主包中一次性收集 + '--collect-submodules funasr_onnx', + '--collect-submodules jieba', + '--collect-submodules ctranslate2', + '--collect-submodules tokenizers', + '--collect-submodules sentencepiece', + '--collect-all jieba', + '--collect-all ctranslate2', + '--collect-all tokenizers', + '--collect-all sentencepiece', + '--collect-all numpy', + '--hidden-import funasr_onnx', + '--hidden-import jieba', + ]; + + // 统一使用 onedir,避免 onefile 的压缩/解压开销 + const mainModeArgs = ['--onedir']; + const mainCmd = [...mainArgs, ...mainModeArgs, `"${entryFile}"`].join(' '); + console.log(`[build-backend] PyInstaller cmd: ${mainCmd}`); + run(mainCmd); + + // 输出列表 + console.log('[build-backend] Listing final artifacts:'); + const targetDir = path.join(distDir, 'asr-backend'); + if (fs.existsSync(targetDir)) { + const files = fs.readdirSync(targetDir); + files.forEach((f) => console.log(` - ${f}`)); + } else { + console.warn(`[build-backend] targetDir not found: ${targetDir}`); + } + + console.log('[build-backend] done'); +} + +main(); + diff --git a/desktop/scripts/complex-test-data.sql b/desktop/scripts/complex-test-data.sql new file mode 100644 index 0000000..b6f8b4f --- /dev/null +++ b/desktop/scripts/complex-test-data.sql @@ -0,0 +1,476 @@ +-- Test Data for Conversation Review +PRAGMA foreign_keys = ON; +INSERT OR IGNORE INTO characters (id, name, nickname, relationship_label, avatar_color, affinity, created_at, updated_at) +VALUES ('complex-test-girl', '林舒涵', '舒涵', '青梅竹马', '#ff85c0', 65, 1766477258405, 1766477258405); +INSERT INTO conversations (id, character_id, title, date, created_at, updated_at) +VALUES ('conv-complex-1766477258405', 'complex-test-girl', 
'关于未来的深夜长谈', 1766470058405, 1766477258405, 1766477258405); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-0', 'conv-complex-1766477258405', 'character', '在忙吗?', 1766470069551, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-1', 'conv-complex-1766477258405', 'user', '刚忙完,正打算刷会儿手机,怎么啦?', 1766470079354, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-2', 'conv-complex-1766477258405', 'character', '没什么,今天去吃了你说的那家甜品店,草莓大福真的超好吃!', 1766470087618, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-3', 'conv-complex-1766477258405', 'user', '哈哈我就说吧,那家店是老字号了。', 1766470100248, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-4', 'conv-complex-1766477258405', 'character', '明天天气好像不太好呢。', 1766470110971, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-5', 'conv-complex-1766477258405', 'user', '是吗?我看预报说是多云。', 1766470116106, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-6', 'conv-complex-1766477258405', 'character', '但我刚才看又有雷阵雨预警了。', 1766470127422, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-7', 'conv-complex-1766477258405', 'user', '那出门得记得带伞。', 1766470146835, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-8', 'conv-complex-1766477258405', 'character', '嗯我知道啦。', 1766470158588, 1); +INSERT INTO messages (id, conversation_id, 
sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-9', 'conv-complex-1766477258405', 'character', '明天天气好像不太好呢。', 1766470170284, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-10', 'conv-complex-1766477258405', 'user', '是吗?我看预报说是多云。', 1766470189391, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-11', 'conv-complex-1766477258405', 'character', '但我刚才看又有雷阵雨预警了。', 1766470202595, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-12', 'conv-complex-1766477258405', 'user', '那出门得记得带伞。', 1766470215800, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-13', 'conv-complex-1766477258405', 'character', '嗯我知道啦。', 1766470230435, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-14', 'conv-complex-1766477258405', 'character', '明天天气好像不太好呢。', 1766470240390, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-15', 'conv-complex-1766477258405', 'user', '是吗?我看预报说是多云。', 1766470252201, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-16', 'conv-complex-1766477258405', 'character', '但我刚才看又有雷阵雨预警了。', 1766470263707, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-17', 'conv-complex-1766477258405', 'user', '那出门得记得带伞。', 1766470281846, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-18', 
'conv-complex-1766477258405', 'character', '嗯我知道啦。', 1766470294496, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-19', 'conv-complex-1766477258405', 'character', '明天天气好像不太好呢。', 1766470312833, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-20', 'conv-complex-1766477258405', 'user', '是吗?我看预报说是多云。', 1766470318700, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-21', 'conv-complex-1766477258405', 'character', '但我刚才看又有雷阵雨预警了。', 1766470334323, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-22', 'conv-complex-1766477258405', 'user', '那出门得记得带伞。', 1766470349788, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-23', 'conv-complex-1766477258405', 'character', '嗯我知道啦。', 1766470365875, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-24', 'conv-complex-1766477258405', 'character', '明天天气好像不太好呢。', 1766470385172, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-25', 'conv-complex-1766477258405', 'user', '是吗?我看预报说是多云。', 1766470400246, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-26', 'conv-complex-1766477258405', 'character', '但我刚才看又有雷阵雨预警了。', 1766470410964, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-27', 'conv-complex-1766477258405', 'user', '那出门得记得带伞。', 1766470423597, 0); +INSERT INTO messages (id, 
conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-28', 'conv-complex-1766477258405', 'character', '嗯我知道啦。', 1766470435533, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-29', 'conv-complex-1766477258405', 'character', '明天天气好像不太好呢。', 1766470445801, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-30', 'conv-complex-1766477258405', 'user', '是吗?我看预报说是多云。', 1766470465068, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-31', 'conv-complex-1766477258405', 'character', '但我刚才看又有雷阵雨预警了。', 1766470480671, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-32', 'conv-complex-1766477258405', 'user', '那出门得记得带伞。', 1766470487624, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-33', 'conv-complex-1766477258405', 'character', '嗯我知道啦。', 1766470503260, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-34', 'conv-complex-1766477258405', 'character', '明天天气好像不太好呢。', 1766470514240, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-35', 'conv-complex-1766477258405', 'user', '是吗?我看预报说是多云。', 1766470528207, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-36', 'conv-complex-1766477258405', 'character', '但我刚才看又有雷阵雨预警了。', 1766470537001, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-37', 
'conv-complex-1766477258405', 'user', '那出门得记得带伞。', 1766470546307, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-38', 'conv-complex-1766477258405', 'character', '嗯我知道啦。', 1766470558467, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-39', 'conv-complex-1766477258405', 'character', '明天天气好像不太好呢。', 1766470572307, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-40', 'conv-complex-1766477258405', 'character', '对了,你还记得咱们大二那年去洱海骑行吗?', 1766470581201, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-41', 'conv-complex-1766477258405', 'user', '当然记得,那天我晒得跟黑炭一样。', 1766470601050, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-42', 'conv-complex-1766477258405', 'character', '你还好意思说,我那天可是提醒过你要涂防晒的。', 1766470610118, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-43', 'conv-complex-1766477258405', 'character', '其实我偶尔还会翻出那时候的照片看,大家都好青涩。 (2)', 1766470623017, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-44', 'conv-complex-1766477258405', 'user', '那时候还没这么多烦心事,每天就想着晚上去哪儿吃。 (3)', 1766470636319, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-45', 'conv-complex-1766477258405', 'character', '那时候咱们真是有精力,骑了大半个洱海。 (4)', 1766470647783, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-46', 'conv-complex-1766477258405', 'user', 
'现在的我估计骑五公里就要求饶了。 (5)', 1766470657886, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-47', 'conv-complex-1766477258405', 'character', '其实我偶尔还会翻出那时候的照片看,大家都好青涩。 (6)', 1766470668037, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-48', 'conv-complex-1766477258405', 'user', '那时候还没这么多烦心事,每天就想着晚上去哪儿吃。 (7)', 1766470683980, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-49', 'conv-complex-1766477258405', 'character', '那时候咱们真是有精力,骑了大半个洱海。 (8)', 1766470691379, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-50', 'conv-complex-1766477258405', 'user', '现在的我估计骑五公里就要求饶了。 (9)', 1766470699533, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-51', 'conv-complex-1766477258405', 'character', '其实我偶尔还会翻出那时候的照片看,大家都好青涩。 (10)', 1766470709264, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-52', 'conv-complex-1766477258405', 'user', '那时候还没这么多烦心事,每天就想着晚上去哪儿吃。 (11)', 1766470725094, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-53', 'conv-complex-1766477258405', 'character', '那时候咱们真是有精力,骑了大半个洱海。 (12)', 1766470737026, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-54', 'conv-complex-1766477258405', 'user', '现在的我估计骑五公里就要求饶了。 (13)', 1766470748614, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-55', 'conv-complex-1766477258405', 
'character', '其实我偶尔还会翻出那时候的照片看,大家都好青涩。 (14)', 1766470758427, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-56', 'conv-complex-1766477258405', 'user', '那时候还没这么多烦心事,每天就想着晚上去哪儿吃。 (15)', 1766470771100, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-57', 'conv-complex-1766477258405', 'character', '那时候咱们真是有精力,骑了大半个洱海。 (16)', 1766470779544, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-58', 'conv-complex-1766477258405', 'user', '现在的我估计骑五公里就要求饶了。 (17)', 1766470787440, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-59', 'conv-complex-1766477258405', 'character', '其实我偶尔还会翻出那时候的照片看,大家都好青涩。 (18)', 1766470794224, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-60', 'conv-complex-1766477258405', 'user', '那时候还没这么多烦心事,每天就想着晚上去哪儿吃。 (19)', 1766470799720, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-61', 'conv-complex-1766477258405', 'character', '那时候咱们真是有精力,骑了大半个洱海。 (20)', 1766470806923, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-62', 'conv-complex-1766477258405', 'user', '现在的我估计骑五公里就要求饶了。 (21)', 1766470826541, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-63', 'conv-complex-1766477258405', 'character', '其实我偶尔还会翻出那时候的照片看,大家都好青涩。 (22)', 1766470844048, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-64', 
'conv-complex-1766477258405', 'user', '那时候还没这么多烦心事,每天就想着晚上去哪儿吃。 (23)', 1766470860210, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-65', 'conv-complex-1766477258405', 'character', '那时候咱们真是有精力,骑了大半个洱海。 (24)', 1766470867690, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-66', 'conv-complex-1766477258405', 'user', '现在的我估计骑五公里就要求饶了。 (25)', 1766470883445, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-67', 'conv-complex-1766477258405', 'character', '其实我偶尔还会翻出那时候的照片看,大家都好青涩。 (26)', 1766470894587, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-68', 'conv-complex-1766477258405', 'user', '那时候还没这么多烦心事,每天就想着晚上去哪儿吃。 (27)', 1766470911137, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-69', 'conv-complex-1766477258405', 'character', '那时候咱们真是有精力,骑了大半个洱海。 (28)', 1766470917883, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-70', 'conv-complex-1766477258405', 'user', '现在的我估计骑五公里就要求饶了。 (29)', 1766470932091, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-71', 'conv-complex-1766477258405', 'character', '其实我偶尔还会翻出那时候的照片看,大家都好青涩。 (30)', 1766470937769, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-72', 'conv-complex-1766477258405', 'user', '那时候还没这么多烦心事,每天就想着晚上去哪儿吃。 (31)', 1766470946881, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES 
('msg-conv-complex-1766477258405-73', 'conv-complex-1766477258405', 'character', '那时候咱们真是有精力,骑了大半个洱海。 (32)', 1766470954175, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-74', 'conv-complex-1766477258405', 'user', '现在的我估计骑五公里就要求饶了。 (33)', 1766470964994, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-75', 'conv-complex-1766477258405', 'character', '其实我偶尔还会翻出那时候的照片看,大家都好青涩。 (34)', 1766470978215, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-76', 'conv-complex-1766477258405', 'user', '那时候还没这么多烦心事,每天就想着晚上去哪儿吃。 (35)', 1766470984326, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-77', 'conv-complex-1766477258405', 'character', '那时候咱们真是有精力,骑了大半个洱海。 (36)', 1766470996571, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-78', 'conv-complex-1766477258405', 'user', '现在的我估计骑五公里就要求饶了。 (37)', 1766471013888, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-79', 'conv-complex-1766477258405', 'character', '如果我们现在还能一起再去一次,你觉得会和以前感觉一样吗?', 1766471030671, 1); +INSERT INTO decision_points (id, conversation_id, anchor_message_id, created_at) VALUES ('dp-conv-complex-1766477258405-1', 'conv-complex-1766477258405', 'msg-conv-complex-1766477258405-79', 1766471030771); +INSERT INTO suggestion_batches (id, decision_point_id, trigger, reason, created_at) VALUES ('batch-conv-complex-1766477258405-1', 'dp-conv-complex-1766477258405-1', 'manual', 'user_silence', 1766471030871); +INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, 
affinity_prediction, created_at) VALUES ('sugg-conv-complex-1766477258405-1A', 'conv-complex-1766477258405', 'dp-conv-complex-1766477258405-1', 'batch-conv-complex-1766477258405-1', 0, '怀旧浪漫', '肯定不一样啊,毕竟现在的我,比那时候更珍惜和你在一起的时间了。', 5, 1766471030971); +INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-conv-complex-1766477258405-1B', 'conv-complex-1766477258405', 'dp-conv-complex-1766477258405-1', 'batch-conv-complex-1766477258405-1', 1, '幽默回应', '感觉肯定不一样,这次我得租个电动车,坚决不脚踩了!', 2, 1766471030971); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-80', 'conv-complex-1766477258405', 'user', '肯定不一样啊,毕竟现在的我,比那时候更珍惜和你在一起的时间了。', 1766471047842, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-81', 'conv-complex-1766477258405', 'character', '突然这么感性,我都不知道该怎么接了……', 1766471052925, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-82', 'conv-complex-1766477258405', 'character', '不过说真的,最近我一直在想,现在的这份工作真的适合我吗?', 1766471060510, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-83', 'conv-complex-1766477258405', 'user', '怎么突然想这个了?压力太大了吗? 
(0)', 1766471071918, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-84', 'conv-complex-1766477258405', 'character', '倒也不是压力,就是觉得每天都在重复,找不到成就感。 (1)', 1766471088308, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-85', 'conv-complex-1766477258405', 'user', '职场倦怠其实挺普遍的。 (2)', 1766471099893, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-86', 'conv-complex-1766477258405', 'character', '但我怕自己一直在这个舒适圈呆下去会废掉。 (3)', 1766471105204, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-87', 'conv-complex-1766477258405', 'user', '怎么突然想这个了?压力太大了吗? (4)', 1766471110419, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-88', 'conv-complex-1766477258405', 'character', '倒也不是压力,就是觉得每天都在重复,找不到成就感。 (5)', 1766471123180, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-89', 'conv-complex-1766477258405', 'user', '职场倦怠其实挺普遍的。 (6)', 1766471128494, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-90', 'conv-complex-1766477258405', 'character', '但我怕自己一直在这个舒适圈呆下去会废掉。 (7)', 1766471140924, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-91', 'conv-complex-1766477258405', 'user', '怎么突然想这个了?压力太大了吗? 
(8)', 1766471160206, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-92', 'conv-complex-1766477258405', 'character', '倒也不是压力,就是觉得每天都在重复,找不到成就感。 (9)', 1766471172316, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-93', 'conv-complex-1766477258405', 'user', '职场倦怠其实挺普遍的。 (10)', 1766471189979, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-94', 'conv-complex-1766477258405', 'character', '但我怕自己一直在这个舒适圈呆下去会废掉。 (11)', 1766471195926, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-95', 'conv-complex-1766477258405', 'user', '怎么突然想这个了?压力太大了吗? (12)', 1766471206216, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-96', 'conv-complex-1766477258405', 'character', '倒也不是压力,就是觉得每天都在重复,找不到成就感。 (13)', 1766471222526, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-97', 'conv-complex-1766477258405', 'user', '职场倦怠其实挺普遍的。 (14)', 1766471232987, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-98', 'conv-complex-1766477258405', 'character', '但我怕自己一直在这个舒适圈呆下去会废掉。 (15)', 1766471247143, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-99', 'conv-complex-1766477258405', 'user', '怎么突然想这个了?压力太大了吗? 
(16)', 1766471254982, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-100', 'conv-complex-1766477258405', 'character', '倒也不是压力,就是觉得每天都在重复,找不到成就感。 (17)', 1766471273983, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-101', 'conv-complex-1766477258405', 'user', '职场倦怠其实挺普遍的。 (18)', 1766471281822, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-102', 'conv-complex-1766477258405', 'character', '但我怕自己一直在这个舒适圈呆下去会废掉。 (19)', 1766471287397, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-103', 'conv-complex-1766477258405', 'user', '怎么突然想这个了?压力太大了吗? (20)', 1766471298375, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-104', 'conv-complex-1766477258405', 'character', '倒也不是压力,就是觉得每天都在重复,找不到成就感。 (21)', 1766471305089, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-105', 'conv-complex-1766477258405', 'user', '职场倦怠其实挺普遍的。 (22)', 1766471310220, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-106', 'conv-complex-1766477258405', 'character', '但我怕自己一直在这个舒适圈呆下去会废掉。 (23)', 1766471316998, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-107', 'conv-complex-1766477258405', 'user', '怎么突然想这个了?压力太大了吗? 
(24)', 1766471333520, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-108', 'conv-complex-1766477258405', 'character', '倒也不是压力,就是觉得每天都在重复,找不到成就感。 (25)', 1766471343492, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-109', 'conv-complex-1766477258405', 'user', '职场倦怠其实挺普遍的。 (26)', 1766471359762, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-110', 'conv-complex-1766477258405', 'character', '但我怕自己一直在这个舒适圈呆下去会废掉。 (27)', 1766471365112, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-111', 'conv-complex-1766477258405', 'user', '怎么突然想这个了?压力太大了吗? (28)', 1766471382602, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-112', 'conv-complex-1766477258405', 'character', '倒也不是压力,就是觉得每天都在重复,找不到成就感。 (29)', 1766471399484, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-113', 'conv-complex-1766477258405', 'user', '职场倦怠其实挺普遍的。 (30)', 1766471419061, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-114', 'conv-complex-1766477258405', 'character', '但我怕自己一直在这个舒适圈呆下去会废掉。 (31)', 1766471428288, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-115', 'conv-complex-1766477258405', 'user', '怎么突然想这个了?压力太大了吗? 
(32)', 1766471437642, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-116', 'conv-complex-1766477258405', 'character', '倒也不是压力,就是觉得每天都在重复,找不到成就感。 (33)', 1766471452998, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-117', 'conv-complex-1766477258405', 'user', '职场倦怠其实挺普遍的。 (34)', 1766471470941, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-118', 'conv-complex-1766477258405', 'character', '但我怕自己一直在这个舒适圈呆下去会废掉。 (35)', 1766471486310, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-119', 'conv-complex-1766477258405', 'user', '怎么突然想这个了?压力太大了吗? (36)', 1766471500159, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-120', 'conv-complex-1766477258405', 'character', '倒也不是压力,就是觉得每天都在重复,找不到成就感。 (37)', 1766471509379, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-121', 'conv-complex-1766477258405', 'character', '你说,我是不是该鼓起勇气去试试那个新项目的机会?', 1766471522987, 1); +INSERT INTO decision_points (id, conversation_id, anchor_message_id, created_at) VALUES ('dp-conv-complex-1766477258405-2', 'conv-complex-1766477258405', 'msg-conv-complex-1766477258405-121', 1766471523087); +INSERT INTO suggestion_batches (id, decision_point_id, trigger, reason, created_at) VALUES ('batch-conv-complex-1766477258405-2', 'dp-conv-complex-1766477258405-2', 'passive', 'topic_switch', 1766471523187); +INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-conv-complex-1766477258405-2A', 
'conv-complex-1766477258405', 'dp-conv-complex-1766477258405-2', 'batch-conv-complex-1766477258405-2', 0, '理性支持', '如果那个项目对你的长期规划有帮助,确实值得一试。', 3, 1766471523287); +INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-conv-complex-1766477258405-2B', 'conv-complex-1766477258405', 'dp-conv-complex-1766477258405-2', 'batch-conv-complex-1766477258405-2', 1, '共情鼓励', '无论你做什么决定,我都会支持你的。想试就去试吧,别让自己遗憾。', 6, 1766471523287); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-122', 'conv-complex-1766477258405', 'user', '我觉得现在的生活节奏也挺好的,没必要把自己搞得那么累吧?', 1766471529812, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-123', 'conv-complex-1766477258405', 'character', '可是我想变得更好啊,你是在质疑我的上进心吗?', 1766471541187, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-124', 'conv-complex-1766477258405', 'user', '我不是那个意思,只是觉得健康和心情更重要。', 1766471548785, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-125', 'conv-complex-1766477258405', 'character', '但你总是试图在我想拼一把的时候泼冷水。 (0)', 1766471556505, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-126', 'conv-complex-1766477258405', 'user', '我只是不想看你每天只睡五小时。 (1)', 1766471572911, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-127', 'conv-complex-1766477258405', 'character', '那是我的选择,我觉得值得。 (2)', 1766471585002, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-128', 
'conv-complex-1766477258405', 'user', '好吧,既然你这么坚持,我也没什么好说的了。 (3)', 1766471590133, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-129', 'conv-complex-1766477258405', 'character', '但你总是试图在我想拼一把的时候泼冷水。 (4)', 1766471610101, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-130', 'conv-complex-1766477258405', 'user', '我只是不想看你每天只睡五小时。 (5)', 1766471623711, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-131', 'conv-complex-1766477258405', 'character', '那是我的选择,我觉得值得。 (6)', 1766471631755, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-132', 'conv-complex-1766477258405', 'user', '好吧,既然你这么坚持,我也没什么好说的了。 (7)', 1766471642597, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-133', 'conv-complex-1766477258405', 'character', '但你总是试图在我想拼一把的时候泼冷水。 (8)', 1766471648067, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-134', 'conv-complex-1766477258405', 'user', '我只是不想看你每天只睡五小时。 (9)', 1766471667883, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-135', 'conv-complex-1766477258405', 'character', '那是我的选择,我觉得值得。 (10)', 1766471675948, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-136', 'conv-complex-1766477258405', 'user', '好吧,既然你这么坚持,我也没什么好说的了。 (11)', 1766471690592, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-137', 
'conv-complex-1766477258405', 'character', '但你总是试图在我想拼一把的时候泼冷水。 (12)', 1766471698144, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-138', 'conv-complex-1766477258405', 'user', '我只是不想看你每天只睡五小时。 (13)', 1766471703644, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-139', 'conv-complex-1766477258405', 'character', '那是我的选择,我觉得值得。 (14)', 1766471716504, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-140', 'conv-complex-1766477258405', 'user', '好吧,既然你这么坚持,我也没什么好说的了。 (15)', 1766471733107, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-141', 'conv-complex-1766477258405', 'character', '但你总是试图在我想拼一把的时候泼冷水。 (16)', 1766471746390, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-142', 'conv-complex-1766477258405', 'user', '我只是不想看你每天只睡五小时。 (17)', 1766471761929, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-143', 'conv-complex-1766477258405', 'character', '那是我的选择,我觉得值得。 (18)', 1766471771757, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-144', 'conv-complex-1766477258405', 'user', '好吧,既然你这么坚持,我也没什么好说的了。 (19)', 1766471785213, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-145', 'conv-complex-1766477258405', 'character', '但你总是试图在我想拼一把的时候泼冷水。 (20)', 1766471800641, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-146', 
'conv-complex-1766477258405', 'user', '我只是不想看你每天只睡五小时。 (21)', 1766471819558, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-147', 'conv-complex-1766477258405', 'character', '那是我的选择,我觉得值得。 (22)', 1766471828945, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-148', 'conv-complex-1766477258405', 'user', '好吧,既然你这么坚持,我也没什么好说的了。 (23)', 1766471845053, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-149', 'conv-complex-1766477258405', 'character', '但你总是试图在我想拼一把的时候泼冷水。 (24)', 1766471861115, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-150', 'conv-complex-1766477258405', 'user', '我只是不想看你每天只睡五小时。 (25)', 1766471877540, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-151', 'conv-complex-1766477258405', 'character', '那是我的选择,我觉得值得。 (26)', 1766471889214, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-152', 'conv-complex-1766477258405', 'user', '好吧,既然你这么坚持,我也没什么好说的了。 (27)', 1766471905421, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-153', 'conv-complex-1766477258405', 'character', '但你总是试图在我想拼一把的时候泼冷水。 (28)', 1766471917655, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-154', 'conv-complex-1766477258405', 'user', '我只是不想看你每天只睡五小时。 (29)', 1766471927744, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-155', 
'conv-complex-1766477258405', 'character', '那是我的选择,我觉得值得。 (30)', 1766471933961, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-156', 'conv-complex-1766477258405', 'user', '好吧,既然你这么坚持,我也没什么好说的了。 (31)', 1766471942686, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-157', 'conv-complex-1766477258405', 'character', '但你总是试图在我想拼一把的时候泼冷水。 (32)', 1766471952431, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-158', 'conv-complex-1766477258405', 'user', '我只是不想看你每天只睡五小时。 (33)', 1766471970163, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-159', 'conv-complex-1766477258405', 'character', '那是我的选择,我觉得值得。 (34)', 1766471976164, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-160', 'conv-complex-1766477258405', 'user', '好吧,既然你这么坚持,我也没什么好说的了。 (35)', 1766471989710, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-161', 'conv-complex-1766477258405', 'character', '但你总是试图在我想拼一把的时候泼冷水。 (36)', 1766472005913, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-162', 'conv-complex-1766477258405', 'user', '我只是不想看你每天只睡五小时。 (37)', 1766472017002, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-163', 'conv-complex-1766477258405', 'character', '算了,感觉咱们讨论这个话题只会吵架,早点睡吧。', 1766472032313, 1); +INSERT INTO decision_points (id, conversation_id, anchor_message_id, created_at) VALUES ('dp-conv-complex-1766477258405-3', 
'conv-complex-1766477258405', 'msg-conv-complex-1766477258405-163', 1766472032413); +INSERT INTO suggestion_batches (id, decision_point_id, trigger, reason, created_at) VALUES ('batch-conv-complex-1766477258405-3', 'dp-conv-complex-1766477258405-3', 'manual', 'conflict_detected', 1766472032513); +INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-conv-complex-1766477258405-3A', 'conv-complex-1766477258405', 'dp-conv-complex-1766477258405-3', 'batch-conv-complex-1766477258405-3', 0, '道歉服软', '对不起,我刚才说话语气可能重了点,其实我是担心你。', 8, 1766472032613); +INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-conv-complex-1766477258405-3B', 'conv-complex-1766477258405', 'dp-conv-complex-1766477258405-3', 'batch-conv-complex-1766477258405-3', 1, '冷静结束', '行吧,那确实都累了,早点休息。', -2, 1766472032613); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-164', 'conv-complex-1766477258405', 'user', '对不起舒涵,我刚才说话语气太生硬了,其实我只是看你最近压力大,很心疼你。', 1766472048221, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-165', 'conv-complex-1766477258405', 'character', '……我也知道你是关心我,刚才我也有点激动了。', 1766472056574, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-166', 'conv-complex-1766477258405', 'user', '抱歉哈,其实你的上进心一直是我最佩服你的地方。 (0)', 1766472071281, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-167', 'conv-complex-1766477258405', 'character', '真的吗?我总觉得自己做不到最好。 (1)', 1766472079442, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES 
('msg-conv-complex-1766477258405-168', 'conv-complex-1766477258405', 'user', '那是你对自己要求太高了。 (2)', 1766472093608, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-169', 'conv-complex-1766477258405', 'character', '听到你这么说,我感觉心情好多了。 (3)', 1766472111001, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-170', 'conv-complex-1766477258405', 'user', '抱歉哈,其实你的上进心一直是我最佩服你的地方。 (4)', 1766472118861, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-171', 'conv-complex-1766477258405', 'character', '真的吗?我总觉得自己做不到最好。 (5)', 1766472136213, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-172', 'conv-complex-1766477258405', 'user', '那是你对自己要求太高了。 (6)', 1766472151820, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-173', 'conv-complex-1766477258405', 'character', '听到你这么说,我感觉心情好多了。 (7)', 1766472167899, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-174', 'conv-complex-1766477258405', 'user', '抱歉哈,其实你的上进心一直是我最佩服你的地方。 (8)', 1766472177011, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-175', 'conv-complex-1766477258405', 'character', '真的吗?我总觉得自己做不到最好。 (9)', 1766472185130, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-176', 'conv-complex-1766477258405', 'user', '那是你对自己要求太高了。 (10)', 1766472203950, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES 
('msg-conv-complex-1766477258405-177', 'conv-complex-1766477258405', 'character', '听到你这么说,我感觉心情好多了。 (11)', 1766472211429, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-178', 'conv-complex-1766477258405', 'user', '抱歉哈,其实你的上进心一直是我最佩服你的地方。 (12)', 1766472225240, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-179', 'conv-complex-1766477258405', 'character', '真的吗?我总觉得自己做不到最好。 (13)', 1766472242869, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-180', 'conv-complex-1766477258405', 'user', '那是你对自己要求太高了。 (14)', 1766472251234, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-181', 'conv-complex-1766477258405', 'character', '听到你这么说,我感觉心情好多了。 (15)', 1766472256269, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-182', 'conv-complex-1766477258405', 'user', '抱歉哈,其实你的上进心一直是我最佩服你的地方。 (16)', 1766472263989, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-183', 'conv-complex-1766477258405', 'character', '真的吗?我总觉得自己做不到最好。 (17)', 1766472282936, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-184', 'conv-complex-1766477258405', 'user', '那是你对自己要求太高了。 (18)', 1766472290154, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-185', 'conv-complex-1766477258405', 'character', '听到你这么说,我感觉心情好多了。 (19)', 1766472302297, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES 
('msg-conv-complex-1766477258405-186', 'conv-complex-1766477258405', 'user', '抱歉哈,其实你的上进心一直是我最佩服你的地方。 (20)', 1766472321102, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-187', 'conv-complex-1766477258405', 'character', '真的吗?我总觉得自己做不到最好。 (21)', 1766472339535, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-188', 'conv-complex-1766477258405', 'user', '那是你对自己要求太高了。 (22)', 1766472352147, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-189', 'conv-complex-1766477258405', 'character', '听到你这么说,我感觉心情好多了。 (23)', 1766472357369, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-190', 'conv-complex-1766477258405', 'user', '抱歉哈,其实你的上进心一直是我最佩服你的地方。 (24)', 1766472376137, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-191', 'conv-complex-1766477258405', 'character', '真的吗?我总觉得自己做不到最好。 (25)', 1766472393710, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-192', 'conv-complex-1766477258405', 'user', '那是你对自己要求太高了。 (26)', 1766472408837, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-193', 'conv-complex-1766477258405', 'character', '听到你这么说,我感觉心情好多了。 (27)', 1766472425447, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-194', 'conv-complex-1766477258405', 'user', '抱歉哈,其实你的上进心一直是我最佩服你的地方。 (28)', 1766472434002, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES 
('msg-conv-complex-1766477258405-195', 'conv-complex-1766477258405', 'character', '真的吗?我总觉得自己做不到最好。 (29)', 1766472449521, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-196', 'conv-complex-1766477258405', 'user', '那是你对自己要求太高了。 (30)', 1766472467377, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-197', 'conv-complex-1766477258405', 'character', '听到你这么说,我感觉心情好多了。 (31)', 1766472485104, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-198', 'conv-complex-1766477258405', 'user', '抱歉哈,其实你的上进心一直是我最佩服你的地方。 (32)', 1766472490499, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-199', 'conv-complex-1766477258405', 'character', '真的吗?我总觉得自己做不到最好。 (33)', 1766472496421, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-200', 'conv-complex-1766477258405', 'user', '那是你对自己要求太高了。 (34)', 1766472508635, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-201', 'conv-complex-1766477258405', 'character', '听到你这么说,我感觉心情好多了。 (35)', 1766472523463, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-202', 'conv-complex-1766477258405', 'user', '抱歉哈,其实你的上进心一直是我最佩服你的地方。 (36)', 1766472531027, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-203', 'conv-complex-1766477258405', 'character', '真的吗?我总觉得自己做不到最好。 (37)', 1766472547703, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES 
('msg-conv-complex-1766477258405-204', 'conv-complex-1766477258405', 'character', '下个周末你有空吗?我请你吃饭,就当是赔罪啦。', 1766472558914, 1); +INSERT INTO decision_points (id, conversation_id, anchor_message_id, created_at) VALUES ('dp-conv-complex-1766477258405-4', 'conv-complex-1766477258405', 'msg-conv-complex-1766477258405-204', 1766472559014); +INSERT INTO suggestion_batches (id, decision_point_id, trigger, reason, created_at) VALUES ('batch-conv-complex-1766477258405-4', 'dp-conv-complex-1766477258405-4', 'passive', 'positive_vibe', 1766472559114); +INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-conv-complex-1766477258405-4A', 'conv-complex-1766477258405', 'dp-conv-complex-1766477258405-4', 'batch-conv-complex-1766477258405-4', 0, '欣然接受', '好啊,那我也要吃草莓大福!', 4, 1766472559214); +INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-conv-complex-1766477258405-4B', 'conv-complex-1766477258405', 'dp-conv-complex-1766477258405-4', 'batch-conv-complex-1766477258405-4', 1, '调皮调侃', '赔罪可不够,得三顿起步。', 3, 1766472559214); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-205', 'conv-complex-1766477258405', 'user', '好啊,那我也要吃草莓大福!', 1766472565861, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-206', 'conv-complex-1766477258405', 'character', '没问题!管够!', 1766472579650, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-207', 'conv-complex-1766477258405', 'user', '那早点睡吧,都快一点了。 (0)', 1766472588080, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-208', 
'conv-complex-1766477258405', 'character', '嗯确实不早了,晚安。 (1)', 1766472597590, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-209', 'conv-complex-1766477258405', 'user', '晚安,好梦。 (2)', 1766472603011, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-210', 'conv-complex-1766477258405', 'character', '你也是,梦里见~ (3)', 1766472614793, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-211', 'conv-complex-1766477258405', 'user', '那早点睡吧,都快一点了。 (4)', 1766472632618, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-212', 'conv-complex-1766477258405', 'character', '嗯确实不早了,晚安。 (5)', 1766472650071, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-213', 'conv-complex-1766477258405', 'user', '晚安,好梦。 (6)', 1766472669561, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-214', 'conv-complex-1766477258405', 'character', '你也是,梦里见~ (7)', 1766472681988, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-215', 'conv-complex-1766477258405', 'user', '那早点睡吧,都快一点了。 (8)', 1766472689037, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-216', 'conv-complex-1766477258405', 'character', '嗯确实不早了,晚安。 (9)', 1766472704592, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-217', 'conv-complex-1766477258405', 'user', '晚安,好梦。 (10)', 1766472710167, 0); +INSERT 
INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-218', 'conv-complex-1766477258405', 'character', '你也是,梦里见~ (11)', 1766472726040, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-219', 'conv-complex-1766477258405', 'user', '那早点睡吧,都快一点了。 (12)', 1766472745717, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-220', 'conv-complex-1766477258405', 'character', '嗯确实不早了,晚安。 (13)', 1766472760136, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-221', 'conv-complex-1766477258405', 'user', '晚安,好梦。 (14)', 1766472770771, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-222', 'conv-complex-1766477258405', 'character', '你也是,梦里见~ (15)', 1766472788424, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-223', 'conv-complex-1766477258405', 'user', '那早点睡吧,都快一点了。 (16)', 1766472800216, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-224', 'conv-complex-1766477258405', 'character', '嗯确实不早了,晚安。 (17)', 1766472811127, 1); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-225', 'conv-complex-1766477258405', 'user', '晚安,好梦。 (18)', 1766472819470, 0); +INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('msg-conv-complex-1766477258405-226', 'conv-complex-1766477258405', 'character', '你也是,梦里见~ (19)', 1766472838877, 1); \ No newline at end of file diff --git a/desktop/scripts/dev-log.js 
b/desktop/scripts/dev-log.js new file mode 100644 index 0000000..e7cb590 --- /dev/null +++ b/desktop/scripts/dev-log.js @@ -0,0 +1,56 @@ +import { spawn } from 'child_process'; +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const projectRoot = path.resolve(__dirname, '..'); +const logDir = path.resolve(projectRoot, '..', 'logs'); + +if (!fs.existsSync(logDir)) { + fs.mkdirSync(logDir, { recursive: true }); +} + +const timestamp = new Date().toISOString().replace(/[:.]/g, '-'); +const logFile = path.join(logDir, `dev-${timestamp}.log`); +const logStream = fs.createWriteStream(logFile, { flags: 'a', encoding: 'utf8' }); + +console.log(`[DevLog] Logging to: ${logFile}`); + +// 强制设置环境变量 +const env = { + ...process.env, + PYTHONIOENCODING: 'utf-8', + NODE_ENV: 'development' +}; + +const child = spawn('pnpm', ['dev'], { + cwd: projectRoot, + shell: true, + env +}); + +child.stdout.on('data', (data) => { + process.stdout.write(data); + logStream.write(data); +}); + +child.stderr.on('data', (data) => { + process.stderr.write(data); + logStream.write(data); +}); + +child.on('close', (code) => { + console.log(`[DevLog] Process exited with code ${code}`); + logStream.end(); +}); + + + + + + + + + + diff --git a/desktop/scripts/download_funasr_model.py b/desktop/scripts/download_funasr_model.py new file mode 100644 index 0000000..1d43d1d --- /dev/null +++ b/desktop/scripts/download_funasr_model.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 +""" +下载 FunASR ONNX 模型。 +该脚本通过实例化 funasr_onnx 模型对象来触发自动下载逻辑。 +""" +import os +import sys +import json +import argparse +import traceback + +# ============================================================================== +# OS 级别的文件描述符重定向,防止库的日志污染 stdout +# ============================================================================== +try: + ipc_fd = os.dup(sys.stdout.fileno()) + ipc_channel = os.fdopen(ipc_fd, "w", buffering=1, 
encoding="utf-8") + os.dup2(sys.stderr.fileno(), sys.stdout.fileno()) +except Exception: + # 如果重定向失败(例如在非标准终端环境中),则回退到直接使用 stdout + ipc_channel = sys.stdout + +def emit(event, **payload): + """发送 JSON 消息到 Node.js""" + try: + data = {"event": event} + data.update(payload) + ipc_channel.write(json.dumps(data, ensure_ascii=False) + "\n") + ipc_channel.flush() + except Exception as exc: + sys.stderr.write(f"[IPC Error] Failed to send: {exc}\n") + sys.stderr.flush() + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--model-id", required=True) + parser.add_argument("--cache-dir", required=False) + args = parser.parse_args() + + # 设置环境变量 + # FunASR 默认下载到 ~/.cache/modelscope/hub + # MODELSCOPE_CACHE 语义通常是 "base dir",实际下载会落到 /hub。 + # 但历史上我们也可能传入了 ".../hub"。这里做兼容归一化,确保 Win/mac/Linux 都稳定落盘。 + cache_base = None + cache_hub = None + if args.cache_dir: + raw = os.path.abspath(args.cache_dir) + if os.path.basename(raw).lower() == "hub": + cache_base = os.path.dirname(raw) + cache_hub = raw + else: + cache_base = raw + cache_hub = os.path.join(raw, "hub") + else: + # 兼容旧逻辑:若仅提供 ASR_CACHE_DIR(通常是 HF 的 hub),尝试回退到其父目录作为 base + cache = os.environ.get("MODELSCOPE_CACHE") or os.environ.get("MODELSCOPE_CACHE_HOME") or os.environ.get("ASR_CACHE_DIR") + if cache: + raw = os.path.abspath(cache) + if os.path.basename(raw).lower() == "hub": + cache_base = os.path.dirname(raw) + cache_hub = raw + else: + cache_base = raw + cache_hub = os.path.join(raw, "hub") + + if cache_base: + os.environ["MODELSCOPE_CACHE"] = cache_base + os.environ["MODELSCOPE_CACHE_HOME"] = cache_base + try: + os.makedirs(cache_base, exist_ok=True) + os.makedirs(cache_hub, exist_ok=True) + except Exception: + pass + + emit("manifest", modelId=args.model_id, message="准备下载 FunASR 模型...", totalBytes=0, fileCount=0) + + try: + import funasr_onnx + # 尝试静默 funasr 的日志 + # funasr_onnx 内部可能没有简单的日志级别控制,只能依赖 stderr 重定向 + except ImportError: + emit("error", modelId=args.model_id, message="funasr_onnx 
库未安装,请先安装依赖。") + sys.exit(1) + + # 模拟 asr_funasr_worker.py 中的模型 ID 逻辑 + is_large = "large" in args.model_id.lower() + + # 定义模型路径 (与 worker 保持一致) + # 这些是默认值,worker 中允许通过环境变量覆盖,这里我们为了下载默认模型,使用默认值 + vad_model_dir = "damo/speech_fsmn_vad_zh-cn-16k-common-onnx" + punc_model_dir = "damo/punc_ct-transformer_zh-cn-common-vocab272727-onnx" + + if is_large: + online_model_dir = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online-onnx" + offline_model_dir = "damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-onnx" + use_quantize = False + else: + online_model_dir = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online-onnx" + offline_model_dir = "damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-onnx" + use_quantize = True + + try: + from funasr_onnx.vad_bin import Fsmn_vad + from funasr_onnx.paraformer_online_bin import Paraformer as ParaformerOnline + from funasr_onnx.paraformer_bin import Paraformer as ParaformerOffline + from funasr_onnx.punc_bin import CT_Transformer + + # 1. 下载 VAD + emit("manifest", modelId=args.model_id, message=f"正在下载 VAD 模型: {vad_model_dir} (1/4)") + Fsmn_vad(model_dir=vad_model_dir, quantize=use_quantize) + + # 2. 下载 Online + emit("manifest", modelId=args.model_id, message=f"正在下载流式模型: {online_model_dir} (2/4)") + ParaformerOnline(model_dir=online_model_dir, batch_size=1, quantize=use_quantize, intra_op_num_threads=1) + + # 3. 下载 Offline + emit("manifest", modelId=args.model_id, message=f"正在下载离线模型: {offline_model_dir} (3/4)") + ParaformerOffline(model_dir=offline_model_dir, batch_size=1, quantize=use_quantize, intra_op_num_threads=1) + + # 4. 
下载 Punc + emit("manifest", modelId=args.model_id, message=f"正在下载标点模型: {punc_model_dir} (4/4)") + CT_Transformer(model_dir=punc_model_dir, quantize=use_quantize, intra_op_num_threads=1) + + emit( + "completed", + modelId=args.model_id, + message="FunASR 模型下载完成", + localDir=cache_hub or os.environ.get("MODELSCOPE_CACHE") or "", + cacheBase=os.environ.get("MODELSCOPE_CACHE") or "", + cacheHub=cache_hub or "", + ) + + except Exception as e: + emit("error", modelId=args.model_id, message=str(e), traceback=traceback.format_exc()) + sys.exit(1) + +if __name__ == "__main__": + main() diff --git a/desktop/scripts/generate-complex-review-data.js b/desktop/scripts/generate-complex-review-data.js new file mode 100644 index 0000000..d5b239e --- /dev/null +++ b/desktop/scripts/generate-complex-review-data.js @@ -0,0 +1,175 @@ +#!/usr/bin/env node + +/** + * 脚本:生成超长且复杂的测试对话 SQL + * 生成 200+ 条消息,并带有多条决策路径 + */ + +import path from 'path'; +import fs from 'fs'; +import { fileURLToPath } from 'url'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +// --- 配置 --- +const CHAR_ID = 'complex-test-girl'; +const CONV_ID = 'conv-complex-' + Date.now(); +const START_TIME = Date.now() - 3600000 * 2; // 2小时前开始 + +const sqlStatements = []; +sqlStatements.push(`-- Test Data for Conversation Review`); +sqlStatements.push(`PRAGMA foreign_keys = ON;`); + +// 1. 创建测试角色 +sqlStatements.push(`INSERT OR IGNORE INTO characters (id, name, nickname, relationship_label, avatar_color, affinity, created_at, updated_at) +VALUES ('${CHAR_ID}', '林舒涵', '舒涵', '青梅竹马', '#ff85c0', 65, ${Date.now()}, ${Date.now()});`); + +// 2. 创建会话 +sqlStatements.push(`INSERT INTO conversations (id, character_id, title, date, created_at, updated_at) +VALUES ('${CONV_ID}', '${CHAR_ID}', '关于未来的深夜长谈', ${START_TIME}, ${Date.now()}, ${Date.now()});`); + +// 3. 
生成 200+ 条消息 +const messages = []; +let currentTs = START_TIME; + +const addMsg = (sender, content) => { + currentTs += Math.floor(Math.random() * 15000) + 5000; + const id = `msg-${CONV_ID}-${messages.length}`; + const escContent = content.replace(/'/g, "''"); + sqlStatements.push(`INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) +VALUES ('${id}', '${CONV_ID}', '${sender}', '${escContent}', ${currentTs}, ${sender === 'user' ? 0 : 1});`); + messages.push({ id }); + return id; +}; + +// --- 第一阶段:轻松闲聊 (0-40) --- +addMsg('character', '在忙吗?'); +addMsg('user', '刚忙完,正打算刷会儿手机,怎么啦?'); +addMsg('character', '没什么,今天去吃了你说的那家甜品店,草莓大福真的超好吃!'); +addMsg('user', '哈哈我就说吧,那家店是老字号了。'); +for (let i = 0; i < 36; i++) { + const contents = [ + ['character', '明天天气好像不太好呢。'], + ['user', '是吗?我看预报说是多云。'], + ['character', '但我刚才看又有雷阵雨预警了。'], + ['user', '那出门得记得带伞。'], + ['character', '嗯我知道啦。'] + ]; + addMsg(...contents[i % contents.length]); +} + +// --- 第二阶段:怀旧回忆 (41-80) --- +addMsg('character', '对了,你还记得咱们大二那年去洱海骑行吗?'); +addMsg('user', '当然记得,那天我晒得跟黑炭一样。'); +addMsg('character', '你还好意思说,我那天可是提醒过你要涂防晒的。'); +for (let i = 2; i < 38; i++) { + const contents = [ + ['character', '那时候咱们真是有精力,骑了大半个洱海。'], + ['user', '现在的我估计骑五公里就要求饶了。'], + ['character', '其实我偶尔还会翻出那时候的照片看,大家都好青涩。'], + ['user', '那时候还没这么多烦心事,每天就想着晚上去哪儿吃。'] + ]; + const item = contents[i % contents.length]; + addMsg(item[0], item[1] + ` (${i})`); +} + +// 插入决策点 1 +const dp1Anchor = addMsg('character', '如果我们现在还能一起再去一次,你觉得会和以前感觉一样吗?'); +const dp1Id = `dp-${CONV_ID}-1`; +const batch1Id = `batch-${CONV_ID}-1`; +sqlStatements.push(`INSERT INTO decision_points (id, conversation_id, anchor_message_id, created_at) VALUES ('${dp1Id}', '${CONV_ID}', '${dp1Anchor}', ${currentTs + 100});`); +sqlStatements.push(`INSERT INTO suggestion_batches (id, decision_point_id, trigger, reason, created_at) VALUES ('${batch1Id}', '${dp1Id}', 'manual', 'user_silence', ${currentTs + 200});`); +sqlStatements.push(`INSERT INTO 
ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-${CONV_ID}-1A', '${CONV_ID}', '${dp1Id}', '${batch1Id}', 0, '怀旧浪漫', '肯定不一样啊,毕竟现在的我,比那时候更珍惜和你在一起的时间了。', 5, ${currentTs + 300});`); +sqlStatements.push(`INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-${CONV_ID}-1B', '${CONV_ID}', '${dp1Id}', '${batch1Id}', 1, '幽默回应', '感觉肯定不一样,这次我得租个电动车,坚决不脚踩了!', 2, ${currentTs + 300});`); + +addMsg('user', '肯定不一样啊,毕竟现在的我,比那时候更珍惜和你在一起的时间了。'); + +// --- 第三阶段:深沉话题 (81-120) --- +addMsg('character', '突然这么感性,我都不知道该怎么接了……'); +addMsg('character', '不过说真的,最近我一直在想,现在的这份工作真的适合我吗?'); +for (let i = 0; i < 38; i++) { + const contents = [ + ['user', '怎么突然想这个了?压力太大了吗?'], + ['character', '倒也不是压力,就是觉得每天都在重复,找不到成就感。'], + ['user', '职场倦怠其实挺普遍的。'], + ['character', '但我怕自己一直在这个舒适圈呆下去会废掉。'] + ]; + const item = contents[i % contents.length]; + addMsg(item[0], item[1] + ` (${i})`); +} + +// 插入决策点 2 +const dp2Anchor = addMsg('character', '你说,我是不是该鼓起勇气去试试那个新项目的机会?'); +const dp2Id = `dp-${CONV_ID}-2`; +const batch2Id = `batch-${CONV_ID}-2`; +sqlStatements.push(`INSERT INTO decision_points (id, conversation_id, anchor_message_id, created_at) VALUES ('${dp2Id}', '${CONV_ID}', '${dp2Anchor}', ${currentTs + 100});`); +sqlStatements.push(`INSERT INTO suggestion_batches (id, decision_point_id, trigger, reason, created_at) VALUES ('${batch2Id}', '${dp2Id}', 'passive', 'topic_switch', ${currentTs + 200});`); +sqlStatements.push(`INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-${CONV_ID}-2A', '${CONV_ID}', '${dp2Id}', '${batch2Id}', 0, '理性支持', '如果那个项目对你的长期规划有帮助,确实值得一试。', 3, ${currentTs + 300});`); +sqlStatements.push(`INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, 
suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-${CONV_ID}-2B', '${CONV_ID}', '${dp2Id}', '${batch2Id}', 1, '共情鼓励', '无论你做什么决定,我都会支持你的。想试就去试吧,别让自己遗憾。', 6, ${currentTs + 300});`); + +addMsg('user', '我觉得现在的生活节奏也挺好的,没必要把自己搞得那么累吧?'); + +// --- 第四阶段:观点冲突 (121-160) --- +addMsg('character', '可是我想变得更好啊,你是在质疑我的上进心吗?'); +addMsg('user', '我不是那个意思,只是觉得健康和心情更重要。'); +for (let i = 0; i < 38; i++) { + const contents = [ + ['character', '但你总是试图在我想拼一把的时候泼冷水。'], + ['user', '我只是不想看你每天只睡五小时。'], + ['character', '那是我的选择,我觉得值得。'], + ['user', '好吧,既然你这么坚持,我也没什么好说的了。'] + ]; + const item = contents[i % contents.length]; + addMsg(item[0], item[1] + ` (${i})`); +} + +const dp3Anchor = addMsg('character', '算了,感觉咱们讨论这个话题只会吵架,早点睡吧。'); +const dp3Id = `dp-${CONV_ID}-3`; +const batch3Id = `batch-${CONV_ID}-3`; +sqlStatements.push(`INSERT INTO decision_points (id, conversation_id, anchor_message_id, created_at) VALUES ('${dp3Id}', '${CONV_ID}', '${dp3Anchor}', ${currentTs + 100});`); +sqlStatements.push(`INSERT INTO suggestion_batches (id, decision_point_id, trigger, reason, created_at) VALUES ('${batch3Id}', '${dp3Id}', 'manual', 'conflict_detected', ${currentTs + 200});`); +sqlStatements.push(`INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-${CONV_ID}-3A', '${CONV_ID}', '${dp3Id}', '${batch3Id}', 0, '道歉服软', '对不起,我刚才说话语气可能重了点,其实我是担心你。', 8, ${currentTs + 300});`); +sqlStatements.push(`INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-${CONV_ID}-3B', '${CONV_ID}', '${dp3Id}', '${batch3Id}', 1, '冷静结束', '行吧,那确实都累了,早点休息。', -2, ${currentTs + 300});`); + +addMsg('user', '对不起舒涵,我刚才说话语气太生硬了,其实我只是看你最近压力大,很心疼你。'); + +// --- 第五阶段:和解 (161-200) --- +addMsg('character', '……我也知道你是关心我,刚才我也有点激动了。'); +for (let i = 0; i < 38; i++) { + const contents = [ + ['user', 
'抱歉哈,其实你的上进心一直是我最佩服你的地方。'], + ['character', '真的吗?我总觉得自己做不到最好。'], + ['user', '那是你对自己要求太高了。'], + ['character', '听到你这么说,我感觉心情好多了。'] + ]; + const item = contents[i % contents.length]; + addMsg(item[0], item[1] + ` (${i})`); +} + +const dp4Anchor = addMsg('character', '下个周末你有空吗?我请你吃饭,就当是赔罪啦。'); +const dp4Id = `dp-${CONV_ID}-4`; +const batch4Id = `batch-${CONV_ID}-4`; +sqlStatements.push(`INSERT INTO decision_points (id, conversation_id, anchor_message_id, created_at) VALUES ('${dp4Id}', '${CONV_ID}', '${dp4Anchor}', ${currentTs + 100});`); +sqlStatements.push(`INSERT INTO suggestion_batches (id, decision_point_id, trigger, reason, created_at) VALUES ('${batch4Id}', '${dp4Id}', 'passive', 'positive_vibe', ${currentTs + 200});`); +sqlStatements.push(`INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-${CONV_ID}-4A', '${CONV_ID}', '${dp4Id}', '${batch4Id}', 0, '欣然接受', '好啊,那我也要吃草莓大福!', 4, ${currentTs + 300});`); +sqlStatements.push(`INSERT INTO ai_suggestions (id, conversation_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, created_at) VALUES ('sugg-${CONV_ID}-4B', '${CONV_ID}', '${dp4Id}', '${batch4Id}', 1, '调皮调侃', '赔罪可不够,得三顿起步。', 3, ${currentTs + 300});`); + +addMsg('user', '好啊,那我也要吃草莓大福!'); + +addMsg('character', '没问题!管够!'); +for (let i = 0; i < 20; i++) { + const contents = [ + ['user', '那早点睡吧,都快一点了。'], + ['character', '嗯确实不早了,晚安。'], + ['user', '晚安,好梦。'], + ['character', '你也是,梦里见~'] + ]; + const item = contents[i % contents.length]; + addMsg(item[0], item[1] + ` (${i})`); +} + +const outputPath = path.join(__dirname, 'complex-test-data.sql'); +fs.writeFileSync(outputPath, sqlStatements.join('\n')); +console.log(`SQL file generated at: ${outputPath}`); +console.log(`Conversation ID: ${CONV_ID}`); diff --git a/desktop/scripts/insert-test-review-data.js b/desktop/scripts/insert-test-review-data.js new file mode 100755 index 
0000000..5df9985 --- /dev/null +++ b/desktop/scripts/insert-test-review-data.js @@ -0,0 +1,116 @@ +#!/usr/bin/env node + +/** + * 脚本:插入测试复盘数据到数据库 + * 用于测试决策树展示效果 + */ + +import path from 'path'; +import fs from 'fs'; +import { fileURLToPath } from 'url'; +import Database from 'better-sqlite3'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +// 数据库路径(与主应用一致) +const dbPath = path.join( + process.env.HOME || process.env.USERPROFILE, + 'Library/Application Support/livegalgame-desktop/livegalgame.db' +); + +console.log('数据库路径:', dbPath); + +if (!fs.existsSync(dbPath)) { + console.error('错误: 数据库文件不存在:', dbPath); + console.error('请先启动应用以创建数据库'); + process.exit(1); +} + +const db = new Database(dbPath); +db.pragma('foreign_keys = ON'); + +console.log('连接数据库成功'); + +// 读取 seed.sql 中的测试数据部分 +const seedPath = path.join(__dirname, '../src/db/seed.sql'); +const seedSQL = fs.readFileSync(seedPath, 'utf-8'); + +// 提取测试数据部分(从 "=== 复盘功能测试对话 ===" 开始) +const testDataStart = seedSQL.indexOf('-- === 复盘功能测试对话 ==='); +if (testDataStart === -1) { + console.error('错误: 在 seed.sql 中未找到测试数据'); + process.exit(1); +} + +const testDataSQL = seedSQL.substring(testDataStart); + +// 分割 SQL 语句 +const statements = testDataSQL + .split(';') + .map(stmt => stmt.trim()) + .filter(stmt => { + // 移除注释行 + const lines = stmt.split('\n'); + const cleanedLines = lines + .map(line => { + const commentIndex = line.indexOf('--'); + if (commentIndex >= 0) { + return line.substring(0, commentIndex).trim(); + } + return line.trim(); + }) + .filter(line => line.length > 0); + + return cleanedLines.join(' ').length > 0; + }) + .map(stmt => { + // 移除行内注释 + const lines = stmt.split('\n'); + return lines + .map(line => { + const commentIndex = line.indexOf('--'); + if (commentIndex >= 0) { + return line.substring(0, commentIndex).trim(); + } + return line.trim(); + }) + .filter(line => line.length > 0) + .join(' '); + }) + .filter(stmt => stmt.length > 0); + 
+console.log(`找到 ${statements.length} 条 SQL 语句`); + +// 执行插入 +const transaction = db.transaction(() => { + let successCount = 0; + let skipCount = 0; + + for (const statement of statements) { + try { + const result = db.exec(statement); + successCount++; + console.log(`✓ 执行成功: ${statement.substring(0, 50)}...`); + } catch (err) { + // INSERT OR IGNORE 如果数据已存在会报错,这是正常的 + if (err.message.includes('UNIQUE constraint') || err.message.includes('already exists')) { + skipCount++; + console.log(`⊘ 跳过(已存在): ${statement.substring(0, 50)}...`); + } else { + console.error(`✗ 执行失败:`, err.message); + console.error(` SQL: ${statement.substring(0, 100)}...`); + } + } + } + + console.log(`\n完成:`); + console.log(` 成功插入: ${successCount} 条`); + console.log(` 跳过(已存在): ${skipCount} 条`); +}); + +transaction(); + +db.close(); +console.log('\n数据库连接已关闭'); + diff --git a/desktop/scripts/prebuild.js b/desktop/scripts/prebuild.js new file mode 100644 index 0000000..42f276f --- /dev/null +++ b/desktop/scripts/prebuild.js @@ -0,0 +1,15 @@ +/** + * 预构建入口: + * - 构建 Python FastAPI 后端(PyInstaller onedir) + */ + +import { execSync } from 'child_process'; + +function run(cmd) { + execSync(cmd, { stdio: 'inherit' }); +} + +console.log('[prebuild] building backend (PyInstaller) ...'); +run('npm run build:backend'); +console.log('[prebuild] done'); + diff --git a/desktop/scripts/prepare-python-env.js b/desktop/scripts/prepare-python-env.js new file mode 100644 index 0000000..4f5253b --- /dev/null +++ b/desktop/scripts/prepare-python-env.js @@ -0,0 +1,364 @@ +/** + * 准备内置 Python 运行环境,供打包后使用。 + * - 创建 venv: desktop/python-env + * - 安装 requirements.txt 中的依赖(包含 funasr-onnx 等) + * + * 设计目标: + * 1. 打包时将 python-env 放入 extraResources,客户端无需自行安装 Python。 + * 2. 
跨平台(macOS/Windows)调用 pip,避免依赖激活脚本。 + */ + +import { execSync } from 'child_process'; +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); +const projectRoot = path.resolve(__dirname, '..'); +const venvDir = path.join(projectRoot, 'python-env'); +const bootstrapDir = path.join(projectRoot, 'python-bootstrap'); +const miniforgePrefix = path.join(bootstrapDir, 'miniforge'); +const requirementsPath = path.join(projectRoot, 'requirements.txt'); + +const isWin = process.platform === 'win32'; +const isMac = process.platform === 'darwin'; +const desiredPy = process.env.PYTHON_VERSION || '3.10'; +const candidateCmds = [ + process.env.PYTHON, // 用户显式指定 + isWin ? `py -${desiredPy}` : null, // Windows 推荐 py 启动器 + isWin ? `python${desiredPy}` : `python${desiredPy}`, // python3.10 + isWin ? `python${desiredPy.replace('.', '')}` : null, // 兼容 python310 + isWin ? 'python3' : 'python3', + isWin ? 'python' : 'python', +].filter(Boolean); + +const pythonPath = isWin + ? path.join(venvDir, 'Scripts', 'python.exe') + : path.join(venvDir, 'bin', 'python3'); +const pipPath = isWin + ? path.join(venvDir, 'Scripts', 'pip.exe') + : path.join(venvDir, 'bin', 'pip'); + +function run(cmd, options = {}) { + execSync(cmd, { stdio: 'inherit', ...options }); +} + +function runCapture(cmd, options = {}) { + try { + const output = execSync(cmd, { encoding: 'utf-8', ...options }); + return { success: true, stdout: output, stderr: '' }; + } catch (err) { + return { + success: false, + stdout: err.stdout?.toString() || '', + stderr: err.stderr?.toString() || err.message || err.toString() + }; + } +} + +function detectPython() { + for (const cmd of candidateCmds) { + try { + const v = execSync(`"${cmd}" -c "import sys;print(sys.version)"`, { encoding: 'utf-8' }).trim(); + const [major, minor] = v.split('.')[0] ? 
v.split('.').map((n) => parseInt(n, 10)) : [0, 0]; + if (major === 3 && minor >= 8 && minor <= 12) { + console.log(`[prepare-python-env] using python: ${cmd} (version ${major}.${minor})`); + return cmd; + } + console.warn(`[prepare-python-env] skip ${cmd}, unsupported version ${v}`); + } catch { + // ignore and try next + } + } + return null; +} + +function bootstrapMiniforge() { + if (process.platform !== 'darwin') { + throw new Error( + `[prepare-python-env] 找不到可用的 Python,请安装 3.10~3.12 并设置环境变量 PYTHON 指向该解释器(当前尝试: ${candidateCmds.join(', ')})` + ); + } + + const arch = process.arch === 'arm64' ? 'arm64' : 'x86_64'; + const installer = `Miniforge3-MacOSX-${arch}.sh`; + const url = `https://github.com/conda-forge/miniforge/releases/latest/download/${installer}`; + + if (!fs.existsSync(bootstrapDir)) { + fs.mkdirSync(bootstrapDir, { recursive: true }); + } + + const installerPath = path.join(bootstrapDir, installer); + if (!fs.existsSync(installerPath)) { + console.log(`[prepare-python-env] downloading Miniforge (${arch}) ...`); + try { + run(`curl -L --retry 3 --retry-all-errors --http1.1 "${url}" -o "${installerPath}"`); + } catch (err) { + // 清理损坏的下载,避免下次误判 + if (fs.existsSync(installerPath)) { + fs.unlinkSync(installerPath); + } + throw err; + } + run(`chmod +x "${installerPath}"`); + } else { + console.log('[prepare-python-env] Miniforge installer already downloaded'); + } + + // 解除 macOS 安全属性,避免 “Operation not permitted” + try { + run(`xattr -d com.apple.quarantine "${installerPath}"`, { stdio: 'ignore' }); + } catch { + // ignore + } + + if (!fs.existsSync(path.join(miniforgePrefix, 'bin', 'python'))) { + console.log('[prepare-python-env] installing Miniforge (py310)...'); + run(`bash "${installerPath}" -b -p "${miniforgePrefix}"`); + } else { + console.log('[prepare-python-env] Miniforge already installed'); + } + + const bundledPy = path.join(miniforgePrefix, 'bin', 'python3'); + console.log(`[prepare-python-env] using bootstrapped python: ${bundledPy}`); + 
return bundledPy; +} + +function ensureCondaEnv(miniforgePython, { forceRebuild = false } = {}) { + let condaBin = isWin + ? path.join(miniforgePrefix, 'Scripts', 'conda.exe') + : path.join(miniforgePrefix, 'bin', 'conda'); + + if (!fs.existsSync(condaBin)) { + console.log(`[prepare-python-env] conda not found at ${condaBin}, bootstrapping Miniforge...`); + const bootstrappedPy = bootstrapMiniforge(); + condaBin = isWin + ? path.join(miniforgePrefix, 'Scripts', 'conda.exe') + : path.join(miniforgePrefix, 'bin', 'conda'); + if (!fs.existsSync(condaBin)) { + throw new Error(`[prepare-python-env] conda still not found at ${condaBin} after bootstrap`); + } + miniforgePython = bootstrappedPy; + } + + if (fs.existsSync(venvDir) && forceRebuild) { + console.log(`[prepare-python-env] removing existing env for rebuild: ${venvDir}`); + fs.rmSync(venvDir, { recursive: true, force: true }); + } + + if (fs.existsSync(pythonPath)) { + console.log(`[prepare-python-env] env already exists: ${pythonPath}`); + return; + } + + console.log(`[prepare-python-env] creating conda env (Python ${desiredPy}) at ${venvDir}`); + run(`"${condaBin}" create -y -p "${venvDir}" python=${desiredPy} pip`); + + // 返回可能更新后的 Miniforge python 路径,便于后续步骤使用同一发行版 + return miniforgePython; +} + +function ensureVenv(pythonCmd, { forceRebuild = false } = {}) { + if (fs.existsSync(venvDir) && forceRebuild) { + console.log(`[prepare-python-env] removing existing venv for rebuild: ${venvDir}`); + fs.rmSync(venvDir, { recursive: true, force: true }); + } + + if (fs.existsSync(pythonPath)) { + console.log(`[prepare-python-env] venv already exists: ${pythonPath}`); + return; + } + + console.log(`[prepare-python-env] creating venv via ${pythonCmd} -m venv "${venvDir}"`); + run(`"${pythonCmd}" -m venv "${venvDir}"`); +} + +function installDeps() { + if (!fs.existsSync(requirementsPath)) { + throw new Error(`requirements.txt not found at ${requirementsPath}`); + } + // 为 pip 安装禁用代理,避免 socks 依赖错误 + const envNoProxy = { + 
...process.env, + ALL_PROXY: undefined, + all_proxy: undefined, + HTTP_PROXY: undefined, + http_proxy: undefined, + HTTPS_PROXY: undefined, + https_proxy: undefined, + PIP_NO_PROXY: '*', + PIP_DISABLE_PIP_VERSION_CHECK: '1', + }; + + console.log(`[prepare-python-env] upgrade pip`); + run(`"${pythonPath}" -m pip install --upgrade pip`, { env: envNoProxy }); + + // 全平台统一安装 requirements(默认使用 FunASR ONNX) + console.log(`[prepare-python-env] install requirements (platform=${isWin ? 'win' : 'unix'})`); + + if (isWin) { + // Windows 上:先尝试正常安装,如果失败且是 av 相关错误,则跳过 av 继续安装 + // funasr-onnx 不依赖 av,所以可以安全跳过 + const result = runCapture(`"${pythonPath}" -m pip install -r "${requirementsPath}"`, { env: envNoProxy }); + + if (!result.success) { + const errorOutput = result.stderr + result.stdout; + // 检查是否是 av 相关的编译错误 + if (errorOutput.includes('av') && (errorOutput.includes('LNK1181') || errorOutput.includes('Failed building wheel') || errorOutput.includes('failed-wheel-build'))) { + console.warn('[prepare-python-env] av package build failed (expected on Windows), trying alternative installation...'); + + // 尝试使用 --only-binary 安装,跳过需要编译的包 + const binaryResult = runCapture(`"${pythonPath}" -m pip install -r "${requirementsPath}" --only-binary=:all:`, { env: envNoProxy }); + + if (!binaryResult.success) { + // 如果 --only-binary 也失败,逐个安装关键包(跳过可能有问题的传递依赖) + console.warn('[prepare-python-env] --only-binary failed, installing critical packages individually...'); + const criticalPkgs = [ + 'funasr-onnx==0.4.1', + 'onnxruntime==1.21.1', + 'fastapi>=0.115.0', + 'uvicorn[standard]>=0.30.0', + 'websockets>=12.0', + 'pyinstaller>=6.3.0', + 'python-multipart>=0.0.9', + 'soundfile>=0.12.1', + 'numpy>=1.26.4,<2', + 'jieba>=0.42.1', + 'requests[socks]>=2.31.0', + 'httpx[socks]>=0.27.0', + ]; + + let allSuccess = true; + for (const pkg of criticalPkgs) { + const pkgResult = runCapture(`"${pythonPath}" -m pip install "${pkg}"`, { env: envNoProxy }); + if (!pkgResult.success) { + 
console.warn(`[prepare-python-env] Failed to install ${pkg}: ${pkgResult.stderr.substring(0, 200)}`); + allSuccess = false; + } + } + + if (!allSuccess) { + console.warn('[prepare-python-env] Some packages failed to install, but continuing...'); + } + } + } else { + // 其他错误,直接抛出 + console.error('[prepare-python-env] Installation failed:', result.stderr); + throw new Error(`pip install failed: ${result.stderr.substring(0, 500)}`); + } + } + } else { + run(`"${pythonPath}" -m pip install -r "${requirementsPath}"`, { env: envNoProxy }); + } +} + +function ensureCondaPackInstalled(miniforgePython) { + console.log('[prepare-python-env] ensure conda-pack is installed in base'); + run(`"${miniforgePython}" -m pip install --upgrade conda-pack`); +} + +function installCondaPackages(packages) { + const mambaBin = path.join(miniforgePrefix, isWin ? 'Scripts' : 'bin', isWin ? 'mamba.exe' : 'mamba'); + const condaBin = path.join(miniforgePrefix, isWin ? 'Scripts' : 'bin', isWin ? 'conda.exe' : 'conda'); + const installer = fs.existsSync(mambaBin) ? 
mambaBin : condaBin; + if (!fs.existsSync(installer)) { + throw new Error('[prepare-python-env] neither mamba nor conda found'); + } + const pkgList = packages.join(' '); + console.log(`[prepare-python-env] conda installing packages: ${pkgList}`); + run(`"${installer}" install -y -p "${venvDir}" -c conda-forge ${pkgList}`); +} + +function packCondaEnv() { + const tarPath = `${venvDir}.tar.gz`; + console.log(`[prepare-python-env] packing env with conda-pack -> ${tarPath}`); + run(`"${miniforgePrefix}/bin/conda-pack" -p "${venvDir}" -o "${tarPath}" -f`); + + console.log(`[prepare-python-env] repacking env to make it relocatable`); + fs.rmSync(venvDir, { recursive: true, force: true }); + fs.mkdirSync(venvDir, { recursive: true }); + run(`tar -xzf "${tarPath}" -C "${venvDir}"`); + run(`"${path.join(venvDir, 'bin', 'conda-unpack')}"`); + fs.rmSync(tarPath, { force: true }); +} + +/** + * 修复 venv 中的 Python 符号链接为实际文件副本 + * venv 创建的符号链接是绝对路径,打包后在其他机器上会失效 + */ +function fixPythonSymlinks() { + const binDir = path.join(venvDir, isWin ? 'Scripts' : 'bin'); + const pythonLinks = isWin + ? 
['python.exe', 'python3.exe'] + : ['python', 'python3', `python${desiredPy}`]; + + for (const linkName of pythonLinks) { + const linkPath = path.join(binDir, linkName); + if (!fs.existsSync(linkPath)) { + continue; + } + + try { + const stat = fs.lstatSync(linkPath); + if (!stat.isSymbolicLink()) { + continue; // 已经是实际文件,跳过 + } + + const target = fs.readlinkSync(linkPath); + // 检查是否是绝对路径的符号链接 + if (path.isAbsolute(target) && fs.existsSync(target)) { + console.log(`[prepare-python-env] fixing symlink: ${linkName} -> ${target}`); + // 删除符号链接,复制实际文件 + fs.unlinkSync(linkPath); + fs.copyFileSync(target, linkPath); + // 确保可执行权限 + if (!isWin) { + fs.chmodSync(linkPath, 0o755); + } + console.log(`[prepare-python-env] replaced symlink with actual file: ${linkName}`); + } + } catch (err) { + console.warn(`[prepare-python-env] warning: failed to fix ${linkName}:`, err.message); + } + } +} + +function main() { + // CI 场景强制使用 Miniforge,避免使用系统 Framework Python 导致打包后动态链接失效 + const forceMiniforge = isMac && process.env.CI === 'true'; + const forceRebuild = forceMiniforge || process.env.FORCE_REBUILD_VENV === 'true'; + + let pythonCmd = forceMiniforge ? 
null : detectPython(); + + // 如果检测到的 python 是 macOS Framework 路径,也切换到 Miniforge + const isMacFrameworkPython = pythonCmd && isMac && + pythonCmd.includes('/Library/Frameworks/Python.framework'); + + if (!pythonCmd || forceMiniforge || isMacFrameworkPython) { + pythonCmd = bootstrapMiniforge(); + } + + if (isMac) { + const updatedPy = ensureCondaEnv(pythonCmd, { forceRebuild }); + if (updatedPy) { + pythonCmd = updatedPy; + } + ensureCondaPackInstalled(pythonCmd); + installCondaPackages([ + 'ffmpeg', + 'av=11.*', + ]); + installDeps(); + packCondaEnv(); + fixPythonSymlinks(); + } else { + ensureVenv(pythonCmd, { forceRebuild }); + installDeps(); + fixPythonSymlinks(); + } + console.log('[prepare-python-env] done'); +} + +main(); + diff --git a/desktop/src/asr/asr-cache-env.js b/desktop/src/asr/asr-cache-env.js new file mode 100644 index 0000000..81a2cc1 --- /dev/null +++ b/desktop/src/asr/asr-cache-env.js @@ -0,0 +1,66 @@ +import fs from 'fs'; +import path from 'path'; + +function ensureDir(targetPath) { + try { + fs.mkdirSync(targetPath, { recursive: true }); + } catch { + // ignore mkdir errors + } +} + +export function computeAsrCachePaths({ userDataDir, asrCacheBase }) { + const base = asrCacheBase || path.join(userDataDir, 'asr-cache'); + const hfHome = path.join(base, 'hf-home'); + const asrCacheDir = path.join(hfHome, 'hub'); + const msBase = path.join(base, 'modelscope'); + const msHub = path.join(msBase, 'hub'); + return { + asrCacheBase: base, + hfHome, + asrCacheDir, + modelscopeCacheBase: msBase, + modelscopeCacheHub: msHub, + }; +} + +export function applyAsrCacheEnv({ userDataDir, asrCacheBase, force = false }) { + const paths = computeAsrCachePaths({ userDataDir, asrCacheBase }); + + // 允许用户通过环境变量强制覆盖;否则用我们计算的默认值/GUI 配置值。 + if (force || !process.env.ASR_CACHE_BASE) { + process.env.ASR_CACHE_BASE = paths.asrCacheBase; + } + + if (force || !process.env.HF_HOME) { + process.env.HF_HOME = paths.hfHome; + } + + if (force || !process.env.ASR_CACHE_DIR) { + 
process.env.ASR_CACHE_DIR = paths.asrCacheDir; + } + + if (force) { + process.env.MODELSCOPE_CACHE = paths.modelscopeCacheBase; + process.env.MODELSCOPE_CACHE_HOME = paths.modelscopeCacheBase; + } else if (!process.env.MODELSCOPE_CACHE && !process.env.MODELSCOPE_CACHE_HOME) { + process.env.MODELSCOPE_CACHE = paths.modelscopeCacheBase; + process.env.MODELSCOPE_CACHE_HOME = paths.modelscopeCacheBase; + } else if (!process.env.MODELSCOPE_CACHE_HOME && process.env.MODELSCOPE_CACHE) { + process.env.MODELSCOPE_CACHE_HOME = process.env.MODELSCOPE_CACHE; + } + + // mkdir(不抛错) + ensureDir(process.env.ASR_CACHE_BASE); + ensureDir(process.env.HF_HOME); + ensureDir(process.env.ASR_CACHE_DIR); + if (process.env.MODELSCOPE_CACHE) { + ensureDir(process.env.MODELSCOPE_CACHE); + ensureDir(path.join(process.env.MODELSCOPE_CACHE, 'hub')); + } + + return computeAsrCachePaths({ + userDataDir, + asrCacheBase: process.env.ASR_CACHE_BASE, + }); +} diff --git a/desktop/src/asr/asr-manager.js b/desktop/src/asr/asr-manager.js index 57905ae..7920cc8 100644 --- a/desktop/src/asr/asr-manager.js +++ b/desktop/src/asr/asr-manager.js @@ -3,15 +3,17 @@ import DatabaseManager from '../db/database.js'; import path from 'path'; import fs from 'fs'; import * as logger from '../utils/logger.js'; +import RecognitionCache from './recognition-cache.js'; +import { createSentenceHandlers } from './sentence-handlers.js'; /** * ASR 管理器(在主进程中运行) * 协调 Whisper 服务和数据库操作 */ class ASRManager { - constructor() { + constructor(dbInstance = null) { this.whisperService = null; // 延迟初始化 - this.db = new DatabaseManager(); + this.db = dbInstance || new DatabaseManager(); this.isInitialized = false; this.isRunning = false; @@ -20,16 +22,19 @@ class ASRManager { // 服务器崩溃回调 this.onServerCrash = null; + // 防止并发 initialize 导致重复拉起后端/worker(内存飙升) + this._initializePromise = null; // 当前对话 ID this.currentConversationId = null; + this.modelName = null; // 活跃识别任务 this.activeTranscriptions = new Map(); // sourceId -> Promise // 
识别结果去重缓存(避免重复保存相同的识别结果) - this.recentRecognitionCache = new Map(); // sourceId -> [{ text, timestamp }] this.duplicateThreshold = 3000; // 3秒内的相同文本视为重复 + this.recognitionCache = new RecognitionCache({ duplicateThreshold: this.duplicateThreshold }); // 【VAD相关】静音检测配置 this.silenceTimers = new Map(); // sourceId -> timer @@ -43,6 +48,10 @@ class ASRManager { this.currentSegments = new Map(); // sourceId -> { messageId, recordId, lastText } this.streamingSegments = new Map(); // sourceId -> latest partial text + const { handleSentenceComplete, handlePartialResult } = createSentenceHandlers(this); + this.handleSentenceComplete = handleSentenceComplete; + this.handlePartialResult = handlePartialResult; + logger.log('ASRManager created'); } @@ -61,6 +70,22 @@ class ASRManager { setEventEmitter(emitter) { this.eventEmitter = emitter; logger.log('Event emitter set for ASRManager'); + if (emitter && this.whisperService?.setProgressEmitter) { + this.whisperService.setProgressEmitter((progress) => this.emitDownloadProgress(progress)); + } + } + + emitDownloadProgress(progress) { + if (!this.eventEmitter) { + return; + } + const payload = { + modelId: progress?.modelId || this.modelName, + engine: progress?.engine || this.whisperService?.engine, + source: progress?.source || 'preload', + ...progress + }; + this.eventEmitter('asr-model-download-progress', payload); } /** @@ -68,6 +93,11 @@ class ASRManager { * @param {string} conversationId - 对话 ID */ async initialize(conversationId = null) { + if (this._initializePromise) { + return await this._initializePromise; + } + + this._initializePromise = (async () => { try { logger.log('Initializing ASRManager...'); @@ -95,14 +125,27 @@ class ASRManager { logger.log('ASR Config:', config); const pauseThresholdSec = Number(config?.sentence_pause_threshold) || 1.2; - this.SILENCE_TIMEOUT = Math.max(800, Math.round(pauseThresholdSec * 1000)); + // 仅云端(cloud)或者百度(baidu)允许更低的静音断句阈值; + // 注意:百度 WebSocket 自带断句,我们这里调高本地兜底阈值,避免干扰百度 + const 
isCloudModel = String(config?.model_name || '').includes('cloud') || String(config?.model_name || '').includes('baidu'); + const minSilenceMs = isCloudModel ? 3000 : 800; // 如果是云端/百度,本地静音兜底延长到 3s + this.SILENCE_TIMEOUT = Math.max(minSilenceMs, Math.round(pauseThresholdSec * 1000)); logger.log(`Silence timeout set to ${this.SILENCE_TIMEOUT}ms based on sentence_pause_threshold=${pauseThresholdSec}`); // 确定模型名称:优先使用配置中的值,默认为 'funasr-paraformer' (FunASR) - const modelName = config.model_name || 'funasr-paraformer'; + // Windows 也默认使用 FunASR ONNX + let modelName = config.model_name || 'funasr-paraformer'; + this.modelName = modelName; logger.log(`Selected ASR model: ${modelName}`); + if (this.eventEmitter && typeof this.whisperService.setProgressEmitter === 'function') { + this.whisperService.setProgressEmitter((progress) => this.emitDownloadProgress({ + ...progress, + modelId: progress?.modelId || modelName + })); + } + // 初始化 Whisper 服务 // 模型名称直接从配置获取,不再进行特定服务的名称转换 // 具体的服务实现应该处理模型名称的兼容性,或者由用户在设置中选择正确的模型 @@ -140,6 +183,11 @@ class ASRManager { logger.error('Error initializing ASRManager:', error); throw error; } + })().finally(() => { + this._initializePromise = null; + }); + + return await this._initializePromise; } /** @@ -180,12 +228,12 @@ class ASRManager { } const normalizedResult = this.normalizeText(trimmedText); - const punctuatedText = await this.applyPunctuationIfAvailable(normalizedResult, sourceId); - result.text = punctuatedText; + // 先使用未加标点的文本,后续异步补标点 + result.text = normalizedResult; const record = await this.saveRecognitionRecord(sourceId, result); // 添加到去重缓存 - this.addToRecognitionCache(sourceId, punctuatedText, result.endTime); + this.addToRecognitionCache(sourceId, normalizedResult, result.endTime); return { ...result, @@ -369,22 +417,15 @@ class ASRManager { const normalizedText = this.normalizeText(finalText); logger.log(`Normalized text: "${normalizedText}" (length: ${normalizedText.length})`); - // 【优化】2-pass 结果已经很准确,标点处理可选 - let punctuatedText 
= normalizedText; - if (!is2Pass) { - // 只对非 2-pass 结果应用标点(2-pass 通常自带标点) - punctuatedText = await this.applyPunctuationIfAvailable(normalizedText, sourceId); - } - logger.log(`Punctuated text: "${punctuatedText}" (length: ${punctuatedText ? punctuatedText.length : 0})`); - - if (!punctuatedText || !punctuatedText.trim()) { - logger.log(`Final sentence empty after normalization, skip saving: "${punctuatedText}"`); + // 先保存未加标点的文本,异步补标点 + if (!normalizedText || !normalizedText.trim()) { + logger.log(`Final sentence empty after normalization, skip saving: "${normalizedText}"`); return null; } logger.log(`Saving recognition record for conversation: ${this.currentConversationId}`); const record = await this.saveRecognitionRecord(sourceId, { - text: punctuatedText, + text: normalizedText, confidence: is2Pass ? 0.98 : 0.95, // 2-pass 结果置信度更高 startTime: Date.now() - this.SILENCE_TIMEOUT, endTime: Date.now(), @@ -394,19 +435,27 @@ class ASRManager { }); if (!record) { - logger.error(`Failed to save recognition record for: "${punctuatedText}"`); + logger.error(`Failed to save recognition record for: "${normalizedText}"`); return null; } logger.log(`Recognition record saved: ${record.id}`); // 添加到去重缓存 - this.addToRecognitionCache(sourceId, punctuatedText, Date.now()); + this.addToRecognitionCache(sourceId, normalizedText, Date.now()); // 【UI更新】发送正式消息 const message = await this.convertRecordToMessage(record.id, this.currentConversationId); logger.log(`Message created from speech: ${message.id}`); + // 异步补标点并更新 + this.enqueuePunctuationUpdate({ + recordId: record.id, + messageId: message.id, + text: normalizedText, + sourceId + }); + return message; } return null; @@ -533,10 +582,10 @@ class ASRManager { // 如果对话ID变化,更新它并清理上下文 if (conversationId && conversationId !== this.currentConversationId) { logger.log(`Conversation changed from ${this.currentConversationId} to ${conversationId}, clearing context`); - + // 【斩断所有分段】切换对话前提交所有进行中的分段 this.commitAllSegments(); - + 
this.currentConversationId = conversationId; // 【自动上下文学习】切换对话时清理旧上下文 @@ -592,6 +641,12 @@ class ASRManager { // 【斩断所有分段】停止时提交所有进行中的分段 this.commitAllSegments(); + // 【清理】通知后端关闭所有会话连接,避免产生 -3101 等超时报错 + if (typeof this.whisperService.sendResetCommand === 'function') { + await this.whisperService.sendResetCommand('speaker1'); + await this.whisperService.sendResetCommand('speaker2'); + } + logger.log('ASR stopped'); } catch (error) { logger.error('Error stopping ASR:', error); @@ -623,18 +678,7 @@ class ASRManager { * @returns {boolean} 是否是重复 */ isDuplicateRecognition(sourceId, text, timestamp) { - const cache = this.recentRecognitionCache.get(sourceId) || []; - const trimmedText = this.normalizeForComparison(text); - - // 检查最近几秒内是否有相同的文本 - const recentThreshold = timestamp - this.duplicateThreshold; - for (const item of cache) { - if (item.timestamp >= recentThreshold && item.text.toLowerCase() === trimmedText) { - return true; - } - } - - return false; + return this.recognitionCache.isDuplicate(sourceId, text, timestamp); } /** @@ -644,17 +688,7 @@ class ASRManager { * @param {number} timestamp - 时间戳 */ addToRecognitionCache(sourceId, text, timestamp) { - if (!this.recentRecognitionCache.has(sourceId)) { - this.recentRecognitionCache.set(sourceId, []); - } - - const cache = this.recentRecognitionCache.get(sourceId); - cache.push({ text: this.normalizeForComparison(text), timestamp }); - - // 清理过期的缓存(保留最近10秒) - const cutoffTime = timestamp - 10000; - const filtered = cache.filter(item => item.timestamp > cutoffTime); - this.recentRecognitionCache.set(sourceId, filtered); + this.recognitionCache.add(sourceId, text, timestamp); } /** @@ -663,13 +697,7 @@ class ASRManager { * @returns {string} */ normalizeText(text) { - if (!text) return ''; - let normalized = text; - normalized = normalized.replace(/([\u4E00-\u9FFF])\s+(?=[\u4E00-\u9FFF])/g, '$1'); - normalized = normalized.replace(/\s+([,。!?、,.!?])/g, '$1'); - normalized = normalized.replace(/([,。!?、,.!?])\s+/g, '$1'); 
- normalized = normalized.replace(/\s{2,}/g, ' '); - return normalized.trim(); + return this.recognitionCache.normalizeText(text); } /** @@ -678,8 +706,7 @@ class ASRManager { * @returns {string} */ normalizeForComparison(text) { - const normalized = this.normalizeText(text || '').toLowerCase(); - return normalized.replace(/[,。!?、,.!?]/g, ''); + return this.recognitionCache.normalizeForComparison(text); } async applyPunctuationIfAvailable(text, sourceId) { @@ -718,193 +745,40 @@ class ASRManager { return text; } - clearAllSilenceTimers() { - for (const timer of this.silenceTimers.values()) { - clearTimeout(timer); - } - this.silenceTimers.clear(); - } - /** - * 【混合分句】处理句子完成事件(由 Python 端触发) - * 使用 UPSERT 机制:同一分段内更新现有消息,而不是创建新消息 - * @param {Object} result - 句子完成结果 + * 异步补标点并更新记录与消息 */ - async handleSentenceComplete(result) { - try { - const { sessionId, text, timestamp, trigger, audioDuration, isSegmentEnd } = result; - - // 【斩断信号】如果收到段落结束信号(如 ready_to_stop),先斩断当前分段 - if (isSegmentEnd) { - logger.log(`[Sentence Complete] Segment end signal received for ${sessionId}`); - // 如果没有文本,只是纯粹的斩断信号 - if (!text) { - this.commitCurrentSegment(sessionId); - return null; - } - } - - if (!text || !text.trim()) { - logger.log('[Sentence Complete] Empty text, skipping.'); - // 如果是段落结束信号,仍然需要斩断 - if (isSegmentEnd) { - this.commitCurrentSegment(sessionId); - } - return null; - } - - // 规范化文本 - const normalizedText = this.normalizeText(text); - if (!normalizedText) { - logger.log('[Sentence Complete] Normalized text empty, skipping.'); - return null; - } - - // 获取当前分段状态 - const currentSegment = this.currentSegments.get(sessionId); - - // 【UPSERT 逻辑】检查是否已有进行中的消息 - if (currentSegment && currentSegment.messageId) { - // 检查文本是否有实质性变化(避免无意义的 UPDATE) - if (currentSegment.lastText === normalizedText) { - logger.log(`[Sentence Complete] Text unchanged, skipping update: "${normalizedText.substring(0, 30)}..."`); - return null; + enqueuePunctuationUpdate({ recordId, messageId, text, sourceId }) 
{ + if (!text || !recordId || !messageId) return; + (async () => { + try { + const punctuated = await this.applyPunctuationIfAvailable(text, sourceId); + if (!punctuated || punctuated === text) { + return; } - - logger.log(`[Sentence Complete] Updating existing message ${currentSegment.messageId}: "${normalizedText.substring(0, 50)}..." (trigger: ${trigger})`); - - // UPDATE 现有记录 - const updatedRecord = this.db.updateSpeechRecord(currentSegment.recordId, { - recognized_text: normalizedText, - end_time: timestamp, - audio_duration: audioDuration || (timestamp - (currentSegment.startTime || timestamp)) / 1000 + // 更新语音记录 + this.db.updateSpeechRecord(recordId, { + recognized_text: punctuated }); - - if (!updatedRecord) { - logger.error(`[Sentence Complete] Failed to update speech record: ${currentSegment.recordId}`); - return null; - } - - // UPDATE 消息内容 - const updatedMessage = this.db.updateMessage(currentSegment.messageId, { - content: normalizedText + // 更新消息 + const updatedMessage = this.db.updateMessage(messageId, { + content: punctuated }); - - if (!updatedMessage) { - logger.error(`[Sentence Complete] Failed to update message: ${currentSegment.messageId}`); - return null; - } - - // 更新分段状态 - currentSegment.lastText = normalizedText; - this.currentSegments.set(sessionId, currentSegment); - - // 发送更新事件给渲染进程(使用 update 类型) - if (this.eventEmitter) { - updatedMessage.source_id = sessionId; + if (updatedMessage && this.eventEmitter) { + updatedMessage.source_id = sourceId; this.eventEmitter('asr-sentence-update', updatedMessage); } - - return updatedMessage; - } - - // 检查是否是重复的识别结果(仅对新消息检查) - logger.log(`[Sentence Complete] Creating new message: "${normalizedText.substring(0, 50)}..." (trigger: ${trigger}, session: ${sessionId})`); - - // INSERT 新记录 - const record = await this.saveRecognitionRecord(sessionId, { - text: normalizedText, - confidence: trigger === 'punctuation' ? 
0.98 : 0.95, - startTime: timestamp - (audioDuration || this.SILENCE_TIMEOUT), - endTime: timestamp, - audioDuration: audioDuration || this.SILENCE_TIMEOUT / 1000, - isPartial: false, - audioData: null - }); - - if (!record) { - logger.error(`[Sentence Complete] Failed to save record for: "${normalizedText}"`); - return null; - } - - // 添加到去重缓存 - this.addToRecognitionCache(sessionId, normalizedText, timestamp); - - // 转换为消息 - const message = await this.convertRecordToMessage(record.id, this.currentConversationId); - message.source_id = sessionId; - logger.log(`[Sentence Complete] Message created: ${message.id}`); - - // 【保存当前分段状态】后续更新将 UPDATE 这条消息 - this.currentSegments.set(sessionId, { - messageId: message.id, - recordId: record.id, - lastText: normalizedText, - startTime: timestamp - (audioDuration || this.SILENCE_TIMEOUT) - }); - - // 发送事件给渲染进程(实时更新UI) - if (this.eventEmitter) { - logger.log(`[Sentence Complete] Sending event to renderer: ${message.id}`); - this.clearStreamingSegment(sessionId); - this.eventEmitter('asr-sentence-complete', message); - } else { - logger.warn('[Sentence Complete] No event emitter set, UI will not update in real-time'); - } - - // 清除静音定时器(句子已由 Python 端提交) - const pendingTimer = this.silenceTimers.get(sessionId); - if (pendingTimer) { - clearTimeout(pendingTimer); - this.silenceTimers.delete(sessionId); - } - - // 【斩断】如果是段落结束信号,在保存消息后斩断 - if (isSegmentEnd) { - logger.log(`[Sentence Complete] Committing segment after creating message: ${sessionId}`); - this.commitCurrentSegment(sessionId); + } catch (err) { + logger.warn(`Punctuation async update failed: ${err.message}`); } - - return message; - } catch (error) { - logger.error('[Sentence Complete] Error:', error); - return null; - } + })(); } - /** - * 【混合分句】处理实时字幕(由 Python 端触发) - * @param {Object} result - 实时字幕结果 - */ - handlePartialResult(result) { - try { - const { sessionId, partialText, fullText, timestamp } = result; - - if (!partialText && !fullText) { - return; - } - - const 
normalizedPartial = this.normalizeText(fullText || partialText || ''); - if (!normalizedPartial) { - return; - } - - // 重置静音定时器(用户正在说话) - const existingTimer = this.silenceTimers.get(sessionId); - if (existingTimer) { - clearTimeout(existingTimer); - } - this.isSpeaking = true; - - // 设置新的静音定时器(兜底机制) - const timer = setTimeout(() => this.triggerSilenceCommit(sessionId), this.SILENCE_TIMEOUT); - this.silenceTimers.set(sessionId, timer); - - const effectiveTimestamp = timestamp || Date.now(); - this.emitStreamingUpdate(sessionId, normalizedPartial, effectiveTimestamp); - } catch (error) { - logger.error('[Partial Result] Error:', error); + clearAllSilenceTimers() { + for (const timer of this.silenceTimers.values()) { + clearTimeout(timer); } + this.silenceTimers.clear(); } /** diff --git a/desktop/src/asr/asr-service.js b/desktop/src/asr/asr-service.js index 2c0d5d2..24b41cf 100644 --- a/desktop/src/asr/asr-service.js +++ b/desktop/src/asr/asr-service.js @@ -1,943 +1,563 @@ -import path from 'path'; import fs from 'fs'; -import http from 'http'; -import os from 'os'; +import path from 'path'; +import { app } from 'electron'; import { spawn } from 'child_process'; import { setTimeout as delay } from 'node:timers/promises'; -import WebSocket from 'ws'; -import { app } from 'electron'; import killPort from 'kill-port'; +import portfinder from 'portfinder'; +import treeKill from 'tree-kill'; +import WebSocket from 'ws'; import * as logger from '../utils/logger.js'; +import { + safeDirSize, + getRepoPathsForModel, + resolveModelCache, + resolveFunasrModelScopeCache, + cleanModelScopeLocks +} from './model-cache.js'; +import { float32ToInt16Buffer, ensureDir, createWavBuffer, defaultAudioStoragePath } from './audio-utils.js'; +import FastAPISession from './fastapi-session.js'; import { getAsrModelPreset } from '../shared/asr-models.js'; -const DEFAULT_WLK_HOST = '127.0.0.1'; -const DEFAULT_WLK_PORT = Number(process.env.WHISPERLIVEKIT_PORT || 18765); -const PCM_SAMPLE_RATE 
= 16000; -const MAX_LINE_HISTORY = 200; -const DEFAULT_SILENCE_THRESHOLD_SECONDS = Number(process.env.WHISPERLIVEKIT_SILENCE_THRESHOLD || 0.6); -const DEFAULT_MAX_SENTENCE_CHARS = Number(process.env.WHISPERLIVEKIT_MAX_SENTENCE_CHARS || 50); - -function getAppModelCacheDir() { - try { - return path.join(app.getPath('userData'), 'hf-home', 'hub'); - } catch { - return null; - } -} - -function getModelCacheCandidates() { - const homeDir = os.homedir(); - const appCacheDir = getAppModelCacheDir(); - const candidates = [ - process.env.ASR_CACHE_DIR, - process.env.HF_HOME ? path.join(process.env.HF_HOME, 'hub') : null, - appCacheDir, - homeDir ? path.join(homeDir, '.cache', 'huggingface', 'hub') : null, - homeDir ? path.join(homeDir, '.cache', 'modelscope', 'hub') : null, - ]; - return [...new Set(candidates.filter(Boolean))]; -} - -function ensureDirectoryExists(dir) { - if (!dir) return; - try { - fs.mkdirSync(dir, { recursive: true }); - } catch { - // ignore failures - } -} - -// 句子级标点和启发式配置(主要面向中文) -const SENTENCE_END_PUNCTUATION = new Set('。!?!?;;'); -const CLAUSE_PUNCTUATION = new Set(',,、::'); -const QUESTION_SUFFIXES = new Set(['吗', '么', '呢', '?', '?']); -const EXCLAMATION_SUFFIXES = new Set(['啊', '呀', '!', '!']); - -function float32ToInt16Buffer(floatArray) { - const int16Array = new Int16Array(floatArray.length); - for (let i = 0; i < floatArray.length; i += 1) { - const sample = Math.max(-1, Math.min(1, floatArray[i])); - int16Array[i] = sample < 0 ? 
sample * 0x8000 : sample * 0x7FFF; - } - return Buffer.from(int16Array.buffer); -} - -function parseClockToSeconds(clock) { - if (!clock) return 0; - const parts = clock.split(':').map((value) => Number(value)); - if (parts.some(Number.isNaN)) { - return 0; - } - return parts.reduce((acc, part) => acc * 60 + part, 0); -} - -/** - * 文本分句器(仅在 WhisperLiveKit 流水线中使用) - * - * - 在客户端维护一个原始文本缓冲区(rawBuffer),按时间间隔和长度做启发式断句 - * - 只依赖后端给出的 text/start/end,不修改 ASR 服务器行为 - * - 返回值中同时包含: - * - text: 用于 UI 展示的带标点句子 - * - rawLength: 该句在原始缓冲区中消耗的字符数(不包含我们补的标点) - */ -class TextSegmenter { - constructor(config = {}) { - this.rawBuffer = ''; - this.lastEndSeconds = 0; - this.config = { - silenceThresholdSec: config.silenceThresholdSec ?? DEFAULT_SILENCE_THRESHOLD_SECONDS, - maxSentenceChars: config.maxSentenceChars ?? DEFAULT_MAX_SENTENCE_CHARS, - }; - } - - /** - * 处理一条 WhisperLiveKit 的 line - * @param {{ start: string, end: string, text: string }} line - * @returns {{ text: string, rawLength: number }[]} 确认的句子列表 - */ - processLine(line) { - const sentences = []; - const rawText = (line.text || ''); - if (!rawText.trim()) { - this.lastEndSeconds = parseClockToSeconds(line.end); - return sentences; - } - - const startSeconds = parseClockToSeconds(line.start); - const endSeconds = parseClockToSeconds(line.end); - - // 1. 基于时间间隔的断句:如果当前片段和上一个片段之间静音较长,则认为上一句已经结束 - if (this.rawBuffer) { - const gap = startSeconds - this.lastEndSeconds; - if (gap > this.config.silenceThresholdSec) { - const flushed = this.flushInternal(); - if (flushed) { - sentences.push(flushed); - } - } - } - - // 2. 追加当前文本到原始缓冲区 - this.rawBuffer += rawText; - this.lastEndSeconds = endSeconds; - - // 3. 
基于长度/标点的兜底切分,避免一句话过长 - this.drainByLengthAndPunctuation(sentences); - - return sentences; - } - - /** - * 根据当前 rawBuffer 的长度和内部标点,尝试切出完整句子 - * @param {Array} outSentences - */ - drainByLengthAndPunctuation(outSentences) { - const { maxSentenceChars } = this.config; - // 循环处理,直到缓冲区长度在安全范围内 - // 或者再也找不到合理的切分点 - // 注意:这里完全基于原始文本,不依赖我们补的标点 - while (this.rawBuffer && this.rawBuffer.length >= maxSentenceChars) { - const boundaryIndex = this.findLastBoundaryIndex(this.rawBuffer, maxSentenceChars); - if (boundaryIndex === -1) { - // 找不到合适的边界,只能整体作为一句 - const flushed = this.flushInternal(); - if (flushed) { - outSentences.push(flushed); - } - break; - } - - const rawSentence = this.rawBuffer.slice(0, boundaryIndex + 1); - this.rawBuffer = this.rawBuffer.slice(boundaryIndex + 1); - - const finalized = this.finalizeRawSentence(rawSentence); - if (finalized) { - outSentences.push(finalized); - } - } - } - - /** - * 在给定窗口内,从后往前寻找最近的断句边界(句末标点优先,其次逗号等) - * @param {string} text - * @param {number} window - * @returns {number} 边界下标,找不到则返回 -1 - */ - findLastBoundaryIndex(text, window) { - const searchEnd = Math.min(text.length, window); - for (let i = searchEnd - 1; i >= 0; i -= 1) { - const ch = text[i]; - if (SENTENCE_END_PUNCTUATION.has(ch) || CLAUSE_PUNCTUATION.has(ch)) { - return i; - } - } - return -1; - } - - /** - * 将原始句子转换为最终展示文本,并返回其原始长度 - * @param {string} rawSentence - * @returns {{ text: string, rawLength: number } | null} - */ - finalizeRawSentence(rawSentence) { - if (!rawSentence) return null; - const raw = rawSentence; - const trimmed = raw.trim(); - if (!trimmed) return null; - const textWithPunctuation = this.applyPunctuation(trimmed); - return { - text: textWithPunctuation, - rawLength: raw.length, - }; - } - - /** - * 内部 flush,不重置时间,只消费 rawBuffer - * @returns {{ text: string, rawLength: number } | null} - */ - flushInternal() { - if (!this.rawBuffer) return null; - const rawSentence = this.rawBuffer; - this.rawBuffer = ''; - return 
this.finalizeRawSentence(rawSentence); - } - - /** - * 对句子末尾补充合理的标点(仅在没有终止符时才补) - * @param {string} text - * @returns {string} - */ - applyPunctuation(text) { - if (!text) return text; - const lastChar = text[text.length - 1]; - if (SENTENCE_END_PUNCTUATION.has(lastChar)) { - return text; - } - - // 根据句末语气词猜测问号/感叹号 - if (QUESTION_SUFFIXES.has(lastChar)) { - return `${text}?`; - } - if (EXCLAMATION_SUFFIXES.has(lastChar)) { - return `${text}!`; - } - - // 如果以逗号、顿号等结束,将其升级为句号 - if (CLAUSE_PUNCTUATION.has(lastChar)) { - return `${text.slice(0, -1)}。`; - } - - return `${text}。`; - } - - /** - * 显式 flush:通常在会话结束或服务器 ready_to_stop 时调用 - * @returns {{ text: string, rawLength: number }[]} 剩余句子(最多一条) - */ - flush() { - const result = this.flushInternal(); - if (!result) return []; - return [result]; - } - - reset() { - this.rawBuffer = ''; - this.lastEndSeconds = 0; - } -} - -class WhisperLiveKitSession { - constructor({ - sourceId, - wsUrl, - onSentence, - onPartial, - }) { - this.sourceId = sourceId; - this.wsUrl = wsUrl; - this.onSentence = onSentence; - this.onPartial = onPartial; - this.ws = null; - this.isReady = false; - this.segmenter = new TextSegmenter(); - this.pendingBuffers = []; - this.sentLineIds = new Set(); - this.lineOrder = []; - this.lastPartialText = ''; - // 记录已经"确认"的原始字符数,用于与 buffer_transcription 对齐 - this.rawCommittedChars = 0; - // 【修复累积问题】追踪每个 line.start 对应的已处理文本长度 - // 用于检测是否是同一个 line 的更新(累积结果) - this.processedLineStarts = new Map(); // start -> { endTime, textLength } - this.connect(); - } - - setSentenceCallback(callback) { - this.onSentence = callback; - } - - setPartialCallback(callback) { - this.onPartial = callback; - } - - connect() { - this.ws = new WebSocket(this.wsUrl); - this.ws.binaryType = 'arraybuffer'; - - this.ws.on('open', () => { - logger.log(`[WhisperLiveKit][${this.sourceId}] WebSocket connected`); - this.isReady = true; - this.flushPendingBuffers(); - }); - - this.ws.on('message', (data) => { - this.handleMessage(data); - }); 
- - this.ws.on('close', () => { - logger.log(`[WhisperLiveKit][${this.sourceId}] WebSocket closed`); - this.isReady = false; - }); - - this.ws.on('error', (error) => { - logger.error(`[WhisperLiveKit][${this.sourceId}] WebSocket error:`, error); - }); - } - - flushPendingBuffers() { - if (!this.ws || this.ws.readyState !== WebSocket.OPEN) { - return; - } - while (this.pendingBuffers.length > 0) { - const buffer = this.pendingBuffers.shift(); - this.ws.send(buffer); - } - } - - sendAudio(buffer) { - if (!this.ws || this.ws.readyState !== WebSocket.OPEN) { - this.pendingBuffers.push(buffer); - return; - } - this.ws.send(buffer); - } - - handleMessage(data) { - let payload; - try { - payload = JSON.parse(data.toString()); - } catch (error) { - logger.warn('[WhisperLiveKit] Failed to parse message:', error); - return; - } - - if (!payload) { - return; - } - - if (payload.type === 'config') { - return; - } - - // 会话结束信号:flush 剩余缓冲区 - if (payload.type === 'ready_to_stop') { - const flushedSentences = this.segmenter.flush(); - const timestamp = Date.now(); - flushedSentences.forEach((sentence, index) => { - if (!sentence || !sentence.text) return; - this.rawCommittedChars += sentence.rawLength || 0; - if (this.onSentence) { - this.onSentence({ - sessionId: this.sourceId, - text: sentence.text, - timestamp, - trigger: 'whisperlivekit', - audioDuration: null, - language: payload.detected_language || null, - // 最后一条 flush 的句子标记为段落结束,触发斩断 - isSegmentEnd: index === flushedSentences.length - 1, - }); - } - }); - // 即使没有 flush 出句子,也要通知斩断(会话结束) - if (flushedSentences.length === 0 && this.onSentence) { - this.onSentence({ - sessionId: this.sourceId, - text: null, - timestamp, - trigger: 'ready_to_stop', - isSegmentEnd: true, - }); - } - return; - } - - if (payload.error) { - logger.warn(`[WhisperLiveKit][${this.sourceId}] Error from backend: ${payload.error}`); - return; - } - - const timestamp = Date.now(); - - if (Array.isArray(payload.lines)) { - payload.lines.forEach((line) => 
{ - const text = (line.text || '').trim(); - if (!text) { - return; - } - - const startSeconds = parseClockToSeconds(line.start); - const endSeconds = parseClockToSeconds(line.end); - const duration = Math.max(0, endSeconds - startSeconds); - - // 【修复累积问题】检测是否是同一个 line 的更新 - // WhisperLiveKit 发送的是累积结果,同一个 start 的 line 会不断更新 - const processedInfo = this.processedLineStarts.get(line.start); - if (processedInfo) { - // 如果已经处理过这个 start,检查文本是否有新增 - const prevTextLength = processedInfo.textLength; - if (text.length <= prevTextLength) { - // 文本没有新增,跳过(可能是重复或回退) - return; - } - // 文本有新增,只处理增量部分 - const incrementalText = text.slice(prevTextLength); - if (!incrementalText.trim()) { - return; - } - // 更新记录 - this.processedLineStarts.set(line.start, { - endTime: endSeconds, - textLength: text.length, - }); - // 创建增量 line 对象 - const incrementalLine = { - start: line.start, - end: line.end, - text: incrementalText, - detected_language: line.detected_language, - }; - // 处理增量 - const sentences = this.segmenter.processLine(incrementalLine); - sentences.forEach((sentence, index) => { - if (!sentence || !sentence.text) return; - this.rawCommittedChars += sentence.rawLength || 0; - if (this.onSentence) { - const isSegmentEnd = index < sentences.length - 1; - this.onSentence({ - sessionId: this.sourceId, - text: sentence.text, - timestamp, - trigger: 'whisperlivekit', - audioDuration: duration, - language: line.detected_language || null, - isSegmentEnd, - }); - } - }); - return; - } - - // 新的 start,正常处理 - this.processedLineStarts.set(line.start, { - endTime: endSeconds, - textLength: text.length, - }); - - // 清理过期的 start 记录(保留最近的) - if (this.processedLineStarts.size > MAX_LINE_HISTORY) { - const oldestStart = this.processedLineStarts.keys().next().value; - this.processedLineStarts.delete(oldestStart); - } - - // 旧的 lineId 去重逻辑仍然保留,作为额外的安全网 - const lineId = `${line.start}-${line.end}-${text}`; - if (this.sentLineIds.has(lineId)) { - return; - } - this.sentLineIds.add(lineId); - 
this.lineOrder.push(lineId); - if (this.lineOrder.length > MAX_LINE_HISTORY) { - const oldest = this.lineOrder.shift(); - this.sentLineIds.delete(oldest); - } - - // 将行文本送入分句器,按句子粒度输出 - const sentences = this.segmenter.processLine(line); - sentences.forEach((sentence, index) => { - if (!sentence || !sentence.text) return; - // 记录在原始文本中已经"确认"的字符数,用于后续 partial 去重 - this.rawCommittedChars += sentence.rawLength || 0; - if (this.onSentence) { - // 如果分句器输出了多个句子,前面的句子标记为段落结束(基于静音间隔的断句) - // 只有最后一个句子继续保持"进行中"状态 - const isSegmentEnd = index < sentences.length - 1; - this.onSentence({ - sessionId: this.sourceId, - text: sentence.text, - timestamp, - trigger: 'whisperlivekit', - audioDuration: duration, - language: line.detected_language || null, - isSegmentEnd, - }); - } - }); - }); - } - - // 处理流式 partial,减去已经确认的原始文本部分 - const fullRaw = payload.buffer_transcription || ''; - if (typeof fullRaw === 'string' && fullRaw.length > 0) { - let rawPartial = fullRaw; - if (this.rawCommittedChars > 0 && fullRaw.length > this.rawCommittedChars) { - rawPartial = fullRaw.slice(this.rawCommittedChars); - } else if (this.rawCommittedChars >= fullRaw.length) { - rawPartial = ''; - } - - const partialText = rawPartial.trim(); - if (partialText && partialText !== this.lastPartialText) { - this.lastPartialText = partialText; - if (this.onPartial) { - this.onPartial({ - sessionId: this.sourceId, - partialText, - fullText: fullRaw, - timestamp, - isSpeaking: true, - }); - } - } - } - } +const DEFAULT_HOST = '127.0.0.1'; +const SERVER_READY_TEXT = 'Application startup complete'; - reset() { - this.lastPartialText = ''; - this.sentLineIds.clear(); - this.lineOrder = []; - this.segmenter.reset(); - this.rawCommittedChars = 0; - // 【修复累积问题】清理 line start 追踪 - this.processedLineStarts.clear(); +function detectPythonPath() { + const envPython = process.env.ASR_PYTHON_PATH; + if (envPython && fs.existsSync(envPython)) { + return envPython; } - - close() { - if (this.ws && (this.ws.readyState === 
WebSocket.OPEN || this.ws.readyState === WebSocket.CONNECTING)) { - this.ws.close(); - } - this.pendingBuffers = []; + const projectRoot = path.resolve(app.getAppPath(), app.isPackaged ? '../..' : '.'); + const venvPy = path.join(projectRoot, '.venv', process.platform === 'win32' ? 'Scripts' : 'bin', process.platform === 'win32' ? 'python.exe' : 'python3'); + if (fs.existsSync(venvPy)) { + return venvPy; } + return process.platform === 'win32' ? 'python' : 'python3'; } class ASRService { constructor() { - this.modelName = 'medium'; - this.language = 'zh'; - this.backendPolicy = 'simulstreaming'; - this.serverHost = DEFAULT_WLK_HOST; - this.serverPort = DEFAULT_WLK_PORT; - this.pythonPath = this.detectPythonPath(); - this.wlkProcess = null; + this.modelName = 'siliconflow-cloud'; + this.engine = 'funasr'; + this.pythonPath = detectPythonPath(); this.isInitialized = false; - this.whisperLiveKitReady = false; + this.lastProgressBytes = 0; + this.lastProgressTimestamp = 0; + this.modelPreset = null; + this.modelCacheDir = null; + this.modelCacheFound = false; + this.modelCachePreDownloaded = false; + this.shouldReportProgress = true; + this.serverProcess = null; + this.isDownloading = false; + this.serverHost = DEFAULT_HOST; + this.serverPort = null; + this.serverReady = false; this.sessions = new Map(); this.onSentenceComplete = null; this.onPartialResult = null; - this.onServerCrash = null; // 服务器崩溃回调 + this.onServerCrash = null; + this.progressEmitter = null; this.retainAudioFiles = false; - this.serverStartRetries = 0; - this.maxServerRetries = 3; - - this.tempDir = path.join(app.getPath('temp'), 'asr'); - if (!fs.existsSync(this.tempDir)) { - fs.mkdirSync(this.tempDir, { recursive: true }); - } - - logger.log(`[WhisperLiveKit] Python path detected: ${this.pythonPath}`); - } - - resolveModelCacheDir(modelName) { - const candidates = getModelCacheCandidates(); - const preset = getAsrModelPreset(modelName); - const repoId = preset?.repoId || (typeof modelName === 
'string' && modelName.includes('/') ? modelName : null); - const repoSafe = repoId ? `models--${repoId.replace(/\//g, '--')}` : null; - const msRepoId = preset?.modelScopeRepoId; - - for (const candidate of candidates) { - if (!candidate) continue; - try { - if (repoSafe && fs.existsSync(path.join(candidate, repoSafe))) { - ensureDirectoryExists(candidate); - return candidate; - } - if (msRepoId && fs.existsSync(path.join(candidate, msRepoId))) { - ensureDirectoryExists(candidate); - return candidate; - } - } catch { - // 忽略探测过程中出现的问题,继续尝试下一个目录 - } - } - - const fallback = candidates[0] || getAppModelCacheDir() || path.join(os.homedir(), '.cache', 'huggingface', 'hub'); - ensureDirectoryExists(fallback); - return fallback; + this.audioStoragePath = defaultAudioStoragePath(); + ensureDir(this.audioStoragePath); + this.isStopping = false; // 标记是否为预期的关闭,避免误报崩溃 + // 防止并发 initialize/startBackendServer 造成重复拉起多个后端进程(导致内存飙升) + this._initializePromise = null; + this._startPromise = null; } - /** - * 设置服务器崩溃回调 - * @param {Function} callback - (exitCode) => void - */ setServerCrashCallback(callback) { this.onServerCrash = callback; } - detectPythonPath() { - const envPython = process.env.ASR_PYTHON_PATH; - if (envPython && fs.existsSync(envPython)) { - logger.log('[WhisperLiveKit] Using ASR_PYTHON_PATH'); - return envPython; - } - - const projectRoot = path.resolve(app.getAppPath(), app.isPackaged ? '../..' 
: '.'); - const venvPython = path.join(projectRoot, '.venv', 'bin', 'python'); - if (fs.existsSync(venvPython)) { - logger.log('[WhisperLiveKit] Using virtualenv python'); - return venvPython; - } + setProgressEmitter(emitter) { + this.progressEmitter = emitter; + } - return 'python3'; + emitDownloadProgress(payload = {}) { + if (typeof this.progressEmitter !== 'function') return; + this.progressEmitter({ + modelId: this.modelName, + engine: this.engine, + source: payload.source || 'preload', + ...payload, + }); } - async initialize(modelName = 'medium', options = {}) { - if (this.isInitialized) { - return true; + async initialize(modelName = 'siliconflow-cloud', options = {}) { + if (this._initializePromise) { + return await this._initializePromise; } - this.modelName = modelName || this.modelName; - this.retainAudioFiles = options.retainAudioFiles || false; - this.audioStoragePath = options.audioStoragePath || this.tempDir; - - await this.ensureWhisperLiveKitInstalled(); + this._initializePromise = (async () => { + this.modelName = modelName || this.modelName; + const preset = getAsrModelPreset(modelName); + this.modelPreset = preset; + this.engine = preset?.engine || 'funasr'; + this.retainAudioFiles = options.retainAudioFiles || false; + this.audioStoragePath = options.audioStoragePath || this.audioStoragePath; + ensureDir(this.audioStoragePath); - // 预加载模型(如果失败,服务器启动时会自动下载) - await this.preloadModel(); - - await this.startWhisperLiveKitServer(); + // 确保服务器在预加载时就启动(不要懒加载) + if (!this.serverProcess) { + await this.startBackendServer(); + } else if (!this.serverReady) { + // 如果进程存在但还没准备好,等待健康检查 + await this.waitForHealth(); + } + this.serverReady = true; + this.isInitialized = true; + return true; + })().finally(() => { + this._initializePromise = null; + }); - this.isInitialized = true; - logger.log('[WhisperLiveKit] Service initialized'); - return true; + return await this._initializePromise; } - async ensureWhisperLiveKitInstalled() { - if 
(this.whisperLiveKitReady) { - return; + async startBackendServer() { + if (this._startPromise) { + return await this._startPromise; } - - try { - await this.runPythonCommand(['-m', 'pip', 'show', 'whisperlivekit']); - await this.runPythonCommand(['-m', 'pip', 'show', 'faster-whisper']); - this.whisperLiveKitReady = true; - return; - } catch { - logger.log('[WhisperLiveKit] Installing whisperlivekit and faster-whisper via pip...'); + // 若已有进程,则不要重复拉起;只需等它 ready + if (this.serverProcess) { + if (!this.serverReady) { + await this.waitForHealth(); + } + return true; } - await this.runPythonCommand(['-m', 'pip', 'install', '--upgrade', 'whisperlivekit', 'faster-whisper']); - this.whisperLiveKitReady = true; - } + this._startPromise = (async () => { + // Pick a free port dynamically + const port = await portfinder.getPortPromise({ port: Number(process.env.ASR_PORT) || 18000 }); + this.serverPort = port; + this.isDownloading = false; - async preloadModel() { try { - logger.log(`[WhisperLiveKit] Preloading ${this.modelName} model...`); - // 使用faster-whisper直接加载模型来预热缓存,指定 download_root 确保使用正确的缓存目录 - const modelCacheDir = this.resolveModelCacheDir(this.modelName); - - await this.runPythonCommand([ - '-c', - `from faster_whisper import WhisperModel; print("Loading ${this.modelName} model from ${modelCacheDir}..."); model = WhisperModel('${this.modelName}', device='cpu', compute_type='int8', download_root='${modelCacheDir}'); print("${this.modelName} model loaded successfully")` - ]); - logger.log(`[WhisperLiveKit] ${this.modelName} model preloaded successfully`); - return true; - } catch (error) { - logger.warn(`[WhisperLiveKit] Model preload failed, will download during server start: ${error.message}`); - return false; + await killPort(port); + } catch { + // ignore } - } - runPythonCommand(args) { - return new Promise((resolve, reject) => { - const proc = spawn(this.pythonPath, args, { - env: { - ...process.env, - PYTHONUNBUFFERED: '1', - }, - }); + const projectRoot = 
app.getAppPath(); + const binName = process.platform === 'win32' ? 'asr-backend.exe' : 'asr-backend'; + const packagedBin = path.join(process.resourcesPath, 'backend', 'asr-backend', binName); + const backendEntry = app.isPackaged && fs.existsSync(packagedBin) + ? packagedBin + : path.join(projectRoot, 'backend', 'main.py'); - let stderr = ''; + const useBinary = app.isPackaged && fs.existsSync(packagedBin); - proc.stderr.on('data', (data) => { - stderr += data.toString(); - }); + if (!fs.existsSync(backendEntry)) { + throw new Error(`[ASR] Backend entry not found: ${backendEntry}`); + } - proc.on('error', (error) => { - reject(error); - }); + const { cacheDir, found } = resolveModelCache(this.modelName); + this.modelCacheDir = cacheDir; + this.modelCacheFound = found; - proc.on('close', (code) => { - if (code === 0) { - resolve(); - } else { - reject(new Error(stderr || `Python command failed with exit code ${code}`)); + // FunASR 模型:尝试自动复用已存在的 ModelScope 缓存,避免重复下载 + let msCacheEnv = process.env.MODELSCOPE_CACHE || path.join(app.getPath('userData'), 'ms-cache'); + if (this.engine === 'funasr' && this.modelPreset?.onnxModels) { + const funasrMs = resolveFunasrModelScopeCache(this.modelPreset); + if (funasrMs?.cacheDir) { + logger.log(`[ASR] FunASR ModelScope cache resolved: ${funasrMs.cacheDir} (found=${funasrMs.found})`); + msCacheEnv = funasrMs.cacheDir; + if (funasrMs.found) { + // 记录已存在缓存,便于后续进度判定 + this.modelCacheFound = true; + logger.log(`[ASR] Using existing ModelScope cache at: ${msCacheEnv}`); } - }); - }); - } - - async startWhisperLiveKitServer() { - if (this.wlkProcess) { - return; + } } - try { - await killPort(this.serverPort); - logger.log(`[WhisperLiveKit] Released port ${this.serverPort}`); - } catch (error) { - // Ignore errors if port wasn't occupied - logger.log(`[WhisperLiveKit] Port cleanup info: ${error.message}`); + // 计算已有缓存,若已下载完成则不再上报进度 + const presetSize = this.modelPreset?.sizeBytes || null; + const isRemote = 
this.modelPreset?.isRemote || false; + + const repoPathsSet = new Set(getRepoPathsForModel(this.modelPreset, cacheDir)); + // funasr 需要把 ModelScope onnx 目录也纳入探测 + if (this.engine === 'funasr' && this.modelPreset?.onnxModels && msCacheEnv) { + getRepoPathsForModel(this.modelPreset, msCacheEnv).forEach((p) => repoPathsSet.add(p)); + } + const repoPaths = Array.from(repoPathsSet); + const initialDownloaded = repoPaths.length ? repoPaths.reduce((sum, p) => sum + safeDirSize(p), 0) : 0; + + let cachedEnough = false; + if (isRemote) { + cachedEnough = true; + } else { + cachedEnough = presetSize + ? initialDownloaded >= presetSize * 0.9 + : initialDownloaded > 50 * 1024 * 1024; // 没有 size 时粗判>50MB + } + + this.modelCachePreDownloaded = cachedEnough; + this.shouldReportProgress = !cachedEnough; + + // Large 模型默认不使用量化,精度更高 + const isLargeModel = this.modelName.toLowerCase().includes('large'); + const useQuantize = this.modelPreset?.quantize !== false && !isLargeModel; + + // 如果检测到已有完整缓存,启用离线模式避免每次启动都联网检查版本 + // 对于远程模型,不需要离线模式 + const useOfflineMode = !isRemote && this.modelCacheFound && cachedEnough; + + const env = { + ...process.env, + ASR_ENGINE: this.engine, + ASR_MODEL: this.modelName, + ASR_HOST: this.serverHost, + ASR_PORT: String(this.serverPort), + ASR_QUANTIZE: useQuantize ? 'true' : 'false', + HF_HOME: process.env.HF_HOME || path.join(app.getPath('userData'), 'hf-home'), + ASR_CACHE_DIR: this.engine === 'funasr' ? msCacheEnv : cacheDir, + MODELSCOPE_CACHE: msCacheEnv, + MODELSCOPE_CACHE_HOME: msCacheEnv, + // 启用离线模式:跳过 ModelScope 版本检查,直接使用本地缓存 + MODELSCOPE_OFFLINE: useOfflineMode ? '1' : '', + HF_HUB_OFFLINE: useOfflineMode ? 
'1' : '', + PYTHONUNBUFFERED: '1', + PYTHONIOENCODING: 'utf-8', + // API Key for Cloud Mode + SILICONFLOW_API_KEY: process.env.SILICONFLOW_API_KEY || '', + BAIDU_APP_ID: process.env.BAIDU_APP_ID || '', + BAIDU_API_KEY: process.env.BAIDU_API_KEY || '', + BAIDU_SECRET_KEY: process.env.BAIDU_SECRET_KEY || '' + }; + + if (useOfflineMode) { + logger.log(`[ASR] Offline mode enabled: using local cache without version check`); } - // 等待一小段时间确保端口完全释放 - await delay(500); - - const modelCacheDir = this.resolveModelCacheDir(this.modelName); - - const args = [ - '-m', - 'whisperlivekit.basic_server', - '--model', - this.modelName, - '--model_cache_dir', - modelCacheDir, - '--language', - this.language, - '--host', - this.serverHost, - '--port', - String(this.serverPort), - '--backend-policy', - this.backendPolicy, - '--backend', - 'faster-whisper', - '--pcm-input', - ]; - - logger.log(`[WhisperLiveKit] Spawning server: ${this.pythonPath} ${args.join(' ')}`); - - // 使用 Promise 来跟踪服务器启动过程中的崩溃 - const serverStartPromise = new Promise((resolve, reject) => { - this.wlkProcess = spawn(this.pythonPath, args, { - env: { - ...process.env, - PYTHONUNBUFFERED: '1', - }, - }); - - let startupComplete = false; + ensureDir(env.HF_HOME); + ensureDir(cacheDir); + ensureDir(env.MODELSCOPE_CACHE); + cleanModelScopeLocks(env.MODELSCOPE_CACHE); - this.wlkProcess.stdout.on('data', (data) => { - logger.log(`[WhisperLiveKit][stdout] ${data.toString().trim()}`); - }); + if (useBinary) { + logger.log(`[ASR] Spawning packaged backend: ${backendEntry}`); + this.serverProcess = spawn(backendEntry, [], { env }); + } else { + logger.log(`[ASR] Spawning FastAPI backend: ${this.pythonPath} ${backendEntry} (engine=${this.engine}, model=${this.modelName}, port=${this.serverPort})`); + this.serverProcess = spawn(this.pythonPath, [backendEntry], { env }); + } - this.wlkProcess.stderr.on('data', (data) => { - logger.log(`[WhisperLiveKit][stderr] ${data.toString().trim()}`); - }); + logger.log(`[ASR] cache dir: 
${cacheDir} (found=${found})`); - this.wlkProcess.on('close', (code) => { - logger.warn(`[WhisperLiveKit] Server exited with code ${code}`); - this.wlkProcess = null; - this.isInitialized = false; + this.serverProcess.stdout.on('data', (data) => { + const text = data.toString(); + logger.log(`[ASR Backend][stdout] ${text.trim()}`); + if (text.includes(SERVER_READY_TEXT) || text.includes('Uvicorn running')) { + this.serverReady = true; + } + }); - // 如果服务器在启动阶段就崩溃了,reject promise - if (!startupComplete) { - reject(new Error(`Server crashed during startup with code ${code}`)); - return; + this.serverProcess.stderr.on('data', (data) => { + const text = data.toString(); + const lines = text.split('\n'); + for (const rawLine of lines) { + const line = rawLine.trim(); + if (!line) continue; + logger.log(`[ASR Backend][stderr] ${line}`); + + // 简单关键字检测:只要出现 Downloading Model 视为正在下载 + if (!this.isDownloading && line.includes('Downloading Model')) { + this.isDownloading = true; } + // 下载完成/就绪的关键字,清除下载标记 + if ( + this.isDownloading && + (line.includes('All models loaded successfully') + || line.includes('Worker is READY') + || line.includes('Worker is READY!') + || line.includes('Received READY signal')) + ) { + this.isDownloading = false; + } + } + }); - // 如果是启动后崩溃,通知上层 + this.serverProcess.on('close', (code, signal) => { + const isExpectedStop = this.isStopping || code === 0; + this.isDownloading = false; + if (isExpectedStop) { + logger.log(`[ASR Backend] exited normally (code=${code}, signal=${signal ?? 'none'})`); + } else { + logger.error(`[ASR Backend] exited with code ${code}, signal=${signal ?? 
'none'}`); if (this.onServerCrash) { this.onServerCrash(code); } - }); - - this.wlkProcess.on('error', (error) => { - logger.error(`[WhisperLiveKit] Server process error:`, error); - if (!startupComplete) { - reject(error); - } - }); + } + this.serverProcess = null; + this.serverReady = false; + this.isStopping = false; + }); - // 标记启动阶段完成的函数 - this._markStartupComplete = () => { - startupComplete = true; - resolve(); - }; + this.serverProcess.on('error', (error) => { + logger.error('[ASR Backend] process error:', error); + this.serverProcess = null; + this.serverReady = false; + this.isDownloading = false; }); - // 并行等待:服务器就绪 或 服务器崩溃 try { - await Promise.race([ - this.waitForServerReady().then(() => { - if (this._markStartupComplete) { - this._markStartupComplete(); + await this.waitForHealth(); + return true; + } catch (err) { + // 启动失败时务必清理子进程,避免后台残留导致内存飙升 + try { + this.isStopping = true; + if (this.serverProcess?.pid) { + try { + treeKill(this.serverProcess.pid); + } catch { + this.serverProcess.kill(); } - }), - serverStartPromise - ]); - } catch (error) { - // 服务器启动失败,尝试重试 - this.serverStartRetries++; - if (this.serverStartRetries < this.maxServerRetries) { - logger.warn(`[WhisperLiveKit] Server start failed, retry ${this.serverStartRetries}/${this.maxServerRetries}...`); - await delay(2000); // 等待2秒后重试 - return this.startWhisperLiveKitServer(); + } + } catch { + // ignore + } finally { + this.serverProcess = null; + this.serverReady = false; + this.isStopping = false; } - throw new Error(`Server failed to start after ${this.maxServerRetries} retries: ${error.message}`); + throw err; } + })().finally(() => { + this._startPromise = null; + }); - this.serverStartRetries = 0; // 重置重试计数 + return await this._startPromise; } - async waitForServerReady(timeoutMs = 60000) { // 增加到60秒,处理模型下载 + async waitForHealth(timeoutMs = 180000) { // 默认 3 分钟,会在有下载进展时自动延长 const start = Date.now(); + const maxTimeoutMs = Math.max(timeoutMs, 15 * 60 * 1000); // 最长 15 分钟 + let 
deadline = start + timeoutMs; + let lastProgressSeenAt = start; + const url = `http://${this.serverHost}:${this.serverPort}/health`; + const totalSize = this.modelPreset?.sizeBytes || null; + const repoPaths = getRepoPathsForModel(this.modelPreset, this.modelCacheDir); + const calcDownloaded = () => { + if (!repoPaths || repoPaths.length === 0) return 0; + let downloaded = 0; + for (const p of repoPaths) { + downloaded += safeDirSize(p); + } + return downloaded; + }; - const tryRequest = () => new Promise((resolve, reject) => { - const req = http.get({ - hostname: this.serverHost, - port: this.serverPort, - path: '/', - timeout: 2000, - }, (res) => { - res.resume(); - resolve(); + // 预先发送一次进度,便于 UI 立即显示(仅首次下载时) + if (this.shouldReportProgress && totalSize && repoPaths.length > 0) { + const initialDownloaded = calcDownloaded(); + this.lastProgressBytes = initialDownloaded; + this.lastProgressTimestamp = Date.now(); + this.emitDownloadProgress({ + status: 'start', + downloadedBytes: initialDownloaded, + totalBytes: totalSize, + bytesPerSecond: 0, + activeDownload: true, }); + } - req.on('error', reject); - req.on('timeout', () => { - req.destroy(new Error('timeout')); - }); - }); + let lastReportAt = 0; + let lastLogAt = 0; - while (Date.now() - start < timeoutMs) { + while (Date.now() < deadline) { try { - await tryRequest(); - logger.log('[WhisperLiveKit] Server is ready'); - return; + // Node 20 has global fetch + const res = await fetch(url, { method: 'GET' }); + if (res.ok) { + const finalDownloaded = calcDownloaded(); + if (this.shouldReportProgress) { + this.emitDownloadProgress({ + status: 'complete', + downloadedBytes: finalDownloaded, + totalBytes: totalSize || finalDownloaded, + bytesPerSecond: 0, + activeDownload: false, + isDownloaded: true, + }); + } + this.isDownloading = false; + this.serverReady = true; + return true; + } } catch { - await delay(500); + // ignore } - } + const waited = Date.now() - start; + const now = Date.now(); + + // 
进度上报(每秒一次,避免频繁扫描目录) + if (this.shouldReportProgress && now - lastReportAt >= 1000 && repoPaths.length > 0) { + const downloaded = calcDownloaded(); + const deltaBytes = downloaded - this.lastProgressBytes; + const deltaMs = now - this.lastProgressTimestamp || 1; + const bytesPerSecond = deltaMs > 0 ? deltaBytes / (deltaMs / 1000) : 0; + this.lastProgressBytes = downloaded; + this.lastProgressTimestamp = now; + lastReportAt = now; + + this.emitDownloadProgress({ + status: 'progress', + downloadedBytes: downloaded, + totalBytes: totalSize || downloaded, + bytesPerSecond, + activeDownload: true, + }); + if (deltaBytes > 0) { + lastProgressSeenAt = now; + // 有进展则滚动延时,最多不超过 maxTimeoutMs + const extended = Math.min(start + maxTimeoutMs, now + 120000); // 额外给 2 分钟窗口 + if (extended > deadline) { + deadline = extended; + } + } - throw new Error('WhisperLiveKit server failed to start within timeout'); + // 日志保持原有节奏(约 5s 一次) + if (now - lastLogAt >= 5000) { + let progressText = ''; + if (totalSize) { + const pct = Math.min(99, Math.max(0, Math.round((downloaded / totalSize) * 100))); + progressText = ` (approx ${pct}% of model cached)`; + } + logger.log(`[ASR] Waiting for backend health... 
${Math.round(waited / 1000)}s elapsed${progressText}`); + lastLogAt = now; + } + } + await delay(500); + } + if (this.shouldReportProgress) { + this.emitDownloadProgress({ + status: 'error', + downloadedBytes: this.lastProgressBytes, + totalBytes: totalSize || this.lastProgressBytes, + activeDownload: false, + message: 'FastAPI backend health check timeout', + }); + } + throw new Error('FastAPI backend health check timeout'); } - createSession(sourceId) { - const wsUrl = `ws://${this.serverHost}:${this.serverPort}/asr`; - const session = new WhisperLiveKitSession({ - sourceId, - wsUrl, - onSentence: (result) => { - if (this.onSentenceComplete) { - this.onSentenceComplete(result); - } - }, - onPartial: (result) => { - if (this.onPartialResult) { - this.onPartialResult(result); - } - }, - }); - this.sessions.set(sourceId, session); - return session; + getDownloadStatus() { + return { downloading: this.isDownloading }; } getSession(sourceId) { if (this.sessions.has(sourceId)) { return this.sessions.get(sourceId); } - return this.createSession(sourceId); - } - detectSilence(audioData) { - let sum = 0; - for (let i = 0; i < audioData.length; i += 1) { - sum += Math.abs(audioData[i]); - } - const average = sum / audioData.length; - return average < 0.0015; + const wsUrl = `ws://${this.serverHost}:${this.serverPort}/ws/transcribe?session_id=${encodeURIComponent(sourceId)}`; + const ws = new WebSocket(wsUrl); + + const session = new Promise((resolve, reject) => { + ws.on('open', () => { + const s = new FastAPISession(ws, sourceId, this.onSentenceComplete, this.onPartialResult); + resolve(s); + }); + ws.on('error', (err) => { + this.sessions.delete(sourceId); + reject(err); + }); + ws.on('close', () => { + this.sessions.delete(sourceId); + }); + }); + + this.sessions.set(sourceId, session); + return session; } async addAudioChunk(audioData, timestamp, sourceId = 'default') { if (!this.isInitialized) { - throw new Error('WhisperLiveKit service not initialized'); + throw new 
Error('ASR service not initialized'); } - if (!audioData || audioData.length === 0) { return null; } - - if (this.detectSilence(audioData)) { - return null; + if (!this.serverReady) { + await this.waitForHealth(); } - const session = this.getSession(sourceId); + const sessionPromise = this.getSession(sourceId); + const session = await sessionPromise; const buffer = float32ToInt16Buffer(audioData); session.sendAudio(buffer); - - // 识别结果通过回调异步返回 return null; } async sendResetCommand(sessionId) { - const session = this.sessions.get(sessionId); - if (session) { - session.reset(); - } + const sessionPromise = this.sessions.get(sessionId); + if (!sessionPromise) return; + const session = await sessionPromise; + session.reset(); } - async forceCommitSentence() { - // WhisperLiveKit 内部已经处理分句,额外的强制提交不需要 - return false; + async forceCommitSentence(sessionId) { + const sessionPromise = this.sessions.get(sessionId); + if (!sessionPromise) return false; + const session = await sessionPromise; + session.sendControl({ type: 'force_commit' }); + return true; } async commitSentence() { - // WhisperLiveKit 没有单独的提交接口,返回 null 以保持兼容 return null; } setSentenceCompleteCallback(callback) { this.onSentenceComplete = callback; - this.sessions.forEach((session) => session.setSentenceCallback(callback)); + this.sessions.forEach(async (sessionPromise) => { + try { + const session = await sessionPromise; + session.setCallbacks(this.onSentenceComplete, this.onPartialResult); + } catch { + // ignore + } + }); } setPartialResultCallback(callback) { this.onPartialResult = callback; - this.sessions.forEach((session) => session.setPartialCallback(callback)); + this.sessions.forEach(async (sessionPromise) => { + try { + const session = await sessionPromise; + session.setCallbacks(this.onSentenceComplete, this.onPartialResult); + } catch { + // ignore + } + }); } async stop() { - this.sessions.forEach((session) => session.close()); + for (const sessionPromise of this.sessions.values()) { + try { + 
const session = await sessionPromise; + session.close(); + } catch { + // ignore + } + } this.sessions.clear(); } async destroy() { await this.stop(); - if (this.wlkProcess) { - this.wlkProcess.kill(); - this.wlkProcess = null; + if (this.serverProcess) { + this.isStopping = true; + try { + treeKill(this.serverProcess.pid); + } catch { + this.serverProcess.kill(); + } + this.serverProcess = null; } + this.serverReady = false; this.isInitialized = false; } @@ -948,50 +568,17 @@ class ASRService { const filename = `${recordId}_${sourceId}.wav`; const conversationDir = path.join(this.audioStoragePath, conversationId); - if (!fs.existsSync(conversationDir)) { - fs.mkdirSync(conversationDir, { recursive: true }); - } + ensureDir(conversationDir); const filepath = path.join(conversationDir, filename); const float32Array = audioData instanceof Float32Array ? audioData : new Float32Array(audioData); - const wavBuffer = this.createWavBuffer(float32Array); + const wavBuffer = createWavBuffer(float32Array); fs.writeFileSync(filepath, wavBuffer); return filepath; } - createWavBuffer(audioData) { - const numChannels = 1; - const bitsPerSample = 16; - const bytesPerSample = bitsPerSample / 8; - const blockAlign = numChannels * bytesPerSample; - const dataLength = audioData.length * bytesPerSample; - const buffer = Buffer.alloc(44 + dataLength); - - buffer.write('RIFF', 0); - buffer.writeUInt32LE(36 + dataLength, 4); - buffer.write('WAVE', 8); - buffer.write('fmt ', 12); - buffer.writeUInt32LE(16, 16); - buffer.writeUInt16LE(1, 20); - buffer.writeUInt16LE(numChannels, 22); - buffer.writeUInt32LE(PCM_SAMPLE_RATE, 24); - buffer.writeUInt32LE(PCM_SAMPLE_RATE * blockAlign, 28); - buffer.writeUInt16LE(blockAlign, 32); - buffer.writeUInt16LE(bitsPerSample, 34); - buffer.write('data', 36); - buffer.writeUInt32LE(dataLength, 40); - - for (let i = 0; i < audioData.length; i += 1) { - const sample = Math.max(-1, Math.min(1, audioData[i])); - const int16 = sample < 0 ? 
sample * 0x8000 : sample * 0x7FFF; - buffer.writeInt16LE(int16, 44 + i * 2); - } - - return buffer; - } - clearContext() { - // WhisperLiveKit 自带上下文管理,不需要额外处理 + // handled server side } } diff --git a/desktop/src/asr/audio-capture-service.js b/desktop/src/asr/audio-capture-service.js index 4c7a0d8..2514aed 100644 --- a/desktop/src/asr/audio-capture-service.js +++ b/desktop/src/asr/audio-capture-service.js @@ -24,6 +24,17 @@ class AudioCaptureService { this.silenceSkipCount = new Map(); // sourceId -> 连续跳过静音的次数 this.maxSilenceSkipLog = 5; // 最多打印几次静音跳过日志 + // 【断句】基于静音时长的分句(用于生成多条消息) + // 注意:这是“停顿时长阈值”(秒/毫秒),不同于上面的能量阈值 silenceThreshold + this.sentencePauseThresholdMs = 600; // 默认更灵敏(0.6s),会自动从 ASR 默认配置刷新 + this._vadConfigLastRefreshAt = 0; + this.enableSilenceSentenceCommit = false; // 仅云端 ASR 启用,FunASR 不受影响 + this.shouldSkipSilence = true; // 是否在本地跳过静音包(百度需要设为 false 以防 -3101) + this.silenceDurationMs = new Map(); // sourceId -> 连续静音累计时长(ms),仅在 inSpeech=true 时累积 + this.inSpeech = new Map(); // sourceId -> 是否处于“说话段”中(只要发送过非静音音频即认为进入) + this.lastSilenceCommitAt = new Map(); // sourceId -> 上次触发断句的时间戳(ms) + this.silenceCommitCooldownMs = 500; // 防抖,避免同一段静音重复触发 + // 音频数据累积 this.audioAccumulators = new Map(); // sourceId -> Float32Array this.lastSendTime = new Map(); // sourceId -> timestamp @@ -91,6 +102,8 @@ class AudioCaptureService { await this.initialize(); } + await this.refreshVadConfigFromASRDefault(); + // 如果已经在捕获,先停止 if (this.streams.has(sourceId)) { await this.stopCapture(sourceId); @@ -135,6 +148,8 @@ class AudioCaptureService { await this.initialize(); } + await this.refreshVadConfigFromASRDefault(); + // 如果已经在捕获,先停止 if (this.streams.has(sourceId)) { await this.stopCapture(sourceId); @@ -314,6 +329,8 @@ class AudioCaptureService { // 初始化音频累积器 this.audioAccumulators.set(sourceId, new Float32Array()); this.lastSendTime.set(sourceId, Date.now()); + this.silenceDurationMs.set(sourceId, 0); + this.inSpeech.set(sourceId, false); // 设置音频处理回调 
scriptProcessor.onaudioprocess = (event) => { @@ -393,7 +410,32 @@ class AudioCaptureService { } // 【VAD】静音检测 - 跳过静音数据,避免 ASR 模型产生幻觉 - if (this.isSilence(accumulator)) { + // 注意:如果 shouldSkipSilence 为 false(如百度模式),则不跳过,以防服务端超时 + if (this.shouldSkipSilence && this.isSilence(accumulator)) { + // 【断句】若之前处于说话状态,则累计静音时长;超过阈值触发“分句提交” + const wasInSpeech = this.enableSilenceSentenceCommit && !!this.inSpeech.get(sourceId); + if (wasInSpeech) { + const chunkDurationMs = (accumulator.length / this.sampleRate) * 1000; + const prev = this.silenceDurationMs.get(sourceId) || 0; + const next = prev + chunkDurationMs; + this.silenceDurationMs.set(sourceId, next); + + const pauseMs = this.sentencePauseThresholdMs || 600; + const lastCommitAt = this.lastSilenceCommitAt.get(sourceId) || 0; + const canCommit = timestamp - lastCommitAt >= this.silenceCommitCooldownMs; + if (next >= pauseMs && canCommit) { + this.lastSilenceCommitAt.set(sourceId, timestamp); + // 触发主进程的“静音断句提交”(commitCurrentSegment + force_commit) + if (window.electronAPI && typeof window.electronAPI.send === 'function') { + window.electronAPI.send('asr-silence-commit', { sourceId, timestamp, pauseMs }); + console.log(`[AudioCaptureService] 🧩 Silence commit triggered for ${sourceId} (silence=${Math.round(next)}ms >= ${pauseMs}ms)`); + } + // 断句后认为当前说话段结束,等待下一次非静音重新进入说话段 + this.inSpeech.set(sourceId, false); + this.silenceDurationMs.set(sourceId, 0); + } + } + // 清空累积器,避免累积 this.audioAccumulators.set(sourceId, new Float32Array()); this.lastSendTime.set(sourceId, timestamp); @@ -413,6 +455,14 @@ class AudioCaptureService { this.silenceSkipCount.set(sourceId, 0); } + // 【断句】进入说话段/重置静音累计 + if (this.enableSilenceSentenceCommit) { + if (!this.inSpeech.get(sourceId)) { + this.inSpeech.set(sourceId, true); + } + this.silenceDurationMs.set(sourceId, 0); + } + // 音频归一化处理 const normalizedAudio = this.normalizeAudio(accumulator); @@ -524,6 +574,9 @@ class AudioCaptureService { this.audioAccumulators.delete(sourceId); 
this.lastSendTime.delete(sourceId); + this.silenceDurationMs.delete(sourceId); + this.inSpeech.delete(sourceId); + this.lastSilenceCommitAt.delete(sourceId); console.log(`[AudioCaptureService] ✅ Capture stopped for ${sourceId}`); @@ -606,6 +659,50 @@ class AudioCaptureService { this.stopAllCaptures(); console.log('[AudioCaptureService] Destroyed'); } + + /** + * 从 ASR 默认配置刷新“停顿阈值”(用于静音断句) + * - 仅用于渲染进程侧断句(不影响后端模型 VAD) + */ + async refreshVadConfigFromASRDefault(force = false) { + try { + const api = window.electronAPI; + if (!api?.asrGetConfigs) { + return; + } + + const now = Date.now(); + if (!force && this._vadConfigLastRefreshAt && now - this._vadConfigLastRefreshAt < 5000) { + return; + } + this._vadConfigLastRefreshAt = now; + + const configs = await api.asrGetConfigs(); + const defaultConfig = configs?.find((c) => c?.is_default === 1) || configs?.[0]; + const modelName = String(defaultConfig?.model_name || ''); + // 仅云端 ASR 启用“静音断句生成多条消息”,避免影响 FunASR + // 注意:百度 WebSocket 自带断句,不再由前端干预,避免 1005 错误 + this.enableSilenceSentenceCommit = modelName.includes('cloud') && !modelName.includes('baidu'); + + // 对于百度,我们【不要】在本地跳过静音包。 + // 因为百度服务端如果超过 10s-20s 收不到音频包,会报 -3101 超时错误。 + // 我们把所有数据(包括静音)都发给百度,让百度强大的服务端 VAD 去处理。 + this.shouldSkipSilence = !modelName.includes('baidu'); + + const pauseSecRaw = Number(defaultConfig?.sentence_pause_threshold); + if (!Number.isFinite(pauseSecRaw) || pauseSecRaw <= 0) { + return; + } + // 允许更低的阈值,但给一个安全下限,避免 0 导致频繁断句 + const pauseMs = Math.max(250, Math.round(pauseSecRaw * 1000)); + if (pauseMs !== this.sentencePauseThresholdMs) { + this.sentencePauseThresholdMs = pauseMs; + console.log(`[AudioCaptureService] Updated sentencePauseThresholdMs=${pauseMs}ms (enableSilenceSentenceCommit=${this.enableSilenceSentenceCommit}) from ASR config (model=${modelName}, sentence_pause_threshold=${pauseSecRaw}s)`); + } + } catch (err) { + console.warn('[AudioCaptureService] Failed to refresh VAD config from ASR settings:', err); + } + } } // 导出单例 diff 
--git a/desktop/src/asr/audio-utils.js b/desktop/src/asr/audio-utils.js new file mode 100644 index 0000000..9843e39 --- /dev/null +++ b/desktop/src/asr/audio-utils.js @@ -0,0 +1,55 @@ +import fs from 'fs'; +import path from 'path'; +import { app } from 'electron'; + +const PCM_SAMPLE_RATE = 16000; + +export function float32ToInt16Buffer(floatArray) { + const int16Array = new Int16Array(floatArray.length); + for (let i = 0; i < floatArray.length; i += 1) { + const sample = Math.max(-1, Math.min(1, floatArray[i])); + int16Array[i] = sample < 0 ? sample * 0x8000 : sample * 0x7FFF; + } + return Buffer.from(int16Array.buffer); +} + +export function ensureDir(dirPath) { + if (!dirPath) return; + fs.mkdirSync(dirPath, { recursive: true }); +} + +export function createWavBuffer(audioData) { + const numChannels = 1; + const bitsPerSample = 16; + const bytesPerSample = bitsPerSample / 8; + const blockAlign = numChannels * bytesPerSample; + const dataLength = audioData.length * bytesPerSample; + const buffer = Buffer.alloc(44 + dataLength); + + buffer.write('RIFF', 0); + buffer.writeUInt32LE(36 + dataLength, 4); + buffer.write('WAVE', 8); + buffer.write('fmt ', 12); + buffer.writeUInt32LE(16, 16); + buffer.writeUInt16LE(1, 20); + buffer.writeUInt16LE(numChannels, 22); + buffer.writeUInt32LE(PCM_SAMPLE_RATE, 24); + buffer.writeUInt32LE(PCM_SAMPLE_RATE * blockAlign, 28); + buffer.writeUInt16LE(blockAlign, 32); + buffer.writeUInt16LE(bitsPerSample, 34); + buffer.write('data', 36); + buffer.writeUInt32LE(dataLength, 40); + + for (let i = 0; i < audioData.length; i += 1) { + const sample = Math.max(-1, Math.min(1, audioData[i])); + const int16 = sample < 0 ? 
sample * 0x8000 : sample * 0x7FFF; + buffer.writeInt16LE(int16, 44 + i * 2); + } + + return buffer; +} + +export function defaultAudioStoragePath() { + return path.join(app.getPath('temp'), 'asr'); +} + diff --git a/desktop/src/asr/fastapi-session.js b/desktop/src/asr/fastapi-session.js new file mode 100644 index 0000000..1acf44f --- /dev/null +++ b/desktop/src/asr/fastapi-session.js @@ -0,0 +1,77 @@ +import WebSocket from 'ws'; +import * as logger from '../utils/logger.js'; + +class FastAPISession { + constructor(ws, sourceId, onSentence, onPartial) { + this.ws = ws; + this.sourceId = sourceId; + this.onSentence = onSentence; + this.onPartial = onPartial; + this.bind(); + } + + setCallbacks(onSentence, onPartial) { + this.onSentence = onSentence; + this.onPartial = onPartial; + } + + bind() { + this.ws.on('message', (data) => { + try { + const payload = JSON.parse(data.toString()); + if (!payload) return; + if (payload.type === 'sentence_complete' && this.onSentence) { + this.onSentence({ + sessionId: payload.session_id || this.sourceId, + text: payload.text, + timestamp: payload.timestamp, + trigger: payload.trigger || 'asr', + audioDuration: payload.audio_duration, + language: payload.language, + isSegmentEnd: payload.isSegmentEnd || payload.is_segment_end, + sentenceIndex: payload.sentence_index, + totalSentences: payload.total_sentences, + rawText: payload.raw_text, + startTime: payload.start_time, + endTime: payload.end_time, + }); + } else if (payload.type === 'partial' && this.onPartial) { + this.onPartial({ + sessionId: payload.session_id || this.sourceId, + partialText: payload.text, + fullText: payload.full_text, + timestamp: payload.timestamp, + isSpeaking: true, + }); + } + } catch (error) { + logger.warn('[ASR][WS] Failed to parse message:', error); + } + }); + } + + sendAudio(buffer) { + if (this.ws.readyState === WebSocket.OPEN) { + this.ws.send(buffer); + } + } + + sendControl(payload) { + if (this.ws.readyState === WebSocket.OPEN) { + 
this.ws.send(JSON.stringify(payload)); + } + } + + reset() { + this.sendControl({ type: 'reset_session' }); + } + + close() { + if (this.ws && (this.ws.readyState === WebSocket.OPEN || this.ws.readyState === WebSocket.CONNECTING)) { + this.ws.close(); + } + } +} + +export default FastAPISession; + diff --git a/desktop/src/asr/funasr-asr-service.js b/desktop/src/asr/funasr-asr-service.js index cd4775b..cfc848b 100644 --- a/desktop/src/asr/funasr-asr-service.js +++ b/desktop/src/asr/funasr-asr-service.js @@ -36,12 +36,26 @@ class FunASRService { } const __dirname = path.dirname(fileURLToPath(import.meta.url)); - this.workerScriptPath = path.join(__dirname, 'asr_funasr_worker.py'); + // Worker 脚本在 backend/asr/ 目录下 + const projectRoot = path.resolve(__dirname, '../..'); + this.workerScriptPath = this.resolveAsarUnpacked( + path.join(projectRoot, 'backend', 'asr', 'asr_funasr_worker.py') + ); logger.log(`[FunASR] Python path: ${this.pythonPath}`); logger.log(`[FunASR] Worker script: ${this.workerScriptPath}`); } + /** + * asar 场景下,Python 进程无法直接读取 asar 内部文件,需要使用解包路径 + */ + resolveAsarUnpacked(targetPath) { + if (!targetPath) return targetPath; + return targetPath.includes('app.asar') + ? targetPath.replace('app.asar', 'app.asar.unpacked') + : targetPath; + } + setServerCrashCallback(callback) { this.onServerCrash = callback; } @@ -51,19 +65,29 @@ class FunASRService { if (envPython && fs.existsSync(envPython)) { return envPython; } - const projectRoot = path.resolve(app.getAppPath(), app.isPackaged ? '../..' : '.'); - const venvPython = path.join(projectRoot, '.venv', 'bin', 'python'); + + // 优先使用打包内置的 Python(extraResources/python-env) + const resourcesPath = process.resourcesPath; + if (resourcesPath) { + const bundledPython = process.platform === 'win32' + ? 
path.join(resourcesPath, 'python-env', 'Scripts', 'python.exe') + : path.join(resourcesPath, 'python-env', 'bin', 'python3'); + if (fs.existsSync(bundledPython)) { + return bundledPython; + } + } + + // 开发环境或回退:项目根下的 python-env/.venv + const projectRoot = path.resolve(app.getAppPath(), app.isPackaged ? '..' : '.'); + const venvPython = path.join(projectRoot, 'python-env', process.platform === 'win32' ? 'Scripts' : 'bin', process.platform === 'win32' ? 'python.exe' : 'python3'); if (fs.existsSync(venvPython)) { return venvPython; } - // Windows 下可能是 python.exe - if (process.platform === 'win32') { - const venvPythonWin = path.join(projectRoot, '.venv', 'Scripts', 'python.exe'); - if (fs.existsSync(venvPythonWin)) { - return venvPythonWin; - } + const legacyVenv = path.join(projectRoot, '.venv', process.platform === 'win32' ? 'Scripts' : 'bin', process.platform === 'win32' ? 'python.exe' : 'python3'); + if (fs.existsSync(legacyVenv)) { + return legacyVenv; } - return 'python3'; + return process.platform === 'win32' ? 
'python' : 'python3'; } async initialize(modelName = 'funasr-paraformer', options = {}) { diff --git a/desktop/src/asr/model-cache.js b/desktop/src/asr/model-cache.js new file mode 100644 index 0000000..f68e129 --- /dev/null +++ b/desktop/src/asr/model-cache.js @@ -0,0 +1,199 @@ +import fs from 'fs'; +import os from 'os'; +import path from 'path'; +import { app } from 'electron'; +import * as logger from '../utils/logger.js'; +import { getAsrModelPreset } from '../shared/asr-models.js'; + +export function safeDirSize(targetPath) { + try { + const stat = fs.statSync(targetPath, { throwIfNoEntry: false }); + if (!stat) return 0; + if (stat.isFile()) return stat.size; + if (!stat.isDirectory()) return 0; + let total = 0; + const stack = [targetPath]; + while (stack.length) { + const dir = stack.pop(); + const entries = fs.readdirSync(dir, { withFileTypes: true }); + for (const entry of entries) { + const full = path.join(dir, entry.name); + if (entry.isFile()) { + try { + total += fs.statSync(full).size; + } catch { + // ignore stat errors + } + } else if (entry.isDirectory()) { + stack.push(full); + } + } + } + return total; + } catch { + return 0; + } +} + +export function getRepoPathsForModel(preset, cacheDir) { + const paths = []; + if (!preset || !cacheDir) return paths; + + if (preset.repoId) { + const repoSafe = `models--${preset.repoId.replace(/\//g, '--')}`; + paths.push(path.join(cacheDir, repoSafe)); + } + if (preset.modelScopeRepoId) { + paths.push(path.join(cacheDir, 'models', preset.modelScopeRepoId)); + paths.push(path.join(cacheDir, preset.modelScopeRepoId)); + paths.push(path.join(os.homedir(), '.cache', 'modelscope', 'hub', 'models', preset.modelScopeRepoId)); + paths.push(path.join(os.homedir(), '.cache', 'modelscope', 'hub', preset.modelScopeRepoId)); + } + + if (preset.onnxModels) { + const modelDirs = Array.from(new Set(Object.values(preset.onnxModels).filter(Boolean))); + modelDirs.forEach((modelDir) => { + paths.push(path.join(cacheDir, 
modelDir)); + paths.push(path.join(cacheDir, 'models', modelDir)); + }); + } + return paths; +} + +export function cleanModelScopeLocks(cacheDir, maxAgeMs = 10 * 60 * 1000) { + if (!cacheDir) return; + const lockDir = path.join(cacheDir, '.lock'); + try { + const entries = fs.readdirSync(lockDir, { withFileTypes: true }); + const now = Date.now(); + entries.forEach((entry) => { + if (!entry.isFile()) return; + const full = path.join(lockDir, entry.name); + try { + const stat = fs.statSync(full); + if (stat.mtimeMs < now - maxAgeMs) { + fs.unlinkSync(full); + logger.log(`[ASR] Removed stale ModelScope lock: ${entry.name}`); + } + } catch { + // ignore + } + }); + } catch { + // ignore if lock dir missing + } +} + +function getModelCacheCandidates() { + const homeDir = os.homedir(); + const userDataDir = app.getPath('userData'); + const msEnv = process.env.MODELSCOPE_CACHE || process.env.MODELSCOPE_CACHE_HOME; + const msBase = msEnv && path.basename(msEnv).toLowerCase() === 'hub' ? path.dirname(msEnv) : msEnv; + const msHub = msBase ? path.join(msBase, 'hub') : (msEnv && path.basename(msEnv).toLowerCase() === 'hub' ? msEnv : null); + const appMsBase = path.join(userDataDir, 'asr-cache', 'modelscope'); + const appMsHub = path.join(appMsBase, 'hub'); + + return [ + msHub, + msBase, + process.env.ASR_CACHE_DIR, + process.env.HF_HOME ? path.join(process.env.HF_HOME, 'hub') : null, + appMsHub, // model-manager.js 默认下载位置(ModelScope hub) + appMsBase, // model-manager.js 默认下载位置(ModelScope base) + path.join(userDataDir, 'hf-home', 'hub'), + path.join(userDataDir, 'ms-cache'), + homeDir ? path.join(homeDir, '.cache', 'huggingface', 'hub') : null, + homeDir ? path.join(homeDir, '.cache', 'modelscope', 'hub') : null, + ].filter(Boolean); +} + +export function resolveModelCache(modelName) { + const preset = getAsrModelPreset(modelName); + const repoId = preset?.repoId || (typeof modelName === 'string' && modelName.includes('/') ? modelName : null); + const repoSafe = repoId ? 
`models--${repoId.replace(/\//g, '--')}` : null; + const msRepoId = preset?.modelScopeRepoId; + const candidates = getModelCacheCandidates(); + + for (const candidate of candidates) { + try { + if (repoSafe && fs.existsSync(path.join(candidate, repoSafe))) { + return { cacheDir: candidate, found: true }; + } + if (msRepoId && fs.existsSync(path.join(candidate, 'models', msRepoId))) { + return { cacheDir: candidate, found: true }; + } + } catch { + // ignore and continue + } + } + + if (msRepoId) { + const msDefault = path.join(os.homedir(), '.cache', 'modelscope', 'hub'); + if (fs.existsSync(path.join(msDefault, 'models', msRepoId))) { + return { cacheDir: msDefault, found: true }; + } + } + + return { cacheDir: candidates[0] || path.join(app.getPath('userData'), 'hf-home', 'hub'), found: false }; +} + +export function resolveFunasrModelScopeCache(preset) { + if (!preset?.onnxModels) { + return null; + } + const modelDirs = Array.from(new Set(Object.values(preset.onnxModels).filter(Boolean))); + + const systemMsCache = path.join(os.homedir(), '.cache', 'modelscope', 'hub'); + try { + let systemHit = false; + let systemBytes = 0; + for (const dir of modelDirs) { + const p1 = path.join(systemMsCache, dir); + const p2 = path.join(systemMsCache, 'models', dir); + if (fs.existsSync(p1)) { + systemHit = true; + systemBytes += safeDirSize(p1); + } else if (fs.existsSync(p2)) { + systemHit = true; + systemBytes += safeDirSize(p2); + } + } + if (systemHit && systemBytes > 0) { + return { cacheDir: systemMsCache, found: true }; + } + } catch { + // ignore and continue + } + + const candidates = getModelCacheCandidates(); + let best = null; + let bestBytes = -1; + for (const candidate of candidates) { + if (candidate === systemMsCache) continue; + + try { + let hit = false; + let bytes = 0; + for (const dir of modelDirs) { + const p1 = path.join(candidate, dir); + const p2 = path.join(candidate, 'models', dir); + if (fs.existsSync(p1)) { + hit = true; + bytes += 
safeDirSize(p1); + } else if (fs.existsSync(p2)) { + hit = true; + bytes += safeDirSize(p2); + } + } + if (hit && bytes > bestBytes) { + best = { cacheDir: candidate, found: true }; + bestBytes = bytes; + } + } catch { + // ignore and continue + } + } + if (best) return best; + + return { cacheDir: systemMsCache, found: false }; +} diff --git a/desktop/src/asr/model-manager.js b/desktop/src/asr/model-manager.js index 58bfdcb..d35525d 100644 --- a/desktop/src/asr/model-manager.js +++ b/desktop/src/asr/model-manager.js @@ -6,7 +6,20 @@ import { spawn } from 'child_process'; import { EventEmitter } from 'events'; import { ASR_MODEL_PRESETS, getAsrModelPreset } from '../shared/asr-models.js'; -const DOWNLOAD_SCRIPT = path.join(app.getAppPath(), 'scripts', 'download_asr_model.py'); +const DOWNLOAD_FUNASR_SCRIPT = path.join(app.getAppPath(), 'scripts', 'download_funasr_model.py'); + +function normalizeModelScopeCache(cachePath) { + if (!cachePath) { + return { base: null, hub: null }; + } + const normalized = path.resolve(cachePath); + // ModelScope 的默认目录结构通常是: /hub/... + // 但历史上我们也可能把 env 直接设成了 ".../hub"。这里做兼容归一化。 + if (path.basename(normalized).toLowerCase() === 'hub') { + return { base: path.dirname(normalized), hub: normalized }; + } + return { base: normalized, hub: path.join(normalized, 'hub') }; +} function safeReaddir(targetPath) { try { @@ -83,12 +96,30 @@ function getModelScopeRepoPath(cacheDir, repoId) { export default class ASRModelManager extends EventEmitter { constructor() { super(); - // Primary cache directory (app-specific or configured) - this.cacheDir = process.env.ASR_CACHE_DIR - || (process.env.HF_HOME ? 
path.join(process.env.HF_HOME, 'hub') : path.join(app.getPath('userData'), 'hf-home', 'hub')); - fs.mkdirSync(this.cacheDir, { recursive: true }); + // 应用级缓存根目录(可通过环境变量覆盖) + this.appCacheBase = process.env.ASR_CACHE_BASE || path.join(app.getPath('userData'), 'asr-cache'); + this.hfHome = process.env.HF_HOME || path.join(this.appCacheBase, 'hf-home'); + const msEnv = process.env.MODELSCOPE_CACHE || process.env.MODELSCOPE_CACHE_HOME; + const msNormalized = normalizeModelScopeCache(msEnv || path.join(this.appCacheBase, 'modelscope')); + this.msCacheBase = msNormalized.base; + this.msCacheHub = msNormalized.hub; + + // Primary cache directory (共享给 HF 默认 hub) + this.cacheDir = process.env.ASR_CACHE_DIR || path.join(this.hfHome, 'hub'); + try { + fs.mkdirSync(this.cacheDir, { recursive: true }); + fs.mkdirSync(this.hfHome, { recursive: true }); + if (this.msCacheBase) { + fs.mkdirSync(this.msCacheBase, { recursive: true }); + } + if (this.msCacheHub) { + fs.mkdirSync(this.msCacheHub, { recursive: true }); + } + } catch { + // ignore mkdir errors + } - // Also check system default HuggingFace cache (where faster-whisper actually downloads models) + // Also check system default HuggingFace cache (preexisting downloads) this.systemHfCache = path.join(os.homedir(), '.cache', 'huggingface', 'hub'); // And system default ModelScope cache this.systemMsCache = path.join(os.homedir(), '.cache', 'modelscope', 'hub'); @@ -96,6 +127,8 @@ export default class ASRModelManager extends EventEmitter { // List of cache directories to check (in priority order) this.cacheDirs = [ this.cacheDir, // App-configured cache + this.msCacheHub, // App ModelScope hub + this.msCacheBase, // App ModelScope base (兼容某些工具只写到 base) this.systemHfCache, // System default HF cache this.systemMsCache // System default ModelScope cache ].filter(dir => { @@ -115,10 +148,24 @@ export default class ASRModelManager extends EventEmitter { if (envPython && fs.existsSync(envPython)) { return envPython; } + const 
resourcesPath = process.resourcesPath; const projectRoot = app.isPackaged - ? path.join(process.resourcesPath, '..') + ? path.join(resourcesPath || app.getAppPath(), '..') : app.getAppPath(); + + // 优先使用打包内置的 python-env(extraResources) + const bundledPython = process.platform === 'win32' + ? path.join(resourcesPath || '', 'python-env', 'Scripts', 'python.exe') + : path.join(resourcesPath || '', 'python-env', 'bin', 'python3'); + + // 开发/调试:使用仓库下的 python-env/.venv + const repoPythonEnv = process.platform === 'win32' + ? path.join(projectRoot, 'python-env', 'Scripts', 'python.exe') + : path.join(projectRoot, 'python-env', 'bin', 'python3'); + const candidates = [ + bundledPython, + repoPythonEnv, path.join(projectRoot, '.venv', 'bin', 'python'), path.join(projectRoot, '.venv', 'Scripts', 'python.exe'), 'python3', @@ -217,12 +264,118 @@ export default class ASRModelManager extends EventEmitter { return null; } + /** + * 获取 FunASR ONNX 模型的状态 + * 这些模型由 funasr_onnx 库自己管理下载,缓存在 ~/.cache/modelscope/hub/ 目录 + */ + getFunASROnnxModelStatus(modelId, preset) { + const onnxModels = preset.onnxModels || {}; + const modelDirs = Object.values(onnxModels); + + // funasr_onnx 使用的缓存目录 + // 注意:ModelScope 有时会将模型放在 hub/models/ 下,有时直接在 hub/ 下 + const funasrCacheDirs = [ + path.join(os.homedir(), '.cache', 'modelscope', 'hub'), + path.join(os.homedir(), '.cache', 'modelscope', 'hub', 'models'), + this.msCache, + path.join(this.msCache, 'models'), + this.systemMsCache, + path.join(this.systemMsCache, 'models'), + ]; + + let totalDownloadedBytes = 0; + let modelsFound = 0; + let latestUpdatedAt = null; + let foundPaths = []; + + for (const modelDir of modelDirs) { + if (!modelDir) continue; + + // modelDir 格式: "damo/speech_fsmn_vad_zh-cn-16k-common-onnx" + for (const cacheDir of funasrCacheDirs) { + const modelPath = path.join(cacheDir, modelDir); + try { + if (fs.existsSync(modelPath)) { + const size = directorySize(modelPath); + totalDownloadedBytes += size; + modelsFound++; + 
foundPaths.push(modelPath); + + try { + const stat = fs.statSync(modelPath); + if (!latestUpdatedAt || stat.mtimeMs > latestUpdatedAt) { + latestUpdatedAt = stat.mtimeMs; + } + } catch { + // ignore + } + break; // Found this model, move to next + } + } catch { + // ignore + } + } + } + + const totalModels = modelDirs.length; + const isDownloaded = modelsFound >= totalModels; + + // 如果所有模型都找到了,使用第一个找到的路径作为快照路径 + const snapshotPath = foundPaths.length > 0 ? path.dirname(foundPaths[0]) : null; + + console.log(`[ASR ModelManager] FunASR ONNX Status for ${modelId}:`, { + modelsFound, + totalModels, + isDownloaded, + totalDownloadedBytes, + foundPaths: foundPaths.slice(0, 2), // 只打印前两个 + }); + + return { + modelId, + repoId: preset.repoId, + modelScopeRepoId: preset.modelScopeRepoId, + sizeBytes: preset.sizeBytes || 0, + downloadedBytes: totalDownloadedBytes, + isDownloaded, + snapshotPath, + updatedAt: latestUpdatedAt, + activeDownload: this.activeDownloads.has(modelId), + source: 'funasr_onnx', + // FunASR 特有信息 + onnxModelsFound: modelsFound, + onnxModelsTotal: totalModels, + }; + } + getModelStatus(modelId) { const preset = getAsrModelPreset(modelId); if (!preset) { return null; } + // 云端模型:无需下载,本地恒定可用(但依赖网络与 API) + if (preset.engine === 'siliconflow' || preset.isRemote) { + return { + modelId, + repoId: preset.repoId || null, + modelScopeRepoId: preset.modelScopeRepoId || null, + sizeBytes: 0, + downloadedBytes: 0, + isDownloaded: true, + snapshotPath: null, + updatedAt: Date.now(), + activeDownload: false, + source: 'remote' + }; + } + + // FunASR ONNX 模型特殊处理 + // 这些模型由 funasr_onnx 库自己管理下载,缓存在 ~/.cache/modelscope/hub/damo/ 目录 + if (preset.engine === 'funasr' && preset.onnxModels) { + return this.getFunASROnnxModelStatus(modelId, preset); + } + // Check HuggingFace cache const hfSnapshotPath = this.findSnapshotDir(preset); let hfDownloadedBytes = 0; @@ -238,7 +391,7 @@ export default class ASRModelManager extends EventEmitter { } // Check ModelScope cache - // 
ModelScope structure: cacheDir / repoId (e.g. gpustack/faster-whisper-medium) + // ModelScope structure: cacheDir / repoId (e.g. damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-onnx) // or sometimes cacheDir / repoId / .mv / ... // Simple check: cacheDir / repoId let msSnapshotPath = null; @@ -326,7 +479,7 @@ export default class ASRModelManager extends EventEmitter { } } - startDownload(modelId, source = 'huggingface') { + startDownload(modelId, source = 'huggingface', allowFallback = true) { if (this.activeDownloads.has(modelId)) { return { status: 'running' }; } @@ -334,37 +487,66 @@ export default class ASRModelManager extends EventEmitter { if (!preset) { throw new Error(`Unknown ASR model: ${modelId}`); } + + // 云端模型不需要下载 + if (preset.engine === 'siliconflow' || preset.isRemote) { + const status = this.getModelStatus(modelId); + this.broadcast('asr-model-download-complete', { + modelId, + repoId: preset.repoId, + status + }); + return { status: 'completed' }; + } + + // FunASR ONNX 模型由 funasr-onnx 库自己管理下载 + // 我们可以调用辅助脚本来触发这个下载过程 + if (preset.engine === 'funasr' && preset.onnxModels) { + const status = this.getFunASROnnxModelStatus(modelId, preset); + if (status.isDownloaded) { + console.log(`[ASR ModelManager] FunASR ONNX model ${modelId} is already downloaded`); + this.broadcast('asr-model-download-complete', { + modelId, + repoId: preset.repoId, + status, + }); + return { status: 'completed' }; + } + + // 未下载,继续执行,使用 download_funasr_model.py + console.log(`[ASR ModelManager] FunASR ONNX model ${modelId} will be downloaded via script`); + // 不返回 'auto-download',而是继续执行后续的 spawn 逻辑,但要替换 script 和 args + } + const pythonExecutable = this.pythonPath; if (!pythonExecutable) { throw new Error('Python executable not found'); } - const repoId = source === 'modelscope' && preset.modelScopeRepoId ? 
preset.modelScopeRepoId : preset.repoId; + const repoId = preset.modelScopeRepoId || preset.repoId; - console.log(`[ASR ModelManager] Starting download: modelId=${modelId}, source=${source}, repoId=${repoId}`); - console.log(`[ASR ModelManager] Python path: ${pythonExecutable}`); - console.log(`[ASR ModelManager] Download script: ${DOWNLOAD_SCRIPT}`); - - const jobs = Math.max(2, Math.min(8, Math.floor((os.cpus()?.length || 4) / 2)) || 2); + const scriptPath = DOWNLOAD_FUNASR_SCRIPT; const args = [ - DOWNLOAD_SCRIPT, + scriptPath, '--model-id', preset.id, - '--repo-id', - repoId, '--cache-dir', - this.cacheDir, - '--jobs', - String(jobs), - '--source', - source + this.msCacheBase || this.msCacheHub // FunASR 默认用 ModelScope 缓存 ]; + console.log(`[ASR ModelManager] Starting download: modelId=${modelId}, source=${source}, repoId=${repoId}`); + console.log(`[ASR ModelManager] Python path: ${pythonExecutable}`); + console.log(`[ASR ModelManager] Download script: ${scriptPath}`); console.log(`[ASR ModelManager] Spawn command: ${pythonExecutable} ${args.join(' ')}`); + const hfHomeEnv = this.hfHome || process.env.HF_HOME; + const msCacheEnv = this.msCacheBase || process.env.MODELSCOPE_CACHE; const env = { ...process.env, ASR_CACHE_DIR: this.cacheDir, + HF_HOME: hfHomeEnv, + MODELSCOPE_CACHE: msCacheEnv, + MODELSCOPE_CACHE_HOME: msCacheEnv, PYTHONIOENCODING: 'utf-8', }; const child = spawn(pythonExecutable, args, { env }); @@ -422,7 +604,7 @@ export default class ASRModelManager extends EventEmitter { status, }); } else { - console.error(`[ASR ModelManager] Download failed: modelId=${modelId}, code=${code}`); + console.error(`[ASR ModelManager] Download failed: modelId=${modelId}, code=${code}, source=${source}`); this.broadcast('asr-model-download-error', { modelId, repoId: repoId, @@ -461,16 +643,54 @@ export default class ASRModelManager extends EventEmitter { if (payload.totalBytes) { ctx.totalBytes = payload.totalBytes; } + if (payload.message) { + 
this.broadcast('asr-model-download-log', { + modelId: ctx.modelId, + repoId: ctx.repoId, + message: payload.message, + }); + } if (payload.snapshotRelativePath) { ctx.snapshotPath = path.isAbsolute(payload.snapshotRelativePath) ? payload.snapshotRelativePath : path.join(this.cacheDir, payload.snapshotRelativePath); + + // ModelScope 下载的落盘路径可能与 cacheDir 结构不同,尝试解析实际目录 + if (payload.source === 'modelscope') { + const resolvedMsPath = + getModelScopeRepoPath(this.cacheDir, ctx.repoId) || + getModelScopeRepoPath(this.msCache, ctx.repoId) || + getModelScopeRepoPath(this.systemMsCache, ctx.repoId) || + getModelScopeRepoPath(this.systemHfCache, ctx.repoId); + if (resolvedMsPath) { + ctx.snapshotPath = resolvedMsPath; + } + } + } + // 记录 downloads 目录的现有子目录,后续用于估算部分下载进度(HF 临时文件) + ctx.downloadsDir = path.join(this.cacheDir, 'downloads'); + try { + const baselineEntries = safeReaddir(ctx.downloadsDir).filter((entry) => entry.isDirectory()); + ctx.downloadsBaseDirs = new Set(baselineEntries.map((entry) => entry.name)); + } catch { + ctx.downloadsBaseDirs = null; } if (!ctx.timer) { ctx.timer = setInterval(() => this.emitProgress(ctx), 1000); } } else if (payload.event === 'completed') { + if (payload.localDir) { + ctx.snapshotPath = payload.localDir; + } this.emitProgress(ctx, true); + } else if (payload.event === 'warning') { + // 仅记录警告,避免在 HuggingFace 失败但可回退时直接报错 + this.broadcast('asr-model-download-log', { + modelId: ctx.modelId, + repoId: ctx.repoId, + message: payload.message || 'download warning', + traceback: payload.traceback, + }); } else if (payload.event === 'error') { this.broadcast('asr-model-download-error', { modelId: ctx.modelId, @@ -486,10 +706,26 @@ export default class ASRModelManager extends EventEmitter { } emitProgress(ctx, force = false) { - if (!ctx.snapshotPath) { + // 若 snapshot 路径尚未确认,但 modelscope 目录已创建,尝试动态解析 + if ((!ctx.snapshotPath || !fs.existsSync(ctx.snapshotPath)) && ctx.source === 'modelscope') { + const resolvedMsPath = + 
getModelScopeRepoPath(this.cacheDir, ctx.repoId) || + getModelScopeRepoPath(this.msCache, ctx.repoId) || + getModelScopeRepoPath(this.systemMsCache, ctx.repoId) || + getModelScopeRepoPath(this.systemHfCache, ctx.repoId); + if (resolvedMsPath) { + ctx.snapshotPath = resolvedMsPath; + } + } + + if (!ctx.snapshotPath && !ctx.downloadsDir) { return; } - const downloadedBytes = directorySize(ctx.snapshotPath); + + const snapshotBytes = ctx.snapshotPath ? directorySize(ctx.snapshotPath) : 0; + // HF 下载时,临时文件位于 cacheDir/downloads/,用于显示部分下载进度 + const tempBytes = this.computeDownloadTempBytes(ctx); + const downloadedBytes = Math.max(snapshotBytes, tempBytes); const totalBytes = ctx.totalBytes || downloadedBytes; const now = Date.now(); const elapsedMs = now - (ctx.lastTimestamp || now); @@ -510,6 +746,28 @@ export default class ASRModelManager extends EventEmitter { } } + computeDownloadTempBytes(ctx) { + if (!ctx.downloadsDir) { + return 0; + } + let total = 0; + const baseline = ctx.downloadsBaseDirs || new Set(); + let entries = []; + try { + entries = safeReaddir(ctx.downloadsDir).filter((entry) => entry.isDirectory()); + } catch { + entries = []; + } + for (const entry of entries) { + if (baseline.has(entry.name)) { + continue; + } + const dirPath = path.join(ctx.downloadsDir, entry.name); + total += directorySize(dirPath); + } + return total; + } + cancelDownload(modelId) { const ctx = this.activeDownloads.get(modelId); if (!ctx) { @@ -556,4 +814,3 @@ export default class ASRModelManager extends EventEmitter { }); } } - diff --git a/desktop/src/asr/recognition-cache.js b/desktop/src/asr/recognition-cache.js new file mode 100644 index 0000000..7dedeac --- /dev/null +++ b/desktop/src/asr/recognition-cache.js @@ -0,0 +1,49 @@ +class RecognitionCache { + constructor({ duplicateThreshold = 3000 } = {}) { + this.duplicateThreshold = duplicateThreshold; + this.cache = new Map(); // sourceId -> [{ text, timestamp }] + } + + normalizeText(text) { + if (!text) return ''; + let 
normalized = typeof text === 'string' ? text : String(text); + normalized = normalized.replace(/([\u4E00-\u9FFF])\s+(?=[\u4E00-\u9FFF])/g, '$1'); + normalized = normalized.replace(/\s+([,。!?、,.!?])/g, '$1'); + normalized = normalized.replace(/([,。!?、,.!?])\s+/g, '$1'); + normalized = normalized.replace(/\s{2,}/g, ' '); + return normalized.trim(); + } + + normalizeForComparison(text) { + const normalized = this.normalizeText(text || '').toLowerCase(); + return normalized.replace(/[,。!?、,.!?]/g, ''); + } + + isDuplicate(sourceId, text, timestamp) { + const cache = this.cache.get(sourceId) || []; + const trimmedText = this.normalizeForComparison(text); + const recentThreshold = timestamp - this.duplicateThreshold; + for (const item of cache) { + if (item.timestamp >= recentThreshold && item.text === trimmedText) { + return true; + } + } + return false; + } + + add(sourceId, text, timestamp) { + if (!sourceId) return; + if (!this.cache.has(sourceId)) { + this.cache.set(sourceId, []); + } + const normalized = this.normalizeForComparison(text); + const cache = this.cache.get(sourceId); + cache.push({ text: normalized, timestamp }); + const cutoffTime = timestamp - 10000; + const filtered = cache.filter((item) => item.timestamp > cutoffTime); + this.cache.set(sourceId, filtered); + } +} + +export default RecognitionCache; + diff --git a/desktop/src/asr/sentence-handlers.js b/desktop/src/asr/sentence-handlers.js new file mode 100644 index 0000000..28771ac --- /dev/null +++ b/desktop/src/asr/sentence-handlers.js @@ -0,0 +1,241 @@ +import * as logger from '../utils/logger.js'; + +export function createSentenceHandlers(manager) { + // 仅用于 cloud 模式:云端可能返回“增量片段”而非“累积全文”,直接覆盖会导致前文丢失 + const mergeIncrementalText = (prevText, nextText) => { + const prev = (prevText || '').trim(); + const next = (nextText || '').trim(); + if (!prev) return next; + if (!next) return prev; + if (next.includes(prev)) return next; // 累积全文 + if (prev.includes(next)) return prev; // 回退/截断,保留更长的 + + const 
maxOverlap = Math.min(prev.length, next.length, 80); + for (let k = maxOverlap; k >= 1; k--) { + if (prev.slice(-k) === next.slice(0, k)) { + return prev + next.slice(k); + } + } + + // 无明显重叠:根据字符类型决定是否插入空格(主要处理英文/数字) + const last = prev[prev.length - 1]; + const first = next[0]; + const needSpace = /[A-Za-z0-9]$/.test(last) && /^[A-Za-z0-9]/.test(first); + return prev + (needSpace ? ' ' : '') + next; + }; + + const handleSentenceComplete = async (result) => { + try { + const { + sessionId, + text, + timestamp, + trigger, + audioDuration, + isSegmentEnd, + sentenceIndex, // 新增:当前句子在序列中的索引 + totalSentences // 新增:总句子数量 + } = result; + + const sentenceIdx = sentenceIndex ?? result.sentence_index ?? 0; + const totalSents = totalSentences ?? result.total_sentences ?? 1; + const isMultiSentence = totalSents > 1; + + if (isMultiSentence && sentenceIdx > 0) { + logger.log(`[Sentence Complete] Multi-sentence mode: sentence ${sentenceIdx + 1}/${totalSents}`); + manager.commitCurrentSegment(sessionId); + } + + if (isSegmentEnd) { + logger.log(`[Sentence Complete] Segment end signal received for ${sessionId}`); + if (!text) { + manager.commitCurrentSegment(sessionId); + return null; + } + } + + if (!text || !text.trim()) { + logger.log('[Sentence Complete] Empty text, skipping.'); + if (isSegmentEnd) { + manager.commitCurrentSegment(sessionId); + } + return null; + } + + const normalizedText = manager.normalizeText(text); + if (!normalizedText) { + logger.log('[Sentence Complete] Normalized text empty, skipping.'); + return null; + } + + const currentSegment = manager.currentSegments.get(sessionId); + const effectiveTimestamp = timestamp || Date.now(); + + if (currentSegment && currentSegment.messageId) { + const isCloudModel = String(manager.modelName || '').includes('cloud'); + let effectiveText = normalizedText; + + if (isCloudModel) { + // 云端模式:智能拼接新段落(每段独立返回) + // 检查是否是重复内容(新文本包含了旧文本) + if (normalizedText.includes(currentSegment.lastText)) { + // 云端返回的是累积文本,直接使用 + 
effectiveText = normalizedText; + } else if (currentSegment.lastText.includes(normalizedText)) { + // 新文本是旧文本的子集,保留旧的 + logger.log(`[Sentence Complete] New text is subset of old, skipping update`); + return null; + } else { + // 完全不同的文本,拼接(用空格分隔) + effectiveText = currentSegment.lastText + ' ' + normalizedText; + } + } else { + // 本地模式(FunASR):直接替换 + // FunASR 的 Pass 2 每次都是对整段音频重新识别,结果是完整的,直接替换即可 + effectiveText = normalizedText; + } + + if (currentSegment.lastText === effectiveText) { + logger.log(`[Sentence Complete] Text unchanged, skipping update: "${normalizedText.substring(0, 30)}..."`); + return null; + } + + logger.log(`[Sentence Complete] Updating existing message ${currentSegment.messageId}: "${effectiveText.substring(0, 50)}..." (trigger: ${trigger})`); + + const updatedRecord = manager.db.updateSpeechRecord(currentSegment.recordId, { + recognized_text: effectiveText, + end_time: effectiveTimestamp, + audio_duration: audioDuration || (effectiveTimestamp - (currentSegment.startTime || effectiveTimestamp)) / 1000 + }); + + if (!updatedRecord) { + logger.error(`[Sentence Complete] Failed to update speech record: ${currentSegment.recordId}`); + return null; + } + + const updatedMessage = manager.db.updateMessage(currentSegment.messageId, { + content: effectiveText + }); + + if (!updatedMessage) { + logger.error(`[Sentence Complete] Failed to update message: ${currentSegment.messageId}`); + return null; + } + + currentSegment.lastText = effectiveText; + manager.currentSegments.set(sessionId, currentSegment); + + if (manager.eventEmitter) { + updatedMessage.source_id = sessionId; + manager.eventEmitter('asr-sentence-update', updatedMessage); + } + + manager.enqueuePunctuationUpdate({ + recordId: currentSegment.recordId, + messageId: currentSegment.messageId, + text: effectiveText, + sourceId: sessionId + }); + + if (isSegmentEnd) { + logger.log(`[Sentence Complete] Committing segment after update: ${sessionId}`); + manager.commitCurrentSegment(sessionId); + } 
+ + return updatedMessage; + } + + logger.log(`[Sentence Complete] Creating new message: "${normalizedText.substring(0, 50)}..." (trigger: ${trigger}, session: ${sessionId})`); + + const record = await manager.saveRecognitionRecord(sessionId, { + text: normalizedText, + confidence: trigger === 'punctuation' ? 0.98 : 0.95, + startTime: effectiveTimestamp - (audioDuration || manager.SILENCE_TIMEOUT), + endTime: effectiveTimestamp, + audioDuration: audioDuration || manager.SILENCE_TIMEOUT / 1000, + isPartial: false, + audioData: null + }); + + if (!record) { + logger.error(`[Sentence Complete] Failed to save record for: "${normalizedText}"`); + return null; + } + + manager.addToRecognitionCache(sessionId, normalizedText, effectiveTimestamp); + + const message = await manager.convertRecordToMessage(record.id, manager.currentConversationId); + message.source_id = sessionId; + logger.log(`[Sentence Complete] Message created: ${message.id}`); + + manager.currentSegments.set(sessionId, { + messageId: message.id, + recordId: record.id, + lastText: normalizedText, + startTime: effectiveTimestamp - (audioDuration || manager.SILENCE_TIMEOUT) + }); + + if (manager.eventEmitter) { + logger.log(`[Sentence Complete] Sending event to renderer: ${message.id}`); + manager.clearStreamingSegment(sessionId); + manager.eventEmitter('asr-sentence-complete', message); + } else { + logger.warn('[Sentence Complete] No event emitter set, UI will not update in real-time'); + } + + manager.enqueuePunctuationUpdate({ + recordId: record.id, + messageId: message.id, + text: normalizedText, + sourceId: sessionId + }); + + const pendingTimer = manager.silenceTimers.get(sessionId); + if (pendingTimer) { + clearTimeout(pendingTimer); + manager.silenceTimers.delete(sessionId); + } + + if (isSegmentEnd) { + logger.log(`[Sentence Complete] Committing segment after creating message: ${sessionId}`); + manager.commitCurrentSegment(sessionId); + } + + return message; + } catch (error) { + 
logger.error('[Sentence Complete] Error:', error); + return null; + } + }; + + const handlePartialResult = (result) => { + try { + const { sessionId, partialText, fullText, timestamp } = result; + + if (!partialText && !fullText) { + return; + } + + const normalizedPartial = manager.normalizeText(fullText || partialText || ''); + if (!normalizedPartial) { + return; + } + + const existingTimer = manager.silenceTimers.get(sessionId); + if (existingTimer) { + clearTimeout(existingTimer); + } + manager.isSpeaking = true; + + const timer = setTimeout(() => manager.triggerSilenceCommit(sessionId), manager.SILENCE_TIMEOUT); + manager.silenceTimers.set(sessionId, timer); + + const effectiveTimestamp = timestamp || Date.now(); + manager.emitStreamingUpdate(sessionId, normalizedPartial, effectiveTimestamp); + } catch (error) { + logger.error('[Partial Result] Error:', error); + } + }; + + return { handleSentenceComplete, handlePartialResult }; +} diff --git a/desktop/src/asr/whisper-service-factory.js b/desktop/src/asr/whisper-service-factory.js index c8ba9d3..946e3c5 100644 --- a/desktop/src/asr/whisper-service-factory.js +++ b/desktop/src/asr/whisper-service-factory.js @@ -1,172 +1,7 @@ import ASRService from './asr-service.js'; -import LocalWhisperService from './local-asr-service.js'; -import FunASRService from './funasr-asr-service.js'; -import { ASR_MODEL_PRESETS, getAsrModelPreset } from '../shared/asr-models.js'; -import * as logger from '../utils/logger.js'; - -class FallbackAwareASRService { - constructor() { - this.primary = new ASRService(); - this.fallback = new LocalWhisperService(); - this.funasr = new FunASRService(); - this.currentService = null; - this.usingBackend = null; // 'whisper' or 'funasr' - - // 保存回调引用,以便切换服务时重新绑定 - this._onSentenceComplete = null; - this._onPartialResult = null; - this._onServerCrash = null; - - // 保存初始化参数 - this._initModelName = 'medium'; - this._initOptions = {}; - } - - async initialize(modelName, options) { - 
this._initModelName = modelName; - this._initOptions = options; - - // 检测模型类型(faster-whisper 或 funasr) - const preset = getAsrModelPreset(modelName); - const engine = preset?.engine || 'faster-whisper'; - - if (engine === 'funasr') { - logger.log(`[ASRFactory] Initializing FunASR service for model: ${modelName}`); - await this.funasr.initialize(modelName, options); - this.currentService = this.funasr; - this.usingBackend = 'funasr'; - } else { - logger.log(`[ASRFactory] Initializing Faster-Whisper service for model: ${modelName}`); - await this.switchToFasterWhisper(); - } - } - - async switchToFasterWhisper() { - if (this.usingBackend === 'whisper') return; - - try { - await this.fallback.initialize(this._initModelName, this._initOptions); - this.currentService = this.fallback; - this.usingBackend = 'whisper'; - - // 重新绑定回调 - if (this._onSentenceComplete) this.currentService.setSentenceCompleteCallback(this._onSentenceComplete); - if (this._onPartialResult) this.currentService.setPartialResultCallback(this._onPartialResult); - - // 绑定 fallback 的崩溃回调 - if (this._onServerCrash) { - this.currentService.setServerCrashCallback(this._onServerCrash); - } - - logger.log('[ASRFactory] Using Faster-Whisper Service'); - } catch (error) { - logger.error('[ASRFactory] Failed to initialize Faster-Whisper:', error); - throw error; - } - } - - // 兼容性方法,仍支持 switchToFallback 名称 - async switchToFallback() { - return this.switchToFasterWhisper(); - } - - setServerCrashCallback(callback) { - this._onServerCrash = callback; - - // 设置当前使用的服务的崩溃回调 - if (this.currentService && typeof this.currentService.setServerCrashCallback === 'function') { - this.currentService.setServerCrashCallback(callback); - } - } - - // --- 代理方法 --- - - setSentenceCompleteCallback(callback) { - this._onSentenceComplete = callback; - if (this.currentService) { - this.currentService.setSentenceCompleteCallback(callback); - } - } - - setPartialResultCallback(callback) { - this._onPartialResult = callback; - if 
(this.currentService) { - this.currentService.setPartialResultCallback(callback); - } - } - - async addAudioChunk(...args) { - if (!this.currentService) { - throw new Error('ASR service not initialized'); - } - return this.currentService.addAudioChunk(...args); - } - - async start(...args) { - if (!this.currentService) { - return; - } - if (this.currentService.start) { - return this.currentService.start(...args); - } - } - - async stop() { - if (this.currentService) { - await this.currentService.stop(); - } - } - - async destroy() { - if (this.currentService) { - await this.currentService.destroy(); - } - } - - async forceCommitSentence(...args) { - if (!this.currentService) { - return; - } - if (this.currentService.forceCommitSentence) { - return this.currentService.forceCommitSentence(...args); - } - } - - async commitSentence(...args) { - if (!this.currentService) { - return null; - } - if (this.currentService.commitSentence) { - return this.currentService.commitSentence(...args); - } - return null; - } - - async saveAudioFile(...args) { - if (!this.currentService) { - return null; - } - return this.currentService.saveAudioFile(...args); - } - - clearContext(...args) { - if (this.currentService && this.currentService.clearContext) { - this.currentService.clearContext(...args); - } - } - - // 代理属性访问(如果需要访问 isInitialized 等) - get isInitialized() { - return this.currentService ? this.currentService.isInitialized : false; - } - - get retainAudioFiles() { - return this.currentService ? 
this.currentService.retainAudioFiles : false; - } -} export async function createWhisperService() { - return new FallbackAwareASRService(); + return new ASRService(); } export default createWhisperService; \ No newline at end of file diff --git a/desktop/src/core/app-settings.js b/desktop/src/core/app-settings.js new file mode 100644 index 0000000..992c8e6 --- /dev/null +++ b/desktop/src/core/app-settings.js @@ -0,0 +1,25 @@ +import Store from 'electron-store'; + +const store = new Store({ + name: 'livegalgame-settings', +}); + +export function getAsrCacheBaseSetting() { + const value = store.get('asr.cacheBase'); + return typeof value === 'string' && value.trim() ? value : null; +} + +export function setAsrCacheBaseSetting(cacheBase) { + if (cacheBase === null || cacheBase === undefined || String(cacheBase).trim() === '') { + store.delete('asr.cacheBase'); + return null; + } + const normalized = String(cacheBase).trim(); + store.set('asr.cacheBase', normalized); + return normalized; +} + +export function clearAsrCacheBaseSetting() { + store.delete('asr.cacheBase'); +} + diff --git a/desktop/src/core/modules/ipc-handlers.js b/desktop/src/core/modules/ipc-handlers.js index a1ebec8..5b1f050 100644 --- a/desktop/src/core/modules/ipc-handlers.js +++ b/desktop/src/core/modules/ipc-handlers.js @@ -1,9 +1,20 @@ -import electron from 'electron'; +import { ipcMain, systemPreferences } from 'electron'; import DatabaseManager from '../../db/database.js'; import ASRManager from '../../asr/asr-manager.js'; import ASRModelManager from '../../asr/model-manager.js'; - -const { ipcMain, systemPreferences } = electron; +import LLMSuggestionService from './llm-suggestion-service.js'; +import ReviewService from './review-service.js'; +import MemoryService from './memory-service.js'; +import { registerWindowHandlers } from './ipc-handlers/window-handlers.js'; +import { registerDatabaseHandlers } from './ipc-handlers/database-handlers.js'; +import { registerLLMHandlers } from 
'./ipc-handlers/llm-handlers.js'; +import { registerSuggestionHandlers } from './ipc-handlers/suggestion-handlers.js'; +import { registerReviewHandlers } from './ipc-handlers/review-handlers.js'; +import { registerMemoryHandlers } from './ipc-handlers/memory-handlers.js'; +import { registerASRModelHandlers } from './ipc-handlers/asr-model-handlers.js'; +import { registerASRAudioHandlers } from './ipc-handlers/asr-audio-handlers.js'; +import { registerMediaHandlers } from './ipc-handlers/media-handlers.js'; +import { registerAppConfigHandlers } from './ipc-handlers/app-config-handlers.js'; /** * IPC 处理器管理器 - 负责注册所有 IPC 通信处理器 @@ -14,6 +25,9 @@ export class IPCManager { this.db = null; this.modelManager = null; this.asrManager = null; + this.llmSuggestionService = null; + this.reviewService = null; + this.memoryService = null; this.asrModelPreloading = false; this.asrModelPreloaded = false; this.asrServerCrashCallback = null; @@ -51,709 +65,165 @@ export class IPCManager { } } + /** + * 初始化 LLM 建议服务 + */ + initLLMSuggestionService() { + if (!this.llmSuggestionService) { + this.llmSuggestionService = new LLMSuggestionService(() => this.db); + } + } + + /** + * 初始化 Memory Service(结构化画像/事件侧车) + */ + initMemoryService() { + if (!this.memoryService) { + this.memoryService = new MemoryService(); + } + } + /** + * 初始化 Review Service + */ + initReviewService() { + if (!this.reviewService) { + this.reviewService = new ReviewService(() => this.db); + } + } + /** * 注册所有 IPC 处理器 */ registerHandlers() { - console.log('Registering IPC handlers...'); + console.log('[IPCHandlers] Registering IPC handlers...'); this.initDatabase(); this.initModelManager(); + this.initLLMSuggestionService(); + this.initReviewService(); + this.initMemoryService(); this.setupWindowHandlers(); + this.setupAppConfigHandlers(); this.setupDatabaseHandlers(); this.setupLLMHandlers(); + this.setupSuggestionHandlers(); + this.setupReviewHandlers(); + this.setupMemoryHandlers(); this.setupASRModelHandlers(); 
this.setupASRAudioHandlers(); this.setupMediaHandlers(); - console.log('All IPC handlers registered'); + console.log('[IPCHandlers] All IPC handlers registered successfully'); } /** - * 设置窗口相关 IPC 处理器 + * 设置复盘相关 IPC 处理器 */ - setupWindowHandlers() { - // 显示HUD - ipcMain.on('show-hud', async () => { - if (!this.windowManager.getHUDWindow()) { - await this.windowManager.createHUDWindow( - () => this.checkASRReady(), - () => {} - ); - } else { - this.windowManager.showHUD(); - } - console.log('HUD显示'); - }); - - // 隐藏HUD - ipcMain.on('hide-hud', () => { - this.windowManager.hideHUD(); - console.log('HUD隐藏'); + setupReviewHandlers() { + registerReviewHandlers({ + reviewService: this.reviewService }); - - // 关闭HUD - ipcMain.on('close-hud', () => { - this.windowManager.closeHUD(); - console.log('HUD关闭'); - }); - - // HUD拖拽相关 - ipcMain.on('start-hud-drag', (event, pos) => { - this.windowManager.startHUDrag(pos); - }); - - ipcMain.on('update-hud-drag', (event, pos) => { - this.windowManager.updateHUDrag(pos); - }); - - ipcMain.on('end-hud-drag', () => { - this.windowManager.endHUDrag(); - }); - - // 主窗口控制 - ipcMain.on('minimize-window', () => { - this.windowManager.minimizeMainWindow(); - }); - - ipcMain.on('close-window', () => { - this.windowManager.closeMainWindow(); - }); - - // 主窗口拖拽相关 - ipcMain.on('start-drag', (event, pos) => { - this.windowManager.startMainDrag(pos); - }); - - ipcMain.on('update-drag', (event, pos) => { - this.windowManager.updateMainDrag(pos); - }); - - ipcMain.on('end-drag', () => { - this.windowManager.endMainDrag(); - }); - - console.log('Window IPC handlers registered'); } /** - * 设置数据库相关 IPC 处理器 + * 设置窗口相关 IPC 处理器 */ - setupDatabaseHandlers() { - // 获取所有角色 - ipcMain.handle('db-get-all-characters', () => { - try { - return this.db.getAllCharacters(); - } catch (error) { - console.error('Error getting all characters:', error); - return []; - } - }); - - // 获取单个角色 - ipcMain.handle('db-get-character-by-id', (event, id) => { - try { - return 
this.db.getCharacterById(id); - } catch (error) { - console.error('Error getting character:', error); - return null; - } - }); - - // 创建角色 - ipcMain.handle('db-create-character', (event, characterData) => { - try { - return this.db.createCharacter(characterData); - } catch (error) { - console.error('Error creating character:', error); - return null; - } - }); - - // 创建对话 - ipcMain.handle('db-create-conversation', (event, conversationData) => { - try { - return this.db.createConversation(conversationData); - } catch (error) { - console.error('Error creating conversation:', error); - return null; - } - }); - - // 获取角色的对话 - ipcMain.handle('db-get-conversations-by-character', (event, characterId) => { - try { - return this.db.getConversationsByCharacter(characterId); - } catch (error) { - console.error('Error getting conversations:', error); - return []; - } - }); - - // 创建消息 - ipcMain.handle('db-create-message', (event, messageData) => { - try { - return this.db.createMessage(messageData); - } catch (error) { - console.error('Error creating message:', error); - return null; - } - }); - - // 获取对话的消息 - ipcMain.handle('db-get-messages-by-conversation', (event, conversationId) => { - try { - return this.db.getMessagesByConversation(conversationId); - } catch (error) { - console.error('Error getting messages:', error); - return []; - } - }); - - // 更新对话 - ipcMain.handle('db-update-conversation', (event, conversationId, updates) => { - try { - return this.db.updateConversation(conversationId, updates); - } catch (error) { - console.error('Error updating conversation:', error); - return null; - } + setupWindowHandlers() { + registerWindowHandlers({ + windowManager: this.windowManager, + checkASRReady: () => this.checkASRReady() }); + } - // 获取统计数据 - ipcMain.handle('db-get-statistics', () => { - try { - return this.db.getStatistics(); - } catch (error) { + /** + * 设置应用级配置相关 IPC 处理器(如模型缓存目录) + */ + setupAppConfigHandlers() { + registerAppConfigHandlers({ + onAsrCacheChanged: 
async () => { + // 1) 让 ModelManager 重新读取环境变量(下载落盘位置) + this.modelManager = new ASRModelManager(); + // 2) 重载 ASR 后端,保证其读取到新的缓存目录 try { - console.error('Error getting statistics:', error); - } catch (logError) { - // 如果 console.error 也失败,忽略 + await this.reloadASRModel(); + } catch (error) { + console.warn('[ASR] Reload after cache change failed:', error); } - return { - characterCount: 0, - conversationCount: 0, - messageCount: 0, - avgAffinity: 0 - }; - } - }); - - // 获取角色页面统计数据 - ipcMain.handle('db-get-character-page-statistics', () => { - try { - return this.db.getCharacterPageStatistics(); - } catch (error) { - console.error('Error getting character page statistics:', error); - return { - characterCount: 0, - activeConversationCount: 0, - avgAffinity: 0 - }; - } - }); - - // 获取最近对话 - ipcMain.handle('db-get-recent-conversations', (event, limit) => { - try { - return this.db.getRecentConversations(limit || 10); - } catch (error) { - console.error('Error getting recent conversations:', error); - return []; - } - }); - - // 获取所有对话 - ipcMain.handle('db-get-all-conversations', () => { - try { - return this.db.getAllConversations(); - } catch (error) { - console.error('Error getting all conversations:', error); - return []; - } - }); - - // 更新消息 - ipcMain.handle('db-update-message', (event, messageId, updates) => { - try { - return this.db.updateMessage(messageId, updates); - } catch (error) { - console.error('Error updating message:', error); - return null; - } - }); - - // 获取对话的AI分析数据 - ipcMain.handle('db-get-conversation-ai-data', (event, conversationId) => { - try { - return this.db.getConversationAIData(conversationId); - } catch (error) { - console.error('Error getting conversation AI data:', error); - return { - analysisReport: null, - keyMoments: [], - personalityAnalysis: null, - actionSuggestions: [] - }; - } - }); - - // 获取角色详情 - ipcMain.handle('db-get-character-details', (event, characterId) => { - try { - return this.db.getCharacterDetails(characterId); - 
} catch (error) { - console.error('Error getting character details:', error); - return null; - } - }); - - // 更新角色详情的自定义字段 - ipcMain.handle('db-update-character-details-custom-fields', (event, characterId, customFields) => { - try { - return this.db.updateCharacterDetailsCustomFields(characterId, customFields); - } catch (error) { - console.error('Error updating character details custom fields:', error); - return false; - } - }); - - // 重新生成角色详情(从会话中) - ipcMain.handle('db-regenerate-character-details', (event, characterId) => { - try { - return this.db.generateCharacterDetailsFromConversations(characterId); - } catch (error) { - console.error('Error regenerating character details:', error); - return null; - } - }); - - // 删除对话 - ipcMain.handle('db-delete-conversation', (event, conversationId) => { - try { - return this.db.deleteConversation(conversationId); - } catch (error) { - console.error('Error deleting conversation:', error); - return false; - } - }); - - // 删除角色 - ipcMain.handle('db-delete-character', (event, characterId) => { - try { - return this.db.deleteCharacter(characterId); - } catch (error) { - console.error('Error deleting character:', error); - return false; } }); + } - console.log('Database IPC handlers registered'); + /** + * 设置数据库相关 IPC 处理器 + */ + setupDatabaseHandlers() { + registerDatabaseHandlers({ db: this.db }); } /** * 设置 LLM 配置相关 IPC 处理器 */ setupLLMHandlers() { - // 保存LLM配置 - ipcMain.handle('llm-save-config', (event, configData) => { - try { - return this.db.saveLLMConfig(configData); - } catch (error) { - console.error('Error saving LLM config:', error); - throw error; - } - }); - - // 获取所有LLM配置 - ipcMain.handle('llm-get-all-configs', () => { - try { - return this.db.getAllLLMConfigs(); - } catch (error) { - console.error('Error getting LLM configs:', error); - return []; - } - }); - - // 获取默认LLM配置 - ipcMain.handle('llm-get-default-config', () => { - try { - return this.db.getDefaultLLMConfig(); - } catch (error) { - console.error('Error 
getting default LLM config:', error); - return null; - } - }); - - // 获取指定ID的LLM配置 - ipcMain.handle('llm-get-config-by-id', (event, id) => { - try { - return this.db.getLLMConfigById(id); - } catch (error) { - console.error('Error getting LLM config:', error); - return null; - } - }); - - // 删除LLM配置 - ipcMain.handle('llm-delete-config', (event, id) => { - try { - return this.db.deleteLLMConfig(id); - } catch (error) { - console.error('Error deleting LLM config:', error); - throw error; - } - }); - - // 测试LLM连接 - ipcMain.handle('llm-test-connection', async (event, configData) => { - try { - return await this.db.testLLMConnection(configData); - } catch (error) { - console.error('Error testing LLM connection:', error); - return { success: false, message: error.message || '连接测试失败' }; - } - }); + registerLLMHandlers({ db: this.db }); + } - // 设置默认LLM配置 - ipcMain.handle('llm-set-default-config', (event, id) => { - try { - return this.db.setDefaultLLMConfig(id); - } catch (error) { - console.error('Error setting default LLM config:', error); - throw error; - } + /** + * 设置 LLM 建议相关 IPC 处理器 + */ + setupSuggestionHandlers() { + registerSuggestionHandlers({ + db: this.db, + llmSuggestionService: this.llmSuggestionService, + ensureSuggestionService: () => this.initLLMSuggestionService() }); + } - console.log('LLM IPC handlers registered'); + /** + * 设置 Memory Service 相关 IPC 处理器 + */ + setupMemoryHandlers() { + this.initMemoryService(); + registerMemoryHandlers({ memoryService: this.memoryService }); } /** * 设置 ASR 模型管理相关 IPC 处理器 */ setupASRModelHandlers() { - ipcMain.handle('asr-get-model-presets', () => { - try { - return this.modelManager.getModelPresets(); - } catch (error) { - console.error('Error getting ASR model presets:', error); - return []; - } - }); - - ipcMain.handle('asr-get-model-status', (event, modelId) => { - try { - return this.modelManager.getModelStatus(modelId); - } catch (error) { - console.error('Error getting ASR model status:', error); - return null; 
- } - }); - - ipcMain.handle('asr-get-all-model-statuses', () => { - try { - return this.modelManager.getAllModelStatuses(); - } catch (error) { - console.error('Error getting ASR model statuses:', error); - return []; - } - }); - - ipcMain.handle('asr-download-model', (event, modelId, source) => { - try { - return this.modelManager.startDownload(modelId, source); - } catch (error) { - console.error('Error starting ASR model download:', error); - throw error; - } - }); - - ipcMain.handle('asr-cancel-model-download', (event, modelId) => { - try { - return this.modelManager.cancelDownload(modelId); - } catch (error) { - console.error('Error cancelling ASR model download:', error); - throw error; + registerASRModelHandlers({ + getModelManager: () => { + if (!this.modelManager) { + this.modelManager = new ASRModelManager(); + } + return this.modelManager; } }); - - console.log('ASR Model IPC handlers registered'); } /** * 设置 ASR 音频处理相关 IPC 处理器 */ setupASRAudioHandlers() { - // 初始化 ASR 管理器 - ipcMain.handle('asr-initialize', async (event, conversationId) => { - try { - this.asrManager = this.getOrCreateASRManager(); - if (this.asrModelPreloaded && this.asrManager.isInitialized) { - this.asrManager.currentConversationId = conversationId; - if (!this.asrManager.isRunning) { - await this.asrManager.start(conversationId); - } - return true; - } - return await this.asrManager.initialize(conversationId); - } catch (error) { - console.error('Error initializing ASR:', error); - throw error; - } - }); - - // 处理音频数据 - ipcMain.on('asr-audio-data', async (event, data) => { - try { - if (!this.asrManager) { - console.warn('[ASR] ASRManager not initialized, audio data ignored'); - return; - } - - if (!this.asrManager.isInitialized) { - console.warn('[ASR] ASRManager not initialized (isInitialized=false), audio data ignored'); - return; - } - - if (!this.asrManager.isRunning) { - console.warn('[ASR] ASRManager not running, audio data ignored'); - return; - } - - const result = await 
this.asrManager.processAudioData(data); - - // 如果有识别结果,发送给所有窗口 - if (result) { - this.emitASREvent('asr-sentence-complete', result); - } - } catch (error) { - console.error('Error processing audio data:', error); - - // 发送错误消息 - this.emitASREvent('asr-error', { - sourceId: data.sourceId, - error: error.message - }); - } - }); - - // 检查 ASR 模型是否就绪 - ipcMain.handle('asr-check-ready', async () => { - return await this.checkASRReady(); - }); - - // 开始 ASR - ipcMain.handle('asr-start', async (event, conversationId) => { - try { - console.log(`[ASR] Starting ASR with conversationId: ${conversationId}`); - this.asrManager = this.getOrCreateASRManager(); - await this.asrManager.start(conversationId); - console.log('[ASR] ASR started successfully'); - return { success: true }; - } catch (error) { - console.error('[ASR] Error starting ASR:', error); - throw error; - } - }); - - // 停止 ASR - ipcMain.handle('asr-stop', async () => { - try { - if (this.asrManager) { - await this.asrManager.stop(); + registerASRAudioHandlers({ + getOrCreateASRManager: () => this.getOrCreateASRManager(), + emitASREvent: (eventName, payload) => { + if (this.emitASREvent) { + this.emitASREvent(eventName, payload); } - return { success: true }; - } catch (error) { - console.error('Error stopping ASR:', error); - throw error; - } - }); - - // 获取 ASR 配置 - ipcMain.handle('asr-get-configs', () => { - try { - return this.db.getASRConfigs(); - } catch (error) { - console.error('Error getting ASR configs:', error); - return []; - } + }, + checkASRReady: () => this.checkASRReady(), + reloadASRModel: () => this.reloadASRModel(), + db: this.db, + getASRPreloadState: () => this.getASRPreloadState(), + setASRPreloadState: (preloading, preloaded) => this.setASRPreloadState(preloading, preloaded) }); - - // 创建 ASR 配置 - ipcMain.handle('asr-create-config', (event, configData) => { - try { - return this.db.createASRConfig(configData); - } catch (error) { - console.error('Error creating ASR config:', error); - throw 
error; - } - }); - - // 更新 ASR 配置 - ipcMain.handle('asr-update-config', (event, id, updates) => { - try { - return this.db.updateASRConfig(id, updates); - } catch (error) { - console.error('Error updating ASR config:', error); - throw error; - } - }); - - // 重新加载 ASR 模型 - ipcMain.handle('asr-reload-model', async () => { - try { - await this.reloadASRModel(); - return { success: true }; - } catch (error) { - console.error('[ASR] Error reloading ASR model:', error); - throw error; - } - }); - - // 设置默认 ASR 配置 - ipcMain.handle('asr-set-default-config', (event, id) => { - try { - return this.db.setDefaultASRConfig(id); - } catch (error) { - console.error('Error setting default ASR config:', error); - throw error; - } - }); - - // 获取音频源配置 - ipcMain.handle('asr-get-audio-sources', () => { - try { - return this.db.getAudioSources(); - } catch (error) { - console.error('Error getting audio sources:', error); - return []; - } - }); - - // 创建音频源配置 - ipcMain.handle('asr-create-audio-source', (event, sourceData) => { - try { - return this.db.createAudioSource(sourceData); - } catch (error) { - console.error('Error creating audio source:', error); - throw error; - } - }); - - // 更新音频源配置 - ipcMain.handle('asr-update-audio-source', (event, id, updates) => { - try { - return this.db.updateAudioSource(id, updates); - } catch (error) { - console.error('Error updating audio source:', error); - throw error; - } - }); - - // 获取对话的语音识别记录 - ipcMain.handle('asr-get-speech-records', (event, conversationId) => { - try { - return this.db.getSpeechRecordsByConversation(conversationId); - } catch (error) { - console.error('Error getting speech records:', error); - return []; - } - }); - - // 将语音识别记录转换为消息 - ipcMain.handle('asr-convert-to-message', async (event, recordId, conversationId) => { - try { - this.asrManager = this.getOrCreateASRManager(); - return await this.asrManager.convertRecordToMessage(recordId, conversationId); - } catch (error) { - console.error('Error converting record to 
message:', error); - throw error; - } - }); - - // 清理过期的音频文件 - ipcMain.handle('asr-cleanup-audio-files', async (event, retentionDays) => { - try { - this.asrManager = this.getOrCreateASRManager(); - return this.asrManager.cleanupExpiredAudioFiles(retentionDays); - } catch (error) { - console.error('Error cleaning up audio files:', error); - throw error; - } - }); - - console.log('ASR Audio IPC handlers registered'); } /** * 设置媒体权限相关 IPC 处理器 */ setupMediaHandlers() { - // 媒体权限 API (macOS) - 直接使用已导入的模块 - - // 检查媒体访问权限状态 - ipcMain.handle('check-media-access-status', async (event, mediaType) => { - try { - if (process.platform === 'darwin') { - const status = systemPreferences.getMediaAccessStatus(mediaType); - console.log(`[Permission] ${mediaType} access status: ${status}`); - return { status, platform: 'darwin' }; - } - return { status: 'granted', platform: process.platform }; - } catch (error) { - console.error(`Error checking ${mediaType} access status:`, error); - return { status: 'unknown', error: error.message }; - } - }); - - // 请求媒体访问权限 (macOS) - ipcMain.handle('request-media-access', async (event, mediaType) => { - try { - if (process.platform === 'darwin') { - const currentStatus = systemPreferences.getMediaAccessStatus(mediaType); - console.log(`[Permission] Current ${mediaType} status: ${currentStatus}`); - - if (currentStatus === 'granted') { - return { granted: true, status: 'granted' }; - } - - if (currentStatus === 'denied') { - return { - granted: false, - status: 'denied', - message: '权限已被拒绝,请在系统偏好设置 > 安全性与隐私 > 隐私 中手动开启' - }; - } - - console.log(`[Permission] Requesting ${mediaType} access...`); - const granted = await systemPreferences.askForMediaAccess(mediaType); - console.log(`[Permission] ${mediaType} access ${granted ? 'granted' : 'denied'}`); - return { granted, status: granted ? 
'granted' : 'denied' }; - } - - return { granted: true, status: 'granted', platform: process.platform }; - } catch (error) { - console.error(`Error requesting ${mediaType} access:`, error); - return { granted: false, error: error.message }; - } - }); - - // 检查屏幕录制权限 (macOS) - ipcMain.handle('check-screen-capture-access', async () => { - try { - if (process.platform === 'darwin') { - const status = systemPreferences.getMediaAccessStatus('screen'); - console.log(`[Permission] Screen capture access status: ${status}`); - return { status, platform: 'darwin' }; - } - return { status: 'granted', platform: process.platform }; - } catch (error) { - console.error('Error checking screen capture access:', error); - return { status: 'unknown', error: error.message }; - } - }); - - console.log('Media Permission IPC handlers registered'); + registerMediaHandlers(); } /** @@ -761,7 +231,8 @@ export class IPCManager { */ getOrCreateASRManager() { if (!this.asrManager) { - this.asrManager = new ASRManager(); + this.initDatabase(); + this.asrManager = new ASRManager(this.db); this.asrManager.setEventEmitter(this.emitASREvent); // 设置服务器崩溃回调 @@ -782,6 +253,16 @@ export class IPCManager { * 检查 ASR 模型是否就绪 */ async checkASRReady() { + const isDownloading = this.asrManager?.whisperService?.isDownloading === true; + + if (isDownloading) { + return { + ready: false, + message: '正在下载语音模型,首次下载可能较慢,请耐心等待...', + downloading: true + }; + } + if (this.asrModelPreloading) { return { ready: false, @@ -825,6 +306,7 @@ export class IPCManager { */ async reloadASRModel() { console.log('[ASR] 重新加载 ASR 模型'); + this.asrModelPreloading = true; if (this.asrManager) { try { await this.asrManager.stop(); @@ -839,8 +321,18 @@ export class IPCManager { this.asrManager = null; } - this.asrModelPreloaded = false; - this.asrModelPreloading = false; + // 重新创建并初始化,确保新后端立即拉起 + try { + const asrManager = this.getOrCreateASRManager(); + await asrManager.initialize(null); + this.asrModelPreloaded = true; + } catch 
(error) { + console.error('[ASR] 重新加载并初始化 ASR 模型失败:', error); + this.asrModelPreloaded = false; + throw error; + } finally { + this.asrModelPreloading = false; + } } /** @@ -873,4 +365,4 @@ export class IPCManager { } } } -} \ No newline at end of file +} diff --git a/desktop/src/core/modules/ipc-handlers/app-config-handlers.js b/desktop/src/core/modules/ipc-handlers/app-config-handlers.js new file mode 100644 index 0000000..4bf918c --- /dev/null +++ b/desktop/src/core/modules/ipc-handlers/app-config-handlers.js @@ -0,0 +1,114 @@ +import { ipcMain, app } from 'electron'; +import electron from 'electron'; +import path from 'path'; +import fs from 'fs'; +import { getAsrCacheBaseSetting, setAsrCacheBaseSetting, clearAsrCacheBaseSetting } from '../../app-settings.js'; +import { applyAsrCacheEnv, computeAsrCachePaths } from '../../../asr/asr-cache-env.js'; + +function safeHandle(channel, handler) { + try { + if (typeof ipcMain.removeHandler === 'function') { + ipcMain.removeHandler(channel); + } + } catch { + // ignore + } + ipcMain.handle(channel, handler); +} + +function normalizeDirInput(value) { + if (value === null || value === undefined) return null; + const str = String(value).trim(); + if (!str) return null; + return path.resolve(str); +} + +function isExistingDirectory(dirPath) { + try { + const stat = fs.statSync(dirPath, { throwIfNoEntry: false }); + return !!stat && stat.isDirectory(); + } catch { + return false; + } +} + +/** + * 注册应用级配置(如模型缓存目录)相关 IPC 处理器 + * @param {object} deps + * @param {Function} deps.onAsrCacheChanged - async () => void + */ +export function registerAppConfigHandlers({ onAsrCacheChanged }) { + safeHandle('app-get-model-cache-paths', () => { + const userDataDir = app.getPath('userData'); + const persistedBase = getAsrCacheBaseSetting(); + const envBase = process.env.ASR_CACHE_BASE || null; + const effectiveBase = envBase || persistedBase || null; + const computed = computeAsrCachePaths({ userDataDir, asrCacheBase: effectiveBase }); + 
+ return { + ok: true, + persistedAsrCacheBase: persistedBase, + env: { + ASR_CACHE_BASE: process.env.ASR_CACHE_BASE || '', + HF_HOME: process.env.HF_HOME || '', + ASR_CACHE_DIR: process.env.ASR_CACHE_DIR || '', + MODELSCOPE_CACHE: process.env.MODELSCOPE_CACHE || '', + MODELSCOPE_CACHE_HOME: process.env.MODELSCOPE_CACHE_HOME || '', + }, + computed, + defaults: { + userDataAsrCacheBase: path.join(userDataDir, 'asr-cache'), + }, + }; + }); + + safeHandle('app-select-directory', async (_event, options = {}) => { + const parent = electron.BrowserWindow.getFocusedWindow() || null; + const result = await electron.dialog.showOpenDialog(parent, { + title: options?.title || '选择目录', + properties: ['openDirectory', 'createDirectory'], + }); + if (result.canceled) { + return { canceled: true, path: null }; + } + const selected = result.filePaths?.[0] ? path.resolve(result.filePaths[0]) : null; + return { canceled: false, path: selected }; + }); + + safeHandle('app-set-asr-cache-base', async (_event, newBaseRaw) => { + const userDataDir = app.getPath('userData'); + const newBase = normalizeDirInput(newBaseRaw); + + if (!newBase) { + clearAsrCacheBaseSetting(); + // 强制回落到默认 userData/asr-cache + const computed = applyAsrCacheEnv({ userDataDir, asrCacheBase: null, force: true }); + if (typeof onAsrCacheChanged === 'function') { + await onAsrCacheChanged(); + } + return { ok: true, cleared: true, computed }; + } + + // 目录不存在则尝试创建(跨平台) + try { + fs.mkdirSync(newBase, { recursive: true }); + } catch (error) { + return { ok: false, message: `无法创建目录: ${error?.message || String(error)}` }; + } + + if (!isExistingDirectory(newBase)) { + return { ok: false, message: '选择的路径不是可用的目录' }; + } + + setAsrCacheBaseSetting(newBase); + const computed = applyAsrCacheEnv({ userDataDir, asrCacheBase: newBase, force: true }); + + if (typeof onAsrCacheChanged === 'function') { + await onAsrCacheChanged(); + } + + return { ok: true, cleared: false, computed }; + }); + + console.log('App config IPC 
handlers registered'); +} diff --git a/desktop/src/core/modules/ipc-handlers/asr-audio-handlers.js b/desktop/src/core/modules/ipc-handlers/asr-audio-handlers.js new file mode 100644 index 0000000..8a52a96 --- /dev/null +++ b/desktop/src/core/modules/ipc-handlers/asr-audio-handlers.js @@ -0,0 +1,260 @@ +import { ipcMain } from 'electron'; + +/** + * 注册 ASR 音频处理相关 IPC 处理器 + * @param {object} deps + * @param {Function} deps.getOrCreateASRManager + * @param {Function} deps.emitASREvent + * @param {Function} deps.checkASRReady + * @param {Function} deps.reloadASRModel + * @param {object} deps.db + * @param {Function} deps.getASRPreloadState + * @param {Function} deps.setASRPreloadState + */ +export function registerASRAudioHandlers({ + getOrCreateASRManager, + emitASREvent, + checkASRReady, + reloadASRModel, + db, + getASRPreloadState, + setASRPreloadState +}) { + ipcMain.handle('asr-initialize', async (event, conversationId) => { + try { + const asrManager = getOrCreateASRManager(); + const { preloaded } = getASRPreloadState(); + if (preloaded && asrManager.isInitialized) { + asrManager.currentConversationId = conversationId; + if (!asrManager.isRunning) { + await asrManager.start(conversationId); + } + return true; + } + return await asrManager.initialize(conversationId); + } catch (error) { + console.error('Error initializing ASR:', error); + throw error; + } + }); + + ipcMain.on('asr-audio-data', async (event, data) => { + try { + const asrManager = getOrCreateASRManager(); + + if (!asrManager.isInitialized) { + console.warn('[ASR] ASRManager not initialized (isInitialized=false), audio data ignored'); + return; + } + + if (!asrManager.isRunning) { + console.warn('[ASR] ASRManager not running, audio data ignored'); + return; + } + + const result = await asrManager.processAudioData(data); + if (result) { + emitASREvent('asr-sentence-complete', result); + } + } catch (error) { + console.error('Error processing audio data:', error); + emitASREvent('asr-error', { + 
sourceId: data.sourceId, + error: error.message + }); + } + }); + + // 【仅云端】渲染进程 VAD 静音断句触发:提交当前分段,生成多条消息 + ipcMain.on('asr-silence-commit', async (event, payload) => { + try { + const asrManager = getOrCreateASRManager(); + if (!asrManager?.isInitialized || !asrManager?.isRunning) { + return; + } + const modelName = String(asrManager.modelName || ''); + // 二次保险:只对 cloud 模型生效,避免影响 FunASR + if (!modelName.includes('cloud')) { + return; + } + const sourceId = payload?.sourceId; + if (!sourceId) return; + await asrManager.triggerSilenceCommit(sourceId); + } catch (error) { + console.error('Error handling asr-silence-commit:', error); + } + }); + + ipcMain.handle('asr-check-ready', async () => { + return await checkASRReady(); + }); + + ipcMain.handle('asr-start', async (event, conversationId) => { + try { + console.log(`[ASR] Starting ASR with conversationId: ${conversationId}`); + const asrManager = getOrCreateASRManager(); + await asrManager.start(conversationId); + console.log('[ASR] ASR started successfully'); + return { success: true }; + } catch (error) { + console.error('[ASR] Error starting ASR:', error); + throw error; + } + }); + + ipcMain.handle('asr-stop', async () => { + try { + const asrManager = getOrCreateASRManager(); + await asrManager.stop(); + return { success: true }; + } catch (error) { + console.error('Error stopping ASR:', error); + throw error; + } + }); + + ipcMain.handle('asr-get-configs', () => { + try { + return db.getASRConfigs(); + } catch (error) { + console.error('Error getting ASR configs:', error); + return []; + } + }); + + ipcMain.handle('asr-create-config', (event, configData) => { + try { + return db.createASRConfig(configData); + } catch (error) { + console.error('Error creating ASR config:', error); + throw error; + } + }); + + ipcMain.handle('asr-update-config', (event, id, updates) => { + try { + return db.updateASRConfig(id, updates); + } catch (error) { + console.error('Error updating ASR config:', error); + throw error; + 
} + }); + + ipcMain.handle('asr-reload-model', async () => { + try { + await reloadASRModel(); + return { success: true }; + } catch (error) { + console.error('[ASR] Error reloading ASR model:', error); + throw error; + } + }); + + ipcMain.handle('asr-set-default-config', (event, id) => { + try { + return db.setDefaultASRConfig(id); + } catch (error) { + console.error('Error setting default ASR config:', error); + throw error; + } + }); + + ipcMain.handle('asr-get-audio-sources', () => { + try { + return db.getAudioSources(); + } catch (error) { + console.error('Error getting audio sources:', error); + return []; + } + }); + + ipcMain.handle('asr-create-audio-source', (event, sourceData) => { + try { + return db.createAudioSource(sourceData); + } catch (error) { + console.error('Error creating audio source:', error); + throw error; + } + }); + + ipcMain.handle('asr-update-audio-source', (event, id, updates) => { + try { + return db.updateAudioSource(id, updates); + } catch (error) { + console.error('Error updating audio source:', error); + throw error; + } + }); + + ipcMain.handle('asr-get-speech-records', (event, conversationId) => { + try { + return db.getSpeechRecordsByConversation(conversationId); + } catch (error) { + console.error('Error getting speech records:', error); + return []; + } + }); + + ipcMain.handle('asr-convert-to-message', async (event, recordId, conversationId) => { + try { + const asrManager = getOrCreateASRManager(); + return await asrManager.convertRecordToMessage(recordId, conversationId); + } catch (error) { + console.error('Error converting record to message:', error); + throw error; + } + }); + + ipcMain.handle('asr-cleanup-audio-files', async (event, retentionDays) => { + try { + const asrManager = getOrCreateASRManager(); + return asrManager.cleanupExpiredAudioFiles(retentionDays); + } catch (error) { + console.error('Error cleaning up audio files:', error); + throw error; + } + }); + + ipcMain.handle('asr-get-audio-data-url', async 
(event, filePath) => { + try { + if (!filePath) return null; + const fs = await import('fs/promises'); + const buffer = await fs.readFile(filePath); + const base64 = buffer.toString('base64'); + // Assume WAV for simplicity, or detect from extension + const ext = filePath.split('.').pop().toLowerCase(); + const mimeType = ext === 'webm' ? 'audio/webm' : 'audio/wav'; + return `data:${mimeType};base64,${base64}`; + } catch (error) { + console.error('Error reading audio file:', error); + return null; + } + }); + + ipcMain.handle('asr-delete-audio-file', async (event, { recordId, filePath }) => { + try { + const asrManager = getOrCreateASRManager(); + + // Delete physical file + if (filePath) { + const fs = await import('fs/promises'); + await fs.unlink(filePath).catch(err => { + console.warn('Physical file already gone or could not be deleted:', err); + }); + } + + // Update database + if (recordId) { + db.deleteSpeechRecordAudio(recordId); + } + + return { success: true }; + } catch (error) { + console.error('Error deleting audio file:', error); + return { success: false, error: error.message }; + } + }); + + console.log('ASR Audio IPC handlers registered'); +} + diff --git a/desktop/src/core/modules/ipc-handlers/asr-model-handlers.js b/desktop/src/core/modules/ipc-handlers/asr-model-handlers.js new file mode 100644 index 0000000..44c7f1b --- /dev/null +++ b/desktop/src/core/modules/ipc-handlers/asr-model-handlers.js @@ -0,0 +1,55 @@ +import { ipcMain } from 'electron'; + +/** + * 注册 ASR 模型管理相关 IPC 处理器 + * @param {object} deps + * @param {Function} deps.getModelManager + */ +export function registerASRModelHandlers({ getModelManager }) { + ipcMain.handle('asr-get-model-presets', () => { + try { + return getModelManager().getModelPresets(); + } catch (error) { + console.error('Error getting ASR model presets:', error); + return []; + } + }); + + ipcMain.handle('asr-get-model-status', (event, modelId) => { + try { + return getModelManager().getModelStatus(modelId); + } 
catch (error) { + console.error('Error getting ASR model status:', error); + return null; + } + }); + + ipcMain.handle('asr-get-all-model-statuses', () => { + try { + return getModelManager().getAllModelStatuses(); + } catch (error) { + console.error('Error getting ASR model statuses:', error); + return []; + } + }); + + ipcMain.handle('asr-download-model', (event, modelId, source) => { + try { + return getModelManager().startDownload(modelId, source); + } catch (error) { + console.error('Error starting ASR model download:', error); + throw error; + } + }); + + ipcMain.handle('asr-cancel-model-download', (event, modelId) => { + try { + return getModelManager().cancelDownload(modelId); + } catch (error) { + console.error('Error cancelling ASR model download:', error); + throw error; + } + }); + + console.log('ASR Model IPC handlers registered'); +} diff --git a/desktop/src/core/modules/ipc-handlers/database-handlers.js b/desktop/src/core/modules/ipc-handlers/database-handlers.js new file mode 100644 index 0000000..9fae87a --- /dev/null +++ b/desktop/src/core/modules/ipc-handlers/database-handlers.js @@ -0,0 +1,220 @@ +import { ipcMain } from 'electron'; + +/** + * 注册数据库相关 IPC 处理器 + * @param {object} deps + * @param {object} deps.db + */ +export function registerDatabaseHandlers({ db }) { + ipcMain.handle('db-get-all-characters', () => { + try { + return db.getAllCharacters(); + } catch (error) { + console.error('Error getting all characters:', error); + return []; + } + }); + + ipcMain.handle('db-get-character-by-id', (event, id) => { + try { + return db.getCharacterById(id); + } catch (error) { + console.error('Error getting character:', error); + return null; + } + }); + + ipcMain.handle('db-create-character', (event, characterData) => { + try { + return db.createCharacter(characterData); + } catch (error) { + console.error('Error creating character:', error); + return null; + } + }); + + ipcMain.handle('db-create-conversation', (event, conversationData) => { + 
try { + return db.createConversation(conversationData); + } catch (error) { + console.error('Error creating conversation:', error); + return null; + } + }); + + ipcMain.handle('db-get-conversations-by-character', (event, characterId) => { + try { + return db.getConversationsByCharacter(characterId); + } catch (error) { + console.error('Error getting conversations:', error); + return []; + } + }); + + ipcMain.handle('db-create-message', (event, messageData) => { + try { + return db.createMessage(messageData); + } catch (error) { + console.error('Error creating message:', error); + return null; + } + }); + + ipcMain.handle('db-get-messages-by-conversation', (event, conversationId) => { + try { + return db.getMessagesByConversation(conversationId); + } catch (error) { + console.error('Error getting messages:', error); + return []; + } + }); + + ipcMain.handle('db-update-conversation', (event, conversationId, updates) => { + try { + return db.updateConversation(conversationId, updates); + } catch (error) { + console.error('Error updating conversation:', error); + return null; + } + }); + + ipcMain.handle('db-get-statistics', () => { + try { + return db.getStatistics(); + } catch (error) { + try { + console.error('Error getting statistics:', error); + } catch (logError) { + // ignore secondary logging errors + } + return { + characterCount: 0, + conversationCount: 0, + messageCount: 0, + avgAffinity: 0 + }; + } + }); + + ipcMain.handle('db-get-character-page-statistics', () => { + try { + return db.getCharacterPageStatistics(); + } catch (error) { + console.error('Error getting character page statistics:', error); + return { + characterCount: 0, + activeConversationCount: 0, + avgAffinity: 0 + }; + } + }); + + ipcMain.handle('db-get-recent-conversations', (event, limit) => { + try { + return db.getRecentConversations(limit || 10); + } catch (error) { + console.error('Error getting recent conversations:', error); + return []; + } + }); + + 
ipcMain.handle('db-get-all-conversations', () => { + try { + return db.getAllConversations(); + } catch (error) { + console.error('Error getting all conversations:', error); + return []; + } + }); + + ipcMain.handle('db-get-conversation-by-id', (event, conversationId) => { + try { + return db.getConversationById(conversationId); + } catch (error) { + console.error('Error getting conversation by id:', error); + return null; + } + }); + + ipcMain.handle('db-update-message', (event, messageId, updates) => { + try { + return db.updateMessage(messageId, updates); + } catch (error) { + console.error('Error updating message:', error); + return null; + } + }); + + ipcMain.handle('db-get-conversation-ai-data', (event, conversationId) => { + try { + return db.getConversationAIData(conversationId); + } catch (error) { + console.error('Error getting conversation AI data:', error); + return { + analysisReport: null, + keyMoments: [], + personalityAnalysis: null, + actionSuggestions: [] + }; + } + }); + + ipcMain.handle('db-select-action-suggestion', (event, payload = {}) => { + try { + return db.selectActionSuggestion(payload); + } catch (error) { + console.error('Error selecting action suggestion:', error); + return false; + } + }); + + ipcMain.handle('db-get-character-details', (event, characterId) => { + try { + return db.getCharacterDetails(characterId); + } catch (error) { + console.error('Error getting character details:', error); + return null; + } + }); + + ipcMain.handle( + 'db-update-character-details-custom-fields', + (event, characterId, customFields) => { + try { + return db.updateCharacterDetailsCustomFields(characterId, customFields); + } catch (error) { + console.error('Error updating character details custom fields:', error); + return false; + } + } + ); + + ipcMain.handle('db-regenerate-character-details', (event, characterId) => { + try { + return db.generateCharacterDetailsFromConversations(characterId); + } catch (error) { + console.error('Error 
regenerating character details:', error); + return null; + } + }); + + ipcMain.handle('db-delete-conversation', (event, conversationId) => { + try { + return db.deleteConversation(conversationId); + } catch (error) { + console.error('Error deleting conversation:', error); + return false; + } + }); + + ipcMain.handle('db-delete-character', (event, characterId) => { + try { + return db.deleteCharacter(characterId); + } catch (error) { + console.error('Error deleting character:', error); + return false; + } + }); + + console.log('Database IPC handlers registered'); +} diff --git a/desktop/src/core/modules/ipc-handlers/llm-handlers.js b/desktop/src/core/modules/ipc-handlers/llm-handlers.js new file mode 100644 index 0000000..03337e9 --- /dev/null +++ b/desktop/src/core/modules/ipc-handlers/llm-handlers.js @@ -0,0 +1,102 @@ +import { ipcMain } from 'electron'; + +/** + * 注册 LLM 配置相关 IPC 处理器 + * @param {object} deps + * @param {object} deps.db + */ +export function registerLLMHandlers({ db }) { + ipcMain.handle('llm-save-config', (event, configData) => { + try { + return db.saveLLMConfig(configData); + } catch (error) { + console.error('Error saving LLM config:', error); + throw error; + } + }); + + ipcMain.handle('llm-get-all-configs', () => { + try { + return db.getAllLLMConfigs(); + } catch (error) { + console.error('Error getting LLM configs:', error); + return []; + } + }); + + ipcMain.handle('llm-get-default-config', () => { + try { + return db.getDefaultLLMConfig(); + } catch (error) { + console.error('Error getting default LLM config:', error); + return null; + } + }); + + ipcMain.handle('llm-get-config-by-id', (event, id) => { + try { + return db.getLLMConfigById(id); + } catch (error) { + console.error('Error getting LLM config:', error); + return null; + } + }); + + ipcMain.handle('llm-delete-config', (event, id) => { + try { + return db.deleteLLMConfig(id); + } catch (error) { + console.error('Error deleting LLM config:', error); + throw error; + } + }); + + 
ipcMain.handle('llm-test-connection', async (event, configData) => { + try { + return await db.testLLMConnection(configData); + } catch (error) { + console.error('Error testing LLM connection:', error); + return { success: false, message: error.message || '连接测试失败' }; + } + }); + + ipcMain.handle('llm-set-default-config', (event, id) => { + try { + return db.setDefaultLLMConfig(id); + } catch (error) { + console.error('Error setting default LLM config:', error); + throw error; + } + }); + + ipcMain.handle('llm-get-feature-configs', () => { + try { + return db.getAllLLMFeatureConfigs(); + } catch (error) { + console.error('Error getting LLM feature configs:', error); + return {}; + } + }); + + ipcMain.handle('llm-get-feature-config', (event, feature) => { + try { + return db.getLLMFeatureConfig(feature); + } catch (error) { + console.error('Error getting LLM feature config:', error); + return null; + } + }); + + ipcMain.handle('llm-set-feature-config', (event, payload = {}) => { + try { + const { feature, llm_config_id } = payload; + return db.setLLMFeatureConfig(feature, llm_config_id || null); + } catch (error) { + console.error('Error setting LLM feature config:', error); + throw error; + } + }); + + console.log('LLM IPC handlers registered'); +} + diff --git a/desktop/src/core/modules/ipc-handlers/media-handlers.js b/desktop/src/core/modules/ipc-handlers/media-handlers.js new file mode 100644 index 0000000..359daa9 --- /dev/null +++ b/desktop/src/core/modules/ipc-handlers/media-handlers.js @@ -0,0 +1,82 @@ +import { ipcMain, systemPreferences, desktopCapturer } from 'electron'; + +/** + * 注册媒体权限相关 IPC 处理器 + */ +export function registerMediaHandlers() { + ipcMain.handle('check-media-access-status', async (event, mediaType) => { + try { + if (process.platform === 'darwin') { + const status = systemPreferences.getMediaAccessStatus(mediaType); + console.log(`[Permission] ${mediaType} access status: ${status}`); + return { status, platform: 'darwin' }; + } + return { 
status: 'granted', platform: process.platform }; + } catch (error) { + console.error(`Error checking ${mediaType} access status:`, error); + return { status: 'unknown', error: error.message }; + } + }); + + ipcMain.handle('request-media-access', async (event, mediaType) => { + try { + if (process.platform === 'darwin') { + const currentStatus = systemPreferences.getMediaAccessStatus(mediaType); + console.log(`[Permission] Current ${mediaType} status: ${currentStatus}`); + + if (currentStatus === 'granted') { + return { granted: true, status: 'granted' }; + } + + if (currentStatus === 'denied') { + return { + granted: false, + status: 'denied', + message: '权限已被拒绝,请在系统偏好设置 > 安全性与隐私 > 隐私 中手动开启' + }; + } + + console.log(`[Permission] Requesting ${mediaType} access...`); + const granted = await systemPreferences.askForMediaAccess(mediaType); + console.log(`[Permission] ${mediaType} access ${granted ? 'granted' : 'denied'}`); + return { granted, status: granted ? 'granted' : 'denied' }; + } + + return { granted: true, status: 'granted', platform: process.platform }; + } catch (error) { + console.error(`Error requesting ${mediaType} access:`, error); + return { granted: false, error: error.message }; + } + }); + + ipcMain.handle('check-screen-capture-access', async () => { + try { + if (process.platform === 'darwin') { + const status = systemPreferences.getMediaAccessStatus('screen'); + console.log(`[Permission] Screen capture access status: ${status}`); + return { status, platform: 'darwin' }; + } + return { status: 'granted', platform: process.platform }; + } catch (error) { + console.error('Error checking screen capture access:', error); + return { status: 'unknown', error: error.message }; + } + }); + + // DesktopCapturer sources (返回可序列化的字段,避免 NativeImage 无法通过 IPC 传输) + ipcMain.handle('get-desktop-sources', async (event, options = {}) => { + try { + const sources = await desktopCapturer.getSources(options); + return sources.map((source) => ({ + id: source.id, + name: 
source.name, + display_id: source.display_id || null + })); + } catch (error) { + console.error('Error getting desktop sources:', error); + return []; + } + }); + + console.log('Media Permission IPC handlers registered'); +} diff --git a/desktop/src/core/modules/ipc-handlers/memory-handlers.js b/desktop/src/core/modules/ipc-handlers/memory-handlers.js new file mode 100644 index 0000000..f91d23a --- /dev/null +++ b/desktop/src/core/modules/ipc-handlers/memory-handlers.js @@ -0,0 +1,50 @@ +import { ipcMain } from 'electron'; + +/** + * 注册 Memory Service 相关 IPC 处理器 + * 仅依赖结构化过滤,无向量召回。 + */ +export function registerMemoryHandlers({ memoryService }) { + if (!memoryService) { + console.warn('[MemoryHandlers] memoryService not provided, skip registration'); + return; + } + + ipcMain.handle('memory-query-profiles', async (event, payload = {}) => { + try { + return await memoryService.queryProfiles(payload); + } catch (error) { + console.error('[MemoryHandlers] query-profiles failed', error); + throw error; + } + }); + + ipcMain.handle('memory-query-events', async (event, payload = {}) => { + try { + return await memoryService.queryEvents(payload); + } catch (error) { + console.error('[MemoryHandlers] query-events failed', error); + throw error; + } + }); + + ipcMain.handle('memory-upsert-profile', async (event, payload = {}) => { + try { + return await memoryService.upsertProfile(payload); + } catch (error) { + console.error('[MemoryHandlers] upsert-profile failed', error); + throw error; + } + }); + + ipcMain.handle('memory-upsert-event', async (event, payload = {}) => { + try { + return await memoryService.upsertEvent(payload); + } catch (error) { + console.error('[MemoryHandlers] upsert-event failed', error); + throw error; + } + }); + + console.log('[MemoryHandlers] Memory handlers registered'); +} diff --git a/desktop/src/core/modules/ipc-handlers/review-handlers.js b/desktop/src/core/modules/ipc-handlers/review-handlers.js new file mode 100644 index 0000000..2f682c4 
--- /dev/null +++ b/desktop/src/core/modules/ipc-handlers/review-handlers.js @@ -0,0 +1,61 @@ + +import { ipcMain } from 'electron'; + +export function registerReviewHandlers({ reviewService }) { + if (!reviewService) { + console.error('[IPC] ReviewService is required for review handlers'); + return; + } + + // 生成复盘 + ipcMain.handle('review:generate', async (event, payload) => { + try { + const { conversationId, force = false, requestId = null } = + typeof payload === 'object' && payload !== null + ? payload + : { conversationId: payload, force: false, requestId: null }; + + if (!conversationId) { + throw new Error('conversationId is required'); + } + + const finalRequestId = + requestId || `review-${Date.now()}-${Math.random().toString(36).slice(2, 6)}`; + + const sendProgress = (progress = {}) => { + try { + if (!event?.sender || event.sender.isDestroyed?.()) return; + event.sender.send('review:progress', { requestId: finalRequestId, ...progress }); + } catch { + // ignore + } + }; + + console.log(`[IPC] Handling review:generate for conversation ${conversationId}, force=${force}, requestId=${finalRequestId}`); + sendProgress({ stage: 'start', percent: 0, message: '开始生成复盘...' 
}); + + const review = await reviewService.generateReview(conversationId, { + force, + onProgress: sendProgress + }); + + sendProgress({ stage: 'done', percent: 1, message: '复盘完成' }); + return { success: true, data: review }; + } catch (error) { + console.error(`[IPC] review:generate failed:`, error); + return { success: false, error: error.message }; + } + }); + + // 获取复盘 + ipcMain.handle('review:get', async (event, conversationId) => { + try { + console.log(`[IPC] Handling review:get for conversation ${conversationId}`); + const review = reviewService.getExistingReview(conversationId); + return { success: true, data: review }; + } catch (error) { + console.error(`[IPC] review:get failed:`, error); + return { success: false, error: error.message }; + } + }); +} diff --git a/desktop/src/core/modules/ipc-handlers/suggestion-handlers.js b/desktop/src/core/modules/ipc-handlers/suggestion-handlers.js new file mode 100644 index 0000000..40ab743 --- /dev/null +++ b/desktop/src/core/modules/ipc-handlers/suggestion-handlers.js @@ -0,0 +1,108 @@ +import { ipcMain } from 'electron'; + +/** + * 注册 LLM 建议相关 IPC 处理器 + * @param {object} deps + * @param {object} deps.db + * @param {object} deps.llmSuggestionService + * @param {Function} deps.ensureSuggestionService + */ +export function registerSuggestionHandlers({ db, llmSuggestionService, ensureSuggestionService }) { + ipcMain.handle('suggestion-get-config', () => { + try { + return db.getSuggestionConfig(); + } catch (error) { + console.error('Error getting suggestion config:', error); + return null; + } + }); + + ipcMain.handle('suggestion-update-config', (event, updates) => { + try { + return db.updateSuggestionConfig(updates); + } catch (error) { + console.error('Error updating suggestion config:', error); + throw error; + } + }); + + ipcMain.handle('llm-generate-suggestions', async (event, payload = {}) => { + try { + ensureSuggestionService(); + return await llmSuggestionService.generateSuggestions(payload); + } catch 
(error) { + console.error('Error generating LLM suggestions:', error); + throw error; + } + }); + + ipcMain.on('llm-start-suggestion-stream', async (event, payload = {}) => { + console.log('[IPCHandlers] Received llm-start-suggestion-stream request:', payload); + const webContents = event.sender; + const streamId = payload.streamId || `llm-suggestion-stream-${Date.now()}`; + console.log(`[IPCHandlers] Assigned streamId: ${streamId}`); + + const send = (channel, data) => { + console.log(`[IPCHandlers] Sending to renderer: ${channel}`, { streamId, ...data }); + if (webContents.isDestroyed()) { + console.warn('[IPCHandlers] WebContents destroyed, cannot send event'); + return; + } + webContents.send(channel, { streamId, ...data }); + }; + + try { + console.log('[IPCHandlers] Initializing LLM suggestion service'); + ensureSuggestionService(); + + console.log('[IPCHandlers] Starting streaming suggestion generation'); + await llmSuggestionService.generateSuggestionsStream(payload, { + onStart: (info) => { + console.log('[IPCHandlers] onStart callback triggered:', info); + send('llm-suggestion-stream-start', info); + }, + onHeader: (header) => { + console.log('[IPCHandlers] onHeader callback triggered:', header); + send('llm-suggestion-stream-header', header); + }, + onPartialSuggestion: (chunk) => { + console.log('[IPCHandlers] onPartialSuggestion callback triggered:', chunk); + send('llm-suggestion-stream-partial', chunk); + }, + onSuggestion: (suggestion) => { + console.log('[IPCHandlers] onSuggestion callback triggered:', suggestion); + send('llm-suggestion-stream-chunk', { suggestion, index: suggestion.index }); + }, + onParserError: (error) => { + console.error('[IPCHandlers] onParserError callback triggered:', error); + send('llm-suggestion-stream-error', { error: error.message || 'TOON解析失败' }); + }, + onComplete: (metadata) => { + console.log('[IPCHandlers] onComplete callback triggered:', metadata); + send('llm-suggestion-stream-end', { success: true, metadata 
}); + }, + onError: (error) => { + console.error('[IPCHandlers] onError callback triggered:', error); + send('llm-suggestion-stream-error', { error: error.message || '生成失败' }); + } + }); + console.log('[IPCHandlers] Streaming suggestion generation completed successfully'); + } catch (error) { + console.error('[IPCHandlers] Error in streaming suggestion generation:', error); + send('llm-suggestion-stream-error', { error: error.message || '生成失败' }); + } + }); + + ipcMain.handle('llm-detect-topic-shift', async (event, payload = {}) => { + try { + ensureSuggestionService(); + return await llmSuggestionService.detectTopicShift(payload); + } catch (error) { + console.error('Error detecting topic shift:', error); + throw error; + } + }); + + console.log('[IPCHandlers] Suggestion handlers registered'); +} + diff --git a/desktop/src/core/modules/ipc-handlers/window-handlers.js b/desktop/src/core/modules/ipc-handlers/window-handlers.js new file mode 100644 index 0000000..0701552 --- /dev/null +++ b/desktop/src/core/modules/ipc-handlers/window-handlers.js @@ -0,0 +1,108 @@ +import { ipcMain } from 'electron'; +import electron from 'electron'; + +/** + * 注册窗口相关 IPC 处理器 + * @param {object} deps + * @param {object} deps.windowManager + * @param {Function} deps.checkASRReady + */ +export function registerWindowHandlers({ windowManager, checkASRReady }) { + // 渲染进程日志转发(可选) + ipcMain.on('log', (_event, message) => { + try { + if (message === undefined) return; + console.log('[RendererLog]', message); + } catch { + // ignore + } + }); + + // 显示 HUD + ipcMain.on('show-hud', async () => { + // 如果正在创建,弹一次提示,避免用户频繁点击 + if (windowManager.hudCreating) { + if (!windowManager.hudCreateNotified) { + windowManager.hudCreateNotified = true; + const parent = windowManager.getMainWindow(); + const message = 'ASR 模型正在加载,可能需要十几秒,请稍等片刻,无需重复点击。'; + console.log('[HUD]', message); + const dialogOpts = { + type: 'info', + buttons: ['好的'], + title: '正在加载', + message + }; + if (parent) { + 
parent.webContents.send('hud-loading', { message }); + } + electron.dialog.showMessageBox(parent || null, dialogOpts).catch(() => {}); + } + return; + } + + if (!windowManager.getHUDWindow()) { + await windowManager.createHUDWindow(() => checkASRReady(), () => {}); + } else { + windowManager.showHUD(); + } + console.log('HUD显示'); + }); + + // 隐藏 HUD + ipcMain.on('hide-hud', () => { + windowManager.hideHUD(); + console.log('HUD隐藏'); + }); + + // 关闭 HUD + ipcMain.on('close-hud', () => { + windowManager.closeHUD(); + console.log('HUD关闭'); + }); + + // HUD 拖拽 + ipcMain.on('start-hud-drag', (event, pos) => { + windowManager.startHUDrag(pos); + }); + + ipcMain.on('update-hud-drag', (event, pos) => { + windowManager.updateHUDrag(pos); + }); + + ipcMain.on('end-hud-drag', () => { + windowManager.endHUDrag(); + }); + + // 主窗口控制 + ipcMain.on('minimize-window', () => { + windowManager.minimizeMainWindow(); + }); + + ipcMain.on('close-window', () => { + windowManager.closeMainWindow(); + }); + + // 主窗口拖拽 + ipcMain.on('start-drag', (event, pos) => { + windowManager.startMainDrag(pos); + }); + + ipcMain.on('update-drag', (event, pos) => { + windowManager.updateMainDrag(pos); + }); + + ipcMain.on('end-drag', () => { + windowManager.endMainDrag(); + }); + + // 对话建议配置更新后通知 HUD 重新加载配置 + ipcMain.on('suggestion-config-updated', () => { + const hudWin = windowManager.getHUDWindow?.(); + if (hudWin && !hudWin.isDestroyed()) { + hudWin.webContents.send('suggestion-config-updated'); + } + }); + + console.log('Window IPC handlers registered'); +} diff --git a/desktop/src/core/modules/llm-suggestion-service.js b/desktop/src/core/modules/llm-suggestion-service.js new file mode 100644 index 0000000..2589664 --- /dev/null +++ b/desktop/src/core/modules/llm-suggestion-service.js @@ -0,0 +1,928 @@ +import { buildSuggestionContext } from './suggestion-context-builder.js'; +import { createToonSuggestionStreamParser } from './toon-parser.js'; +import { renderPromptTemplate } from 
'./prompt-manager.js'; + +const MIN_SUGGESTION_COUNT = 2; +const MAX_SUGGESTION_COUNT = 5; +const DEFAULT_MODEL = 'gpt-4o-mini'; +const DEFAULT_SITUATION_MODEL = 'gpt-4o-mini'; +const DEFAULT_TIMEOUT_MS = 1000 * 15; +const STREAM_TIMEOUT_MS = 1000 * 30; +const DEFAULT_SITUATION_TIMEOUT_MS = 1000 * 5; + +function safeText(value) { + if (value === undefined || value === null) return ''; + return String(value); +} + +export default class LLMSuggestionService { + constructor(dbGetter) { + this.dbGetter = dbGetter; + this.clientPool = {}; + this.clientConfigSignature = null; // 保留字段兼容旧逻辑(不再使用) + this.currentLLMConfig = null; + this.currentLLMFeature = 'default'; + } + + get db() { + const db = this.dbGetter?.(); + if (!db) { + throw new Error('Database is not initialized'); + } + return db; + } + + async ensureClient(feature = 'default') { + const featureKey = typeof feature === 'string' && feature.trim() ? feature.trim().toLowerCase() : 'default'; + const llmConfig = + (this.db.getLLMConfigForFeature && this.db.getLLMConfigForFeature(featureKey)) || + this.db.getDefaultLLMConfig(); + if (!llmConfig) { + throw new Error('未找到默认LLM配置,请先在设置中配置。'); + } + + const signature = `${featureKey}:${llmConfig.id || 'unknown'}-${llmConfig.updated_at || 0}`; + const cached = this.clientPool[featureKey]; + if (!cached || cached.signature !== signature) { + const { default: OpenAI } = await import('openai'); + const clientConfig = { apiKey: llmConfig.api_key }; + if (llmConfig.base_url) { + // Remove trailing '/chat/completions' if present + const baseURL = llmConfig.base_url.replace(/\/chat\/completions\/?$/, ''); + clientConfig.baseURL = baseURL; + } + this.clientPool[featureKey] = { + client: new OpenAI(clientConfig), + signature, + config: llmConfig + }; + } + + this.currentLLMConfig = llmConfig; + this.currentLLMFeature = featureKey; + return this.clientPool[featureKey].client; + } + + sanitizeCount(value, fallback) { + const num = Number(value ?? fallback ?? 
MIN_SUGGESTION_COUNT); + if (Number.isNaN(num)) return MIN_SUGGESTION_COUNT; + return Math.min(MAX_SUGGESTION_COUNT, Math.max(MIN_SUGGESTION_COUNT, Math.round(num))); + } + + resolveTimeoutMs(config, fallback) { + const raw = config?.timeout_ms; + const parsed = Number(raw); + if (Number.isFinite(parsed) && parsed > 0) { + return Math.round(parsed); + } + return fallback; + } + + async generateSuggestions(payload = {}) { + const collected = []; + let metadata = null; + await this.generateSuggestionsStream(payload, { + onSuggestion: (suggestion) => { + collected.push(suggestion); + }, + onComplete: (info) => { + metadata = info; + } + }); + return { suggestions: collected, metadata }; + } + + normalizeDeltaContent(deltaContent) { + if (!deltaContent) return ''; + if (typeof deltaContent === 'string') return deltaContent; + if (Array.isArray(deltaContent)) { + return deltaContent + .map((part) => { + if (!part) return ''; + if (typeof part === 'string') return part; + if (typeof part.text === 'string') return part.text; + if (part.text?.value) return part.text.value; + if (part.text?.content) return part.text.content; + if (part.content) return String(part.content); + return ''; + }) + .join(''); + } + return ''; + } + + async generateSuggestionsStream(payload = {}, handlers = {}) { + const { + conversationId, + characterId, + decisionPointId: incomingDecisionPointId, + trigger = 'manual', + reason = 'manual', + optionCount, + messageLimit, + previousSuggestions = [] + } = payload; + + if (!conversationId && !characterId) { + throw new Error('缺少会话或角色信息,无法生成建议。'); + } + + const suggestionConfig = this.db.getSuggestionConfig(); + const count = this.sanitizeCount(optionCount ?? 
suggestionConfig?.suggestion_count, 3); + const contextLimit = messageLimit || suggestionConfig?.context_message_limit || 10; + const client = await this.ensureClient('suggestion'); + const modelName = this.resolveModelName(this.currentLLMConfig, suggestionConfig); + + const context = buildSuggestionContext(this.db, { + conversationId, + characterId, + messageLimit: contextLimit + }); + + const prompt = this.buildSuggestionPrompt({ + count, + trigger, + reason, + context, + previousSuggestions + }); + + const requestParams = { + model: modelName, + temperature: trigger === 'manual' ? 1.0 : 0.9, + max_tokens: 4096, + reasoning_effort: "disabled", // 禁用 OpenAI 风格推理 + thinking: { type: "disabled" }, // 禁用智谱/GLM 风格深度思考 + stream: true, + messages: [ + { + role: 'system', + content: + '你是一个恋爱互动教练,负责根据当前对话状态,为玩家提供下一步回复的"话题方向 + 简要提示"。' + + '请保持中文输出,语气自然友好。只输出 TOON 格式,遵循用户提供的表头,不要添加 JSON。' + }, + { + role: 'user', + content: prompt + } + ] + }; + + console.log('[LLMSuggestionService] Starting stream with payload:', payload); + + const abortController = new AbortController(); + // 增加超时时间,避免在上下文较长或网络波动时误触发超时 + const streamTimeoutMs = this.resolveTimeoutMs(this.currentLLMConfig, STREAM_TIMEOUT_MS * 2); + const timeoutId = setTimeout(() => { + console.error('[LLMSuggestionService] Stream timed out after', streamTimeoutMs, 'ms'); + abortController.abort(new Error('LLM生成超时,请稍后重试')); + }, streamTimeoutMs); + + let usageInfo = null; + let emittedCount = 0; + let chunkCount = 0; + let totalContentLength = 0; + let rawStreamContent = ''; + + // 记录批次时间戳,同一批次的所有建议使用相同的时间戳 + const batchTimestamp = Date.now(); + + // 获取最新的消息ID(用于关联suggestion) + const latestMessageId = context.history && context.history.length > 0 + ? 
context.history[context.history.length - 1]?.id || null + : null; + + // 决策点:refresh 必须复用;否则新建 + let decisionPointId = incomingDecisionPointId || null; + if (!decisionPointId && conversationId && this.db.createDecisionPoint) { + try { + decisionPointId = this.db.createDecisionPoint({ + conversationId, + anchorMessageId: latestMessageId, + createdAt: batchTimestamp + }); + } catch (error) { + console.warn('[LLMSuggestionService] Failed to create decision point, falling back to null:', error); + decisionPointId = null; + } + } + + // 每次生成 = 一个批次(包含 trigger/reason),用于区分“换一批” + let batchId = null; + if (decisionPointId && this.db.createSuggestionBatch) { + try { + batchId = this.db.createSuggestionBatch({ + decisionPointId, + trigger, + reason, + createdAt: batchTimestamp + }); + } catch (error) { + console.warn('[LLMSuggestionService] Failed to create suggestion batch, falling back to null:', error); + batchId = null; + } + } + + console.log('[LLMSuggestionService] Creating TOON parser'); + const parser = createToonSuggestionStreamParser({ + onHeader: (header) => { + console.log('[LLMSuggestionService] Parser received header:', header); + handlers.onHeader?.(header); + }, + onPartialSuggestion: (partial) => { + handlers.onPartialSuggestion?.({ + index: emittedCount, + suggestion: partial + }); + }, + onSuggestion: (item) => { + console.log(`[LLMSuggestionService] Parser received suggestion #${emittedCount + 1}:`, item); + const suggestionIndex = emittedCount; + const suggestion = this.decorateSuggestion(item, emittedCount, { trigger, reason }, batchTimestamp); + suggestion.index = suggestionIndex; + suggestion.suggestion_index = suggestionIndex; + suggestion.decision_point_id = decisionPointId; + suggestion.batch_id = batchId; + console.log(`[LLMSuggestionService] Decorated suggestion:`, suggestion); + emittedCount += 1; + + // 保存suggestion到数据库 + if (conversationId && this.db.saveActionSuggestion) { + try { + this.db.saveActionSuggestion(suggestion, conversationId, 
latestMessageId); + console.log(`[LLMSuggestionService] Saved suggestion to database: ${suggestion.id}`); + } catch (error) { + console.error('[LLMSuggestionService] Failed to save suggestion to database:', error); + // 不阻断流程,继续执行 + } + } + + handlers.onSuggestion?.(suggestion); + }, + onSkip: (data) => { + console.log('[LLMSuggestionService] Parser detected SKIP, no suggestions needed'); + // 调用完成回调,但标记为 skipped + handlers.onComplete?.({ + trigger, + reason, + skipped: true, + skipReason: data?.reason || 'no_suggestion_needed', + model: modelName, + tokenUsage: usageInfo, + contextMessages: context.history?.length || 0 + }); + }, + onError: (error) => { + console.error('[LLMSuggestionService] Parser error:', error); + handlers.onParserError?.(error); + } + }); + + try { + console.log('[LLMSuggestionService] Calling onStart handler'); + handlers.onStart?.({ + trigger, + reason, + expectedCount: count + }); + + console.log('LLM Suggestion Stream Request Debug Info:', { + payload, + llmConfig: { + id: this.currentLLMConfig.id, + name: this.currentLLMConfig.name, + base_url: this.currentLLMConfig.base_url, + model_name: this.currentLLMConfig.model_name + }, + requestParams + }); + + console.log('[LLMSuggestionService] Creating OpenAI stream...'); + const stream = await client.chat.completions.create(requestParams, { signal: abortController.signal }); + console.log('[LLMSuggestionService] OpenAI stream created successfully'); + + console.log('[LLMSuggestionService] Starting to process chunks...'); + let hasReceivedContent = false; // 标记是否已收到真正的content + + for await (const chunk of stream) { + chunkCount++; + const choice = chunk?.choices?.[0]; + const deltaContent = choice?.delta?.content; + const reasoningContent = choice?.delta?.reasoning_content; + const delta = this.normalizeDeltaContent(deltaContent); + + // 如果收到 reasoning_content,记录日志但忽略它(即使设置了 disabled,某些模型可能仍会返回) + if (reasoningContent && !delta) { + // 只在第一次收到思考内容时记录,避免日志过多 + if (chunkCount <= 3) { + 
console.log(`[LLMSuggestionService] Received reasoning_content (ignored), waiting for content...`); + } + // 跳过这个chunk,不处理 + continue; + } + + // 只处理真正的 content 字段 + if (delta) { + if (!hasReceivedContent) { + hasReceivedContent = true; + console.log(`[LLMSuggestionService] First content chunk received at chunk #${chunkCount}`); + } + + totalContentLength += delta.length; + rawStreamContent += String(delta); + + // 简化日志输出,避免过多细节 + if (chunkCount % 50 === 0 || delta.length > 10) { + const preview = delta.length > 30 ? delta.slice(0, 30).replace(/\n/g, '\\n') + '...' : delta.replace(/\n/g, '\\n'); + console.log(`[LLMSuggestionService] Chunk #${chunkCount}: content (${delta.length} chars) "${preview}"`); + } + + // 立即推送到parser,实现真正的流式展示 + parser.push(delta); + } + + // 检查流是否结束 + if (choice?.finish_reason) { + console.log(`[LLMSuggestionService] Stream finished with reason: ${choice.finish_reason}`); + parser.end(); + } + + // 记录使用情况 + if (chunk?.usage) { + console.log('[LLMSuggestionService] Received usage info:', chunk.usage); + usageInfo = chunk.usage; + } + } + + console.log(`[LLMSuggestionService] Stream processing complete. 
Total chunks: ${chunkCount}, total content: ${totalContentLength} chars, emitted suggestions: ${emittedCount}`); + console.log('[LLMSuggestionService] Full streamed content:\n', rawStreamContent); + + console.log('[LLMSuggestionService] Calling parser.end() manually'); + parser.end(); + + console.log('[LLMSuggestionService] Calling onComplete handler'); + handlers.onComplete?.({ + trigger, + reason, + model: modelName, + tokenUsage: usageInfo, + contextMessages: context.history?.length || 0 + }); + + console.log('[LLMSuggestionService] Stream completed successfully'); + } catch (error) { + console.error('[LLMSuggestionService] Stream failed, calling onError handler'); + handlers.onError?.(error); + + console.error('LLM Suggestion Stream Failed - Full Debug Info:', { + error: { + message: error.message, + status: error.status, + code: error.code, + type: error.type, + param: error.param, + headers: error.headers, + requestID: error.requestID + }, + payload, + llmConfig: { + id: this.currentLLMConfig.id, + name: this.currentLLMConfig.name, + base_url: this.currentLLMConfig.base_url, + model_name: this.currentLLMConfig.model_name + }, + requestParams, + contextInfo: { + conversationId, + characterId, + messageLimit: contextLimit, + contextHistoryLength: context.history?.length || 0 + }, + streamStats: { + chunkCount, + totalContentLength, + emittedCount + }, + rawStreamContent + }); + throw error; + } finally { + console.log('[LLMSuggestionService] Clearing timeout'); + clearTimeout(timeoutId); + } + } + + buildSuggestionPrompt({ count, trigger, reason, context, previousSuggestions = [] }) { + const triggerLabel = trigger === 'manual' ? 
'用户主动请求' : `系统被动触发(原因:${reason})`; + const triggerGuidance = { + manual: '用户主动求助:提供多元策略(保守/进取/幽默/共情),帮助选择其一。', + silence: '静默提醒:给破冰/延续话题的轻量提示,降低冷场尴尬。', + message_count: '角色多条未回:提炼关键点,给一条综合回应思路,包含确认/回应/再提问。', + topic_change: '话题转折或被提问:先回应问题/态度,再给推进话题的具体方向。', + refresh: '用户点击"换一批":生成与上次完全不同方向的新建议。' + }[trigger === 'manual' && reason === 'refresh' ? 'refresh' : trigger] || '按通用策略生成多样化可选方案。'; + + const affinityStageText = context.affinityStage?.label + ? `${context.affinityStage.label}:${context.affinityStage.strategy}` + : '好感阶段未知:保持礼貌与真诚。'; + + const emotionText = context.emotion?.label + ? `最后一条角色消息情感:${context.emotion.label}(${context.emotion.reason || '推测'})` + : '最后消息情感:中性'; + + const previousList = Array.isArray(previousSuggestions) + ? previousSuggestions.filter((item) => item && (item.title || item.content)) + : []; + const previousSuggestionText = previousList.length + ? [ + '【用户反馈】上一批建议未被采纳,用户要求换一批,请给出明显不同的新思路,避免重复或轻微改写。', + '【上一批建议(仅供去同质化参考)】', + ...previousList.slice(0, 5).map((item, index) => { + const tagsText = Array.isArray(item.tags) && item.tags.length ? ` [${item.tags.join('、')}]` : ''; + return `${index + 1}. ${item.title || '未命名'}:${item.content || ''}${tagsText}`; + }) + ].join('\n') + : ''; + + const historyText = Array.isArray(context.historyText) + ? context.historyText.join('\n') + : safeText(context.historyText); + + const skipRule = trigger === 'manual' + ? 
'- 必须生成建议,禁止输出 SKIP。' + : '- 如果对话不需要建议(角色自言自语/话没说完/自然闲聊流畅),直接输出:SKIP'; + + return renderPromptTemplate('suggestion', { + triggerLabel, + triggerGuidance, + characterProfile: safeText(context.characterProfile), + affinityStageText, + historyText, + emotionText, + previousSuggestionText, + count, + skipRule + }); + } + + extractJSON(text = '') { + if (!text) return null; + const match = text.match(/\{[\s\S]*\}/); + if (!match) return null; + try { + return JSON.parse(match[0]); + } catch { + return null; + } + } + + decorateSuggestion(item, index, { trigger, reason }, batchTimestamp = null) { + // 使用批次时间戳,确保同一批次的所有建议使用相同的时间戳 + // 如果没有提供批次时间戳,则使用当前时间(向后兼容) + const timestamp = batchTimestamp || Date.now(); + const suggestionId = `llm-suggestion-${timestamp}-${index}`; + const tags = Array.isArray(item.tags) + ? item.tags.slice(0, 3) + : typeof item.tags === 'string' + ? item.tags.split(/[,,、]/).map((tag) => tag.trim()).filter(Boolean).slice(0, 3) + : []; + const suggestionText = item.suggestion || item.title || item.content || `选项 ${index + 1}`; + const affinityPrediction = + typeof item.affinity_delta === 'number' && !Number.isNaN(item.affinity_delta) + ? 
Math.max(-10, Math.min(10, Math.round(item.affinity_delta))) + : null; + return { + id: suggestionId, + title: suggestionText, + content: suggestionText, + tags, + // affinity_hint: item.affinity_hint || null, + trigger, + reason, + affinity_prediction: affinityPrediction, + created_at: timestamp // 使用批次时间戳,确保同一批次的所有建议使用相同的时间戳 + }; + } + + runWithTimeout(promise, timeoutMs) { + let timeoutId; + const timeoutPromise = new Promise((_, reject) => { + timeoutId = setTimeout(() => { + reject(new Error('LLM生成超时,请稍后重试')); + }, timeoutMs); + }); + + return Promise.race([ + promise.finally(() => clearTimeout(timeoutId)), + timeoutPromise + ]); + } + + analyzeHeuristics() { + // 关键词启发式已禁用,直接放行由 LLM 判断 + return { shouldCheck: true, reason: 'disabled', features: null }; + } + + clampConfidence(value) { + if (typeof value !== 'number' || Number.isNaN(value)) return null; + return Math.min(1, Math.max(0, value)); + } + + parseBoolean(value) { + const text = String(value ?? '').trim().toLowerCase(); + if (!text) return false; + return ['true', '1', 'yes', 'y', '是', '需要', '要', '是的'].includes(text); + } + + csvSplit(line) { + const result = []; + let current = ''; + let inQuotes = false; + for (let i = 0; i < line.length; i += 1) { + const char = line[i]; + if (char === '"' && line[i - 1] !== '\\') { + inQuotes = !inQuotes; + continue; + } + if ((char === ',' || char === ',') && !inQuotes) { + result.push(current); + current = ''; + continue; + } + current += char; + } + if (current !== '' || line.endsWith(',') || line.endsWith(',')) { + result.push(current); + } + return result; + } + + parseSituationToon(text = '') { + if (!text) return null; + const lines = text + .split('\n') + .map((line) => line.trim().replace(/^```toon\b/i, '').replace(/```$/i, '')) + .filter(Boolean); + + let headerIndex = -1; + let fields = []; + const headerRegex = /^situation\[(\d+)\]\{([^}]+)\}:\s*$/i; + for (let i = 0; i < lines.length; i += 1) { + const match = lines[i].match(headerRegex); + if 
(match) { + headerIndex = i; + fields = match[2] + .split(',') + .map((f) => f.trim()) + .filter(Boolean); + break; + } + } + + if (headerIndex === -1 || !fields.length) { + console.warn('[SituationToonParser] Header not found or fields empty', { text }); + return null; + } + + const dataLine = lines.slice(headerIndex + 1).find((line) => line && !/^```/.test(line)); + if (!dataLine) { + console.warn('[SituationToonParser] No data line found after header'); + return null; + } + + const values = this.csvSplit(dataLine); + const result = {}; + fields.forEach((field, idx) => { + result[field] = values[idx] !== undefined ? values[idx].trim() : ''; + }); + return result; + } + + buildSituationPrompt(context, heuristicResult, signals = {}) { + const silenceSeconds = signals.silenceSeconds != null ? Math.min(signals.silenceSeconds, 60) : null; + const roleBurstCount = signals.roleBurstCount != null ? Math.min(signals.roleBurstCount, 8) : null; + const triggerHint = signals.triggerHint || 'auto'; + const signalLines = []; + if (silenceSeconds != null) { + signalLines.push(`【冷场时长】约 ${silenceSeconds.toFixed(1)} 秒(已封顶 60 秒)`); + } + if (roleBurstCount != null) { + signalLines.push(`【连续角色消息】${roleBurstCount} 条(已封顶 8 条)`); + } + signalLines.push(`【触发来源提示】${triggerHint}`); + + const historyText = Array.isArray(context.historyText) + ? context.historyText.join('\n') + : safeText(context.historyText); + + return renderPromptTemplate('situation', { + characterProfile: safeText(context.characterProfile), + historyText, + signalLines: signalLines.join('\n') + }); + } + + async evaluateSituation(payload = {}) { + const { conversationId, characterId, messageLimit = 6 } = payload; + const suggestionConfig = this.db.getSuggestionConfig(); + const llmEnabled = suggestionConfig?.situation_llm_enabled ?? 
suggestionConfig?.topic_detection_enabled; + if (!llmEnabled) { + return { shouldSuggest: false, shouldIntervene: false, reason: 'situation_llm_disabled' }; + } + + const context = buildSuggestionContext(this.db, { + conversationId, + characterId, + messageLimit: Math.min(messageLimit, 8) + }); + + const silenceSeconds = Math.min( + Math.max(Number(payload.silence_seconds ?? 0) || 0, 0), + 60 + ); + const roleBurstCount = Math.min( + Math.max(Number(payload.role_burst_count ?? 0) || 0, 0), + 8 + ); + const triggerHint = payload.trigger_hint || 'auto'; + + const history = context.history || []; + if (!history.length) { + return { shouldSuggest: false, shouldIntervene: false, reason: 'no_history' }; + } + + const heuristicResult = this.analyzeHeuristics(); + + const client = await this.ensureClient('situation'); + const modelName = this.resolveSituationModelName(this.currentLLMConfig, suggestionConfig); + const prompt = this.buildSituationPrompt(context, heuristicResult, { + silenceSeconds, + roleBurstCount, + triggerHint + }); + + const requestParams = { + model: modelName, + temperature: 0, + max_tokens: 120, + reasoning_effort: "disabled", // 禁用 OpenAI 风格推理 + thinking: { type: "disabled" }, // 禁用智谱/GLM 风格深度思考 + stream: true, + messages: [ + { + role: 'system', + content: '你是实时对话决策器,只输出 TOON 表格,不输出任何其他文字。' + }, + { + role: 'user', + content: prompt + } + ] + }; + + console.log('Situation LLM Request Debug Info:', { + payload, + llmConfig: { + id: this.currentLLMConfig.id, + name: this.currentLLMConfig.name, + base_url: this.currentLLMConfig.base_url, + model_name: this.currentLLMConfig.model_name + }, + requestParams + }); + + const controller = new AbortController(); + + const buildResult = (parsed = {}) => { + const needOptions = this.parseBoolean(parsed.need_options ?? parsed.should_suggest ?? parsed.should_intervene); + const shouldIntervene = this.parseBoolean(parsed.should_intervene ?? parsed.should_suggest ?? parsed.need_options ?? 
needOptions); + return { + shouldIntervene, + shouldSuggest: needOptions, + needOptions, + trigger: parsed.trigger || 'auto', + reason: parsed.reason || 'llm_evaluation', + confidence: this.clampConfidence(parsed.confidence), + features: null, + model: modelName + }; + }; + + try { + const timeoutMs = this.resolveTimeoutMs(this.currentLLMConfig, DEFAULT_SITUATION_TIMEOUT_MS); + const stream = await this.runWithTimeout( + client.chat.completions.create(requestParams, { signal: controller.signal }), + timeoutMs + ); + + return await new Promise((resolve, reject) => { + let buffer = ''; + let rawStreamContent = ''; + let firstContentLogged = false; + let headerParsed = false; + let fields = []; + let lastParsed = null; + let resolved = false; + + const finish = (parsedObj) => { + if (resolved) return; + resolved = true; + controller.abort(); + console.log('[SituationParser] finish', parsedObj || lastParsed || {}); + resolve(buildResult(parsedObj || lastParsed || {})); + }; + + // 增强的表头正则:兼容冒号可选,并尝试捕获可能连在一起的数据 + const headerRegex = /^situation\[(\d+)\]\{([^}]+)\}:?\s*(.*)$/i; + + // 解析key:value格式的数据行(兼容模型输出在同一行的情况) + const parseKeyValueLine = (line) => { + const obj = {}; + // 匹配 key:value 格式,兼容中文字符和逗号 + const kvPattern = /(\w+):\s*([^,}]+)/g; + let match; + while ((match = kvPattern.exec(line)) !== null) { + const key = match[1].trim(); + let value = match[2].trim(); + // 移除末尾可能的逗号或右括号 + value = value.replace(/[,}]$/, '').trim(); + obj[key] = value; + } + return Object.keys(obj).length > 0 ? 
obj : null; + }; + + const processLine = (line) => { + if (!line) return; + if (!headerParsed) { + const match = line.match(headerRegex); + if (match) { + headerParsed = true; + const headerFields = match[2]; + const possibleData = match[3]?.trim(); + + fields = headerFields + .split(',') + .map((f) => f.trim()) + .filter(Boolean); + console.log('[SituationParser] header parsed', { fields, possibleData }); + + // 如果表头后面有数据(同一行),尝试解析key:value格式 + if (possibleData) { + const kvObj = parseKeyValueLine(possibleData); + if (kvObj && Object.keys(kvObj).length > 0) { + lastParsed = kvObj; + console.log('[SituationParser] data parsed from header line (key:value)', kvObj); + // 解析到完整数据后立即返回,无论need_options值 + finish(kvObj); + return; + } + } + } + return; + } + + // 数据行处理:先尝试key:value格式,再尝试CSV格式 + let parsedObj = null; + + // 尝试key:value格式 + const kvObj = parseKeyValueLine(line); + if (kvObj && Object.keys(kvObj).length > 0) { + parsedObj = kvObj; + console.log('[SituationParser] data line parsed (key:value)', parsedObj); + } else { + // 尝试CSV格式 + const values = this.csvSplit(line); + parsedObj = {}; + fields.forEach((field, idx) => { + parsedObj[field] = values[idx] !== undefined ? values[idx].trim() : ''; + }); + console.log('[SituationParser] data line parsed (csv)', parsedObj); + } + + if (parsedObj && Object.keys(parsedObj).length > 0) { + lastParsed = parsedObj; + // 解析到有效数据后立即返回,无论need_options值 + // 如果need_options为true则更快返回,但false也会在流结束时返回 + const needOptions = this.parseBoolean( + parsedObj.need_options ?? parsedObj.should_suggest ?? 
parsedObj.should_intervene + ); + if (needOptions) { + finish(parsedObj); + } + // 如果need_options为false,会在流结束时通过finish(lastParsed)返回 + } + }; + + (async () => { + try { + for await (const chunk of stream) { + const choice = chunk?.choices?.[0]; + const reasoningContent = choice?.delta?.reasoning_content; + const content = this.normalizeDeltaContent(choice?.delta?.content); + + // 忽略思考内容,确保判定尽快返回 + if (reasoningContent && !content) { + continue; + } + if (!content) continue; + + buffer += content; + rawStreamContent += content; + + if (!firstContentLogged) { + firstContentLogged = true; + console.log('[SituationParser] first content chunk', content.slice(0, 80).replace(/\n/g, '\\n')); + } + + let newlineIndex = buffer.indexOf('\n'); + while (newlineIndex >= 0) { + const line = buffer.slice(0, newlineIndex).trim(); + buffer = buffer.slice(newlineIndex + 1); + processLine(line); + newlineIndex = buffer.indexOf('\n'); + } + } + // 流结束,若未解析到则用最后记录 + if (buffer.trim()) { + console.log('[SituationParser] flushing tail line', buffer.trim()); + processLine(buffer.trim()); + } + console.log( + '[SituationParser] stream end, raw length=', + rawStreamContent.length, + 'preview=', + rawStreamContent.slice(0, 120).replace(/\n/g, '\\n'), + 'lastParsed=', + lastParsed + ); + finish(lastParsed || {}); + } catch (error) { + if (error?.name === 'AbortError' && resolved) return; + reject(error); + } + })(); + }); + } catch (error) { + console.error('Situation LLM Request Failed - Full Debug Info:', { + error: { + message: error.message, + status: error.status, + code: error.code, + type: error.type, + param: error.param, + headers: error.headers, + requestID: error.requestID + }, + payload, + llmConfig: { + id: this.currentLLMConfig.id, + name: this.currentLLMConfig.name, + base_url: this.currentLLMConfig.base_url, + model_name: this.currentLLMConfig.model_name + }, + requestParams, + contextInfo: { + conversationId, + characterId, + messageLimit: Math.min(messageLimit, 8), + 
contextHistoryLength: context.history?.length || 0 + } + }); + throw error; + } + } + + async detectTopicShift(payload = {}) { + const result = await this.evaluateSituation(payload); + return { + shouldSuggest: result.shouldSuggest, + reason: result.reason, + trigger: result.trigger, + confidence: result.confidence, + features: result.features || null, + model: result.model + }; + } + + resolveModelName(llmConfig, suggestionConfig) { + const llmModel = llmConfig?.model_name && llmConfig.model_name.trim(); + if (llmModel) { + return llmModel; + } + const suggestionModel = suggestionConfig?.model_name && suggestionConfig.model_name.trim(); + if (suggestionModel) { + return suggestionModel; + } + return DEFAULT_MODEL; + } + + resolveSituationModelName(llmConfig, suggestionConfig) { + const situationModel = suggestionConfig?.situation_model_name && suggestionConfig.situation_model_name.trim(); + // 若用户未显式设置(默认 gpt-4o-mini),优先使用全局默认 LLM 配置的模型,避免与不同提供商的默认占位不匹配 + if (situationModel && situationModel !== 'gpt-4o-mini') { + return situationModel; + } + return this.resolveModelName(llmConfig, suggestionConfig) || DEFAULT_SITUATION_MODEL; + } +} + diff --git a/desktop/src/core/modules/memory-service.js b/desktop/src/core/modules/memory-service.js new file mode 100644 index 0000000..95c71fd --- /dev/null +++ b/desktop/src/core/modules/memory-service.js @@ -0,0 +1,129 @@ +const DEFAULT_TIMEOUT_MS = 5000; + +/** + * MemoryService + * 结构化 Profile/Event 侧车的轻量客户端(无向量召回)。 + * 仅在配置了 baseUrl 时工作;否则返回空结果以保证安全降级。 + */ +export default class MemoryService { + constructor(options = {}) { + this.baseUrl = options.baseUrl || process.env.MEMORY_API_BASE_URL || process.env.MEM_SERVICE_BASE_URL || ''; + this.defaultTimeout = options.timeoutMs || DEFAULT_TIMEOUT_MS; + } + + get enabled() { + return Boolean(this.baseUrl); + } + + buildUrl(pathname, query = {}) { + const url = new URL(pathname, this.baseUrl.endsWith('/') ? 
this.baseUrl : `${this.baseUrl}/`); + Object.entries(query) + .filter(([, v]) => v !== undefined && v !== null && v !== '') + .forEach(([k, v]) => { + if (Array.isArray(v)) { + v.forEach((item) => url.searchParams.append(k, item)); + } else { + url.searchParams.set(k, v); + } + }); + return url.toString(); + } + + async request(pathname, { method = 'GET', query = {}, body = undefined, timeout = this.defaultTimeout } = {}) { + if (!this.enabled) { + return { ok: true, disabled: true, data: null }; + } + + const controller = new AbortController(); + const timer = setTimeout(() => controller.abort(new Error('memory request timeout')), timeout); + + try { + const url = this.buildUrl(pathname, query); + const res = await fetch(url, { + method, + signal: controller.signal, + headers: { + 'Content-Type': 'application/json' + }, + body: body ? JSON.stringify(body) : undefined + }); + + if (!res.ok) { + const text = await res.text().catch(() => ''); + throw new Error(`MemoryService HTTP ${res.status}: ${text}`); + } + + const data = await res.json().catch(() => null); + return { ok: true, data }; + } catch (error) { + console.error('[MemoryService] request failed', { pathname, error: error.message }); + return { ok: false, error }; + } finally { + clearTimeout(timer); + } + } + + /** + * 查询画像(topic/sub_topic/tag/time 结构化过滤)。 + */ + async queryProfiles(params = {}) { + const { userId, projectId, topics, subTopics, tags, timeFrom, timeTo, limit = 20 } = params; + const resp = await this.request('profiles', { + query: { + user_id: userId, + project_id: projectId, + topic: topics, + sub_topic: subTopics, + tag: tags, + time_from: timeFrom, + time_to: timeTo, + limit + } + }); + + if (!resp.ok) return { profiles: [], error: resp.error?.message }; + if (resp.disabled) return { profiles: [], disabled: true }; + const profiles = Array.isArray(resp.data) ? 
resp.data : resp.data?.profiles || []; + return { profiles }; + } + + /** + * 查询事件(time / tag / topic 过滤)。 + */ + async queryEvents(params = {}) { + const { userId, projectId, topics, tags, timeFrom, timeTo, limit = 50 } = params; + const resp = await this.request('events', { + query: { + user_id: userId, + project_id: projectId, + topic: topics, + tag: tags, + time_from: timeFrom, + time_to: timeTo, + limit + } + }); + + if (!resp.ok) return { events: [], error: resp.error?.message }; + if (resp.disabled) return { events: [], disabled: true }; + const events = Array.isArray(resp.data) ? resp.data : resp.data?.events || []; + return { events }; + } + + /** + * 可扩展:推送画像/事件。当前未暴露到 UI,保留占位。 + */ + async upsertProfile(payload = {}) { + const resp = await this.request('profiles', { method: 'POST', body: payload }); + if (!resp.ok) return { success: false, error: resp.error?.message }; + if (resp.disabled) return { success: false, disabled: true }; + return { success: true, data: resp.data }; + } + + async upsertEvent(payload = {}) { + const resp = await this.request('events', { method: 'POST', body: payload }); + if (!resp.ok) return { success: false, error: resp.error?.message }; + if (resp.disabled) return { success: false, disabled: true }; + return { success: true, data: resp.data }; + } +} diff --git a/desktop/src/core/modules/prompt-manager.js b/desktop/src/core/modules/prompt-manager.js new file mode 100644 index 0000000..29b72fd --- /dev/null +++ b/desktop/src/core/modules/prompt-manager.js @@ -0,0 +1,86 @@ +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +// Prompts are stored as plain text templates (markdown) to make them easy to review and iterate in OSS. +// They are packaged by electron-builder because they're under `src/core/**/*` in `package.json`. 
+const PROMPTS_DIR = path.resolve(__dirname, '../prompts'); + +const PROMPT_FILES = { + suggestion: 'suggestion.prompt.md', + situation: 'situation.prompt.md', + 'review.with_nodes': 'review.with_nodes.prompt.md', + 'review.no_nodes': 'review.no_nodes.prompt.md' +}; + +const templateCache = new Map(); // id -> template string + +function resolveTemplatePath(id) { + const fileName = PROMPT_FILES[id]; + if (!fileName) { + throw new Error(`[PromptManager] Unknown prompt id: ${id}`); + } + return path.join(PROMPTS_DIR, fileName); +} + +function safeToString(value) { + if (value === undefined || value === null) return ''; + return String(value); +} + +function getNestedValue(vars, key) { + if (!vars || typeof vars !== 'object') return undefined; + const parts = String(key).split('.').filter(Boolean); + let current = vars; + for (const part of parts) { + if (current == null || typeof current !== 'object' || !(part in current)) { + return undefined; + } + current = current[part]; + } + return current; +} + +export function loadPromptTemplate(id) { + if (templateCache.has(id)) { + return templateCache.get(id); + } + const filePath = resolveTemplatePath(id); + let text; + try { + text = fs.readFileSync(filePath, 'utf-8'); + } catch (error) { + throw new Error(`[PromptManager] Failed to read prompt template: ${id} (${filePath}): ${error.message}`); + } + templateCache.set(id, text); + return text; +} + +/** + * Very small template renderer: + * - Replaces `{{key}}` or `{{nested.key}}` with vars[key] + * - Missing keys become empty string by default (to keep runtime resilient) + */ +export function renderPromptTemplate(id, vars = {}, { strict = false } = {}) { + const template = loadPromptTemplate(id); + const missingKeys = new Set(); + + const rendered = template.replace(/{{\s*([\w.]+)\s*}}/g, (_match, key) => { + const value = getNestedValue(vars, key); + if (value === undefined) { + missingKeys.add(key); + return ''; + } + return safeToString(value); + }); + + if (strict 
&& missingKeys.size) { + throw new Error(`[PromptManager] Missing template variables for ${id}: ${Array.from(missingKeys).join(', ')}`); + } + + return rendered; +} + diff --git a/desktop/src/core/modules/review-service.js b/desktop/src/core/modules/review-service.js new file mode 100644 index 0000000..9295904 --- /dev/null +++ b/desktop/src/core/modules/review-service.js @@ -0,0 +1,1071 @@ +import { renderPromptTemplate } from './prompt-manager.js'; + +const DEFAULT_REVIEW_TIMEOUT_MS = 1000 * 20; + +export default class ReviewService { + constructor(dbGetter) { + this.dbGetter = dbGetter; + this.clientPool = {}; + this.clientConfigSignature = null; // 保留兼容字段 + this.currentLLMConfig = null; + this.currentLLMFeature = 'review'; + + // 复盘输入规模控制(避免超长对话导致上下文爆炸) + this.MAX_NODES = 999; // 移除限制 + this.MAX_MESSAGES = 99999; // 移除限制 + this.MAX_OPTIONS_PER_NODE = 6; + // 粗略 token 上限(经验值):超出会触发更激进裁剪 + this.MAX_PROMPT_TOKENS_EST = 180000; // 按 200k 来,留一些 buffer + } + + get db() { + const db = this.dbGetter?.(); + if (!db) { + throw new Error('Database is not initialized'); + } + return db; + } + + async ensureClient(feature = 'review') { + const featureKey = typeof feature === 'string' && feature.trim() ? 
feature.trim().toLowerCase() : 'review'; + const llmConfig = + (this.db.getLLMConfigForFeature && this.db.getLLMConfigForFeature(featureKey)) || + this.db.getDefaultLLMConfig(); + if (!llmConfig) { + throw new Error('未找到默认LLM配置,请先在设置中配置。'); + } + + const signature = `${featureKey}:${llmConfig.id || 'unknown'}-${llmConfig.updated_at || 0}`; + const cached = this.clientPool[featureKey]; + if (!cached || cached.signature !== signature) { + const { default: OpenAI } = await import('openai'); + const clientConfig = { apiKey: llmConfig.api_key }; + if (llmConfig.base_url) { + // Remove trailing '/chat/completions' if present + const baseURL = llmConfig.base_url.replace(/\/chat\/completions\/?$/, ''); + clientConfig.baseURL = baseURL; + } + this.clientPool[featureKey] = { + client: new OpenAI(clientConfig), + signature, + config: llmConfig + }; + } + + this.currentLLMConfig = llmConfig; + this.currentLLMFeature = featureKey; + return this.clientPool[featureKey].client; + } + + // 1. 生成复盘报告 + async generateReview(conversationId, options = {}) { + const force = !!options.force; + const onProgress = typeof options.onProgress === 'function' ? options.onProgress : null; + const report = (stage, percent, message, extra = null) => { + try { + onProgress?.({ + stage, + percent, + message, + ...(extra ? { extra } : {}) + }); + } catch { + // ignore + } + }; + + // 检查是否已有复盘 + const existing = this.getExistingReview(conversationId); + if (existing && existing.review_data && !force) { + return existing.review_data; + } + + // 1. 获取消息和选项 + report('load_data', 0.05, '加载对话与建议数据...'); + const messagesRaw = this.db.getMessagesByConversation(conversationId); + const suggestions = this.db.getActionSuggestions(conversationId); + + // 2. 
按时间戳分组,识别节点 + report('group_nodes', 0.15, '识别决策点并分组...'); + const nodesRaw = this.groupIntoNodes(suggestions); + const explicitReviewNodes = this.buildReviewNodesFromUserSelection(nodesRaw); + const affinityFromSelection = this.computeAffinityChangeFromSelection(explicitReviewNodes); + const conversation = this.db.getConversationById(conversationId); + + // 3. 特殊情况处理: + // - 如果没有节点但有消息,仍然走 LLM 复盘(不降级),让模型给出完整总结 + // - 如果既没有节点也没有消息,才使用兜底简要复盘 + const hasMessages = Array.isArray(messagesRaw) && messagesRaw.length > 0; + if (nodesRaw.length === 0 && !hasMessages) { + const simpleReview = this.buildSimpleSummary(conversation); + report('save', 0.9, '保存复盘结果...'); + this.db.saveConversationReview({ + conversation_id: conversationId, + review_data: simpleReview, + model_used: 'none' + }); + report('done', 1, '复盘完成'); + return simpleReview; + } + + report('trim', 0.25, '裁剪输入规模以避免超长上下文...'); + const { messages, nodes, trimInfo } = this.trimReviewInputs(messagesRaw, nodesRaw); + + // 4. 调用 LLM 分析(带一次重试) + let reviewData; + try { + report('llm_request', 0.35, '调用模型生成复盘(可能需要一些时间)...', trimInfo); + reviewData = await this.callLLMForReview(messages, nodes); + report('parse', 0.75, '解析模型输出...'); + } catch (err) { + console.warn('[ReviewService] LLM 调用失败,准备重试...', err); + // 针对“上下文过长”做更激进裁剪再重试;其他错误维持一次重试 + if (this.isLikelyContextLimitError(err)) { + console.warn('[ReviewService] Suspected context window issue. Retrying with aggressive trimming...'); + const aggressive = this.trimReviewInputs(messagesRaw, nodesRaw, { aggressive: true }); + report('llm_request', 0.35, '上下文疑似超限,已裁剪后重试...', aggressive.trimInfo); + reviewData = await this.callLLMForReview(aggressive.messages, aggressive.nodes); + report('parse', 0.75, '解析模型输出...'); + } else { + report('llm_request', 0.35, '调用失败,准备重试一次...'); + reviewData = await this.callLLMForReview(messages, nodes); + report('parse', 0.75, '解析模型输出...'); + } + } + + // 5. 
合并 LLM 分析 + 系统记录 + report('enrich', 0.82, '合并 LLM 分析与系统记录...'); + + // 策略:LLM 提供 reasoning 和 node_title,系统记录提供准确的选择状态 + const llmNodes = Array.isArray(reviewData.nodes) ? reviewData.nodes : []; + const sysNodesMap = new Map(explicitReviewNodes.map(n => [n.node_id, n])); + + if (llmNodes.length > 0) { + // LLM 输出了节点:合并两者 + const mergedNodes = llmNodes.map((llmNode) => { + const sysNode = sysNodesMap.get(llmNode.node_id); + if (sysNode) { + // 对于系统已有的决策点 + const isSystemSelected = sysNode.choice_type === 'matched'; + return { + node_id: llmNode.node_id, + node_title: llmNode.node_title || sysNode.node_title, + timestamp: sysNode.timestamp, + // 选择状态:优先使用系统显式选择;若无,使用 LLM 的判定 + choice_type: isSystemSelected ? 'matched' : (llmNode.choice_type || 'custom'), + matched_suggestion_id: sysNode.selected_suggestion_id || llmNode.matched_suggestion_id, + selected_suggestion_id: sysNode.selected_suggestion_id, + selected_affinity_delta: sysNode.selected_affinity_delta, + // 描述和推理:使用 LLM 分析(更丰富) + user_description: llmNode.user_description || sysNode.user_description, + reasoning: llmNode.reasoning || sysNode.reasoning, + match_confidence: llmNode.match_confidence, + ghost_options: [] + }; + } else { + // LLM 额外识别的 Insight 节点 + return { + ...llmNode, + choice_type: 'insight', + ghost_options: [] + }; + } + }); + + // 检查是否有系统决策点被 LLM 漏掉了 + const mergedIds = new Set(mergedNodes.map(n => n.node_id)); + explicitReviewNodes.forEach(sysNode => { + if (!mergedIds.has(sysNode.node_id)) { + mergedNodes.push(sysNode); + } + }); + + // 按时间排序 + reviewData.nodes = mergedNodes.sort((a, b) => (a.timestamp || 0) - (b.timestamp || 0)); + console.log(`[ReviewService] 已合并 LLM 分析与系统记录 (LLM:${llmNodes.length}, Sys:${explicitReviewNodes.length}, Result:${reviewData.nodes.length})`); + } else { + // LLM 未输出节点:回退到系统记录 + console.warn('[ReviewService] LLM 未输出任何节点,使用系统记录'); + reviewData.nodes = explicitReviewNodes; + } + + reviewData.has_nodes = reviewData.nodes.length > 0; + if (!reviewData.summary) 
reviewData.summary = {}; + reviewData.summary.node_count = reviewData.nodes.length; + reviewData.summary.matched_count = reviewData.nodes.filter((n) => n.choice_type === 'matched').length; + reviewData.summary.custom_count = reviewData.nodes.filter((n) => n.choice_type === 'custom' || n.choice_type === 'insight').length; + + this.enrichGhostOptions(reviewData, nodes); + this.enrichAudioInfo(reviewData, conversationId); + + // 6. 校验/兜底 + report('validate', 0.86, '校验并兜底复盘结构...'); + reviewData = this.ensureReviewDataIntegrity(reviewData, nodes, conversation); + + // 6.1 覆盖好感度变化:完全由用户显式选择决定 + if (!reviewData.summary) reviewData.summary = {}; + reviewData.summary.total_affinity_change = affinityFromSelection; + + // 7. 保存 + report('save', 0.9, '保存复盘结果...'); + this.db.saveConversationReview({ + conversation_id: conversationId, + review_data: reviewData, + model_used: this.currentLLMConfig?.model_name || 'unknown' + }); + + // 8. 更新会话信息(标题、摘要、Tag、好感度) + if (reviewData.summary) { + report('update_conversation', 0.95, '更新会话摘要与标签...'); + const updates = {}; + if (reviewData.summary.title) updates.title = reviewData.summary.title; + if (reviewData.summary.conversation_summary) updates.summary = reviewData.summary.conversation_summary; + if (Array.isArray(reviewData.summary.tags)) updates.tags = reviewData.summary.tags.join(','); + if (reviewData.summary.total_affinity_change !== undefined) updates.affinity_change = reviewData.summary.total_affinity_change; + + if (Object.keys(updates).length > 0) { + this.db.updateConversation(conversationId, updates); + } + } + + report('done', 1, '复盘完成'); + return reviewData; + } + + estimateTokens(text = '') { + if (!text) return 0; + // 经验:中英混合平均 1 token ~ 3-4 chars,取 4 做保守估计 + return Math.ceil(String(text).length / 4); + } + + isLikelyContextLimitError(err) { + const msg = String(err?.message || '').toLowerCase(); + return ( + msg.includes('context') || + msg.includes('maximum context') || + msg.includes('max context') || + 
msg.includes('token') || + msg.includes('length') || + msg.includes('too large') + ); + } + + trimReviewInputs(messagesRaw, nodesRaw, opts = {}) { + const aggressive = !!opts.aggressive; + const maxNodes = aggressive ? Math.max(6, Math.floor(this.MAX_NODES / 2)) : this.MAX_NODES; + const maxMessages = aggressive ? Math.max(40, Math.floor(this.MAX_MESSAGES / 2)) : this.MAX_MESSAGES; + const maxOptions = aggressive ? Math.max(3, Math.floor(this.MAX_OPTIONS_PER_NODE / 2)) : this.MAX_OPTIONS_PER_NODE; + + const messages = Array.isArray(messagesRaw) ? [...messagesRaw] : []; + const nodes = Array.isArray(nodesRaw) ? [...nodesRaw] : []; + + // 1) 先限制 nodes 数量:保留最近 maxNodes 个(按 timestamp) + nodes.sort((a, b) => (a.timestamp || 0) - (b.timestamp || 0)); + const trimmedNodes = nodes.length > maxNodes ? nodes.slice(nodes.length - maxNodes) : nodes; + + // 2) 限制每个节点的 options 数量(保留最靠前的若干条,通常 index=0..) + const finalNodes = trimmedNodes.map((n) => ({ + ...n, + suggestions: Array.isArray(n.suggestions) ? n.suggestions.slice(0, maxOptions) : [] + })); + + // 3) 限制消息数量:保留最近 maxMessages 条 + messages.sort((a, b) => (a.timestamp || 0) - (b.timestamp || 0)); + const finalMessages = messages.length > maxMessages ? 
messages.slice(messages.length - maxMessages) : messages; + + // 4) 如仍超估计 token,上下文再裁一次(优先裁消息) + const promptPreview = this.buildReviewPrompt(finalMessages, finalNodes); + const est = this.estimateTokens(promptPreview); + if (est > this.MAX_PROMPT_TOKENS_EST) { + const moreAggressiveMessages = finalMessages.slice(Math.max(0, finalMessages.length - Math.floor(maxMessages / 2))); + const moreAggressivePrompt = this.buildReviewPrompt(moreAggressiveMessages, finalNodes); + const est2 = this.estimateTokens(moreAggressivePrompt); + return { + messages: moreAggressiveMessages, + nodes: finalNodes, + trimInfo: { aggressive, nodes: { before: nodesRaw?.length || 0, after: finalNodes.length }, messages: { before: messagesRaw?.length || 0, after: moreAggressiveMessages.length }, tokenEst: { before: est, after: est2 } } + }; + } + + return { + messages: finalMessages, + nodes: finalNodes, + trimInfo: { aggressive, nodes: { before: nodesRaw?.length || 0, after: finalNodes.length }, messages: { before: messagesRaw?.length || 0, after: finalMessages.length }, tokenEst: { est } } + }; + } + + // 获取已有复盘 + getExistingReview(conversationId) { + return this.db.getConversationReview(conversationId); + } + + // 分组节点逻辑 + groupIntoNodes(suggestions) { + if (!suggestions || suggestions.length === 0) return []; + + const withDecisionPoint = []; + const legacy = []; + for (const s of suggestions) { + if (s && s.decision_point_id) withDecisionPoint.push(s); + else legacy.push(s); + } + + const nodes = []; + + // 1) 新版:按 decision_point_id 聚合; + // - 若用户在某个 batch 显式选择了建议,则该决策点选择“被选中项所在 batch”作为节点选项(便于回放 ghost options) + // - 否则取“最新 batch”作为该节点的选项 + if (withDecisionPoint.length > 0) { + const byDP = new Map(); + for (const s of withDecisionPoint) { + const dpId = s.decision_point_id; + if (!byDP.has(dpId)) byDP.set(dpId, []); + byDP.get(dpId).push(s); + } + + // 按决策点的最早 created_at 排序,稳定输出 node_1..n + const dpGroups = [...byDP.entries()].map(([dpId, list]) => { + const minTs = 
Math.min(...list.map((x) => x.created_at || 0)); + const maxTs = Math.max(...list.map((x) => x.created_at || 0)); + return { dpId, list, minTs, maxTs }; + }).sort((a, b) => a.minTs - b.minTs); + + for (const group of dpGroups) { + // 找最新批次:优先按 batch_id 分组,取 created_at 最大的 batch + const byBatch = new Map(); + for (const s of group.list) { + const batchId = s.batch_id || 'unknown'; + if (!byBatch.has(batchId)) byBatch.set(batchId, []); + byBatch.get(batchId).push(s); + } + const batchCount = byBatch.size; + + // 若存在显式选择,优先使用该 batch + let selectedBatchId = null; + for (const [batchId, list] of byBatch.entries()) { + if (list.some((x) => x && (x.is_selected === 1 || x.is_selected === true || x.is_selected === '1'))) { + selectedBatchId = batchId; + break; + } + } + + let latestBatchId = null; + let latestBatchTs = -1; + for (const [batchId, list] of byBatch.entries()) { + const ts = Math.max(...list.map((x) => x.created_at || 0)); + if (ts > latestBatchTs) { + latestBatchTs = ts; + latestBatchId = batchId; + } + } + + const pickedBatchId = selectedBatchId || latestBatchId; + const latest = (pickedBatchId && byBatch.get(pickedBatchId)) ? byBatch.get(pickedBatchId) : group.list; + const sortedLatest = [...latest].sort((a, b) => { + const aIndex = a.suggestion_index ?? a.index ?? 999; + const bIndex = b.suggestion_index ?? b.index ?? 999; + return aIndex - bIndex; + }); + + nodes.push({ + decision_point_id: group.dpId, + batch_id: pickedBatchId !== 'unknown' ? pickedBatchId : null, + batch_count: batchCount, + timestamp: sortedLatest[0]?.created_at || group.minTs, + suggestions: sortedLatest + }); + } + } + + // 2) 旧版数据:回退到时间窗口分组(保留原逻辑,但不再硬丢弃为3条) + if (legacy.length > 0) { + // 按 created_at 排序,如果时间戳相同,则按 index 排序 + const sorted = [...legacy].sort((a, b) => { + if (a.created_at !== b.created_at) { + return a.created_at - b.created_at; + } + const aIndex = a.index !== undefined ? a.index : 999; + const bIndex = b.index !== undefined ? 
b.index : 999; + return aIndex - bIndex; + }); + + const groups = []; + let currentGroup = [sorted[0]]; + + for (let i = 1; i < sorted.length; i++) { + const prev = currentGroup[currentGroup.length - 1]; + const curr = sorted[i]; + + const timeDiff = curr.created_at - prev.created_at; + const hasIndexInfo = curr.index !== undefined && prev.index !== undefined; + const isIndexReset = hasIndexInfo && curr.index === 0 && prev.index >= 0 && prev.index < 3; + const isFullBatch = currentGroup.length >= 3; + + if (timeDiff < 1000 && !(isFullBatch && isIndexReset)) { + currentGroup.push(curr); + } else { + groups.push(currentGroup); + currentGroup = [curr]; + } + } + groups.push(currentGroup); + + const legacyNodes = groups.map((group) => ({ + timestamp: group[0].created_at, + suggestions: group + })); + nodes.push(...legacyNodes); + } + + // 统一生成 node_id + return nodes + .sort((a, b) => (a.timestamp || 0) - (b.timestamp || 0)) + .map((node, index) => ({ + node_id: `node_${index + 1}`, + timestamp: node.timestamp || 0, + decision_point_id: node.decision_point_id || null, + batch_id: node.batch_id || null, + suggestions: node.suggestions || [] + })); + } + + // 构建 Prompt + buildReviewPrompt(messages, nodes) { + const formatTime = (ts) => new Date(ts).toLocaleTimeString('zh-CN', { hour12: false }); + + // 过滤掉 system 消息,保留 user 和 character + // 确保时间顺序 + const sortedMessages = [...messages].sort((a, b) => a.timestamp - b.timestamp); + + const transcript = sortedMessages.map(m => + `[${formatTime(m.timestamp)}] ${m.sender === 'user' ? 'User' : 'Character'}: ${m.content}` + ).join('\n'); + + const nodeInfo = nodes.length > 0 + ? nodes.map((node, i) => { + const dpInfo = node.decision_point_id ? `, DP:${node.decision_point_id}` : ''; + const batchInfo = node.batch_id ? `, Batch:${node.batch_id}` : ''; + const batchCountInfo = node.batch_count && node.batch_count > 1 ? 
`, BatchCount:${node.batch_count}` : ''; + + // 补充上下文:决策点锚点消息 + 本批次触发信息(可解释“为什么弹建议/是否换一批”) + let anchorLine = ''; + let triggerLine = ''; + try { + if (node.decision_point_id && this.db.getDecisionPointById) { + const dp = this.db.getDecisionPointById(node.decision_point_id); + const anchorId = dp?.anchor_message_id; + if (anchorId && this.db.getMessageById) { + const msg = this.db.getMessageById(anchorId); + if (msg) { + anchorLine = `锚点消息: [${formatTime(msg.timestamp)}] ${msg.sender === 'user' ? 'User' : 'Character'}: ${msg.content}`; + } + } + } + if (node.batch_id && this.db.getSuggestionBatchById) { + const batch = this.db.getSuggestionBatchById(node.batch_id); + if (batch) { + triggerLine = `触发: ${batch.trigger || 'unknown'} / ${batch.reason || 'unknown'}`; + } + } + } catch { + // ignore + } + + const options = node.suggestions.map(s => ` - ID:${s.id} 内容:${s.content || s.title}`).join('\n'); + const extraLines = [triggerLine, anchorLine].filter(Boolean).map((l) => ` ${l}`).join('\n'); + return `节点${i + 1} (ID: ${node.node_id}${dpInfo}${batchInfo}${batchCountInfo}, Time: ${formatTime(node.timestamp)}):\n${extraLines ? `${extraLines}\n` : ''}${options}`; + }).join('\n\n') + : "无关键决策节点"; + + const sections = [ + "# Role", + "你是恋爱对话复盘分析师,擅长细腻地洞察人际互动的关键动态。", + "", + "# Task", + nodes.length > 0 + ? '根据对话记录和已知的"关键决策点"(系统当时生成建议的时刻),分析用户的实际选择,并生成优雅、专业的复盘总结。' + : '根据对话记录,总结对话内容并评估好感度变化。', + "", + "## 节点判定规则(重要):", + "**必须分析的节点**:已提供的决策点(带有系统建议的时刻)必须全部分析。", + "**可额外生成的节点**:在对话中识别以下关键时刻,作为额外的 insight 节点:", + "1. **话题转折点**:对话主题/情绪发生明显变化的时刻", + "2. **情感峰值点**:最感动/最尴尬/最有趣的时刻", + "3. **关系里程碑**:关系推进的关键点(如首次称呼昵称、主动示好等)", + "4. **冲突/和解点**:意见不合或和解的时刻", + "", + "## 复盘核心元素:", + "1. **决策点标题 (title)**:用极简的词汇(2-4字)概括该次互动的本质,如\"破冰契机\"、\"情绪共振\"、\"婉转拒绝\"。不要包含数字或符号。", + "2. **用户行为描述 (user_desc)**:用一句话(10-20字)精准描述用户做了什么。", + " - **Bad**: \"用户回复了'好的',表示同意\"", + " - **Good**: \"积极响应邀请,展现出极高的社交主动性\"", + "3. 
**选择类型 (choice_type)**:", + " - **matched**:用户选择了系统建议的选项", + " - **custom**:用户在决策点使用了自定义回复", + " - **insight**:非决策点的关键时刻(话题转折/情感峰值等),此类节点 matched_id 为空", + "4. **整体表现评价 (self_evaluation)**:作为第一人称视高的教练,给用户一段富有启发性的反馈(20-40字)。", + "", + "## 其他内容:", + "- **标题 (title)**:为本次对话生成一个富有文学美感的标题,如\"月色下的温柔守候\"、\"初见时的微小悸动\"。", + "- **对话标签 (tags)**:生成3-5个高阶感性标签,如:双向奔赴、微妙暧昧、情感防御、深度共情。", + "- **对话概要 (conversation_summary)**:一句话概括故事走向。", + "- **好感度变化 (total_affinity_change)**:-10 到 +10 的整数。", + "", + "# Input", + "", + "## 对话记录", + transcript, + "", + "## 决策点及建议选项", + nodeInfo, + "", + "# Output (TOON 格式)", + "请严格遵守以下格式,只需输出 Data 行。", + "", + "**输出示例**:", + "review_node: node_1,情感共鸣,matched,sugg-xxx-1A,1.0,表达对回忆的珍视,用户选择了温情回复", + "review_node: node_2,话题转折,insight,,0.9,主动提起工作话题,从轻松闲聊转向深度讨论", + "review_node: node_3,关心表达,custom,,0.8,担忧对方状态,没有采纳建议而是自由发挥", + "", + "", + "review_summary[1]{total_affinity_change,title,conversation_summary,self_evaluation,chat_overview,expression_score,expression_desc,topic_score,topic_desc,tags,attitude_analysis}:", + "3,月色下的守候,本次对话从尴尬破冰到温馨互动,展现了极强的同理心,...,8,表达清晰,7,话题自然,双向奔赴;微妙暧昧,对方对你产生了好感", + "" + ]; + + if (nodes.length > 0) { + const minNodes = Math.max(nodes.length, Math.ceil(messages.length / 50)); + sections.push( + "第一部分:节点分析", + `请分析 ${nodes.length} 个已知决策点,并额外识别 insight 节点(话题转折/情感峰值等)。共约 ${minNodes} 个以上节点。`, + `请分析 ${nodes.length} 个已知决策点,并额外识别 insight 节点(话题转折/情感峰值等)。共约 ${minNodes} 个以上节点。`, + "review_node: ,<极简标题>,,,<置信度>,<精准行为描述>,<深度原因分析>", + "", + "**注意**:每个节点必须单独一行,以 `review_node:` 开头。node_id 格式为 node_1, node_2...。", + "", + "第二部分:整体总结" + ); + } else { + sections.push("第一部分:整体总结"); + } + + sections.push( + "review_summary: <好感度变化>,<美感标题>,<走向概述>,<启发性评价>,<对话概要>,<表述分>,<表释放评价>,<话题分>,<话题选择评价>,<分号分隔标签>,<对象态度分析>", + "", + "## 严禁行为:", + "- 禁止输出 markdown 代码块标记 (```)。", + "- 禁止省略引号(如果内容中包含逗号)。", + "- 必须在一行内写完一个节点的数据。", + "- 必须以 `review_node:` 或 `review_summary:` 开头。" + ); + + return sections.join('\n'); + } + + buildReviewNodesFromUserSelection(nodes = []) 
{ + const reviewNodes = []; + for (const node of nodes || []) { + const suggestions = Array.isArray(node.suggestions) ? node.suggestions : []; + const selected = suggestions.find((s) => s && (s.is_selected === 1 || s.is_selected === true || s.is_selected === '1')) || null; + reviewNodes.push({ + node_id: node.node_id, + node_title: selected?.title || '已选择建议', + timestamp: node.timestamp || 0, + choice_type: selected ? 'matched' : 'custom', + selected_suggestion_id: selected?.id || null, + selected_affinity_delta: typeof selected?.affinity_prediction === 'number' ? selected.affinity_prediction : null, + user_description: selected?.content || selected?.title || (selected ? '已选择建议' : '未选择建议'), + reasoning: selected + ? `该节点为用户显式选择:${selected.title || selected.id || '建议'}.` + : '该节点未进行显式选择,无法从系统记录确定采用了哪个选项。', + ghost_options: [] + }); + } + return reviewNodes; + } + + computeAffinityChangeFromSelection(reviewNodes = []) { + const deltas = Array.isArray(reviewNodes) ? reviewNodes.map((n) => n?.selected_affinity_delta).filter((v) => typeof v === 'number' && !Number.isNaN(v)) : []; + const sum = deltas.reduce((acc, v) => acc + v, 0); + // 复盘口径:整段对话总变化限制到 [-10, +10](与 UI/历史数据兼容) + return Math.max(-10, Math.min(10, Math.round(sum))); + } + + async callLLMForReview(messages, nodes) { + const client = await this.ensureClient('review'); + const prompt = this.buildReviewPrompt(messages, nodes); + + console.log('[ReviewService] Sending prompt to LLM (Streaming):', prompt); + + let fullContent = ''; + const timeoutMs = this.resolveTimeoutMs(this.currentLLMConfig, DEFAULT_REVIEW_TIMEOUT_MS); + const controller = new AbortController(); + + let timer = null; + const resetTimer = () => { + if (timer) clearTimeout(timer); + timer = setTimeout(() => { + controller.abort(new Error(`LLM 生成响应超时(${timeoutMs}ms内无新数据)`)); + }, timeoutMs); + }; + + try { + resetTimer(); // 初始请求开始计时 + const stream = await client.chat.completions.create({ + model: this.currentLLMConfig.model_name, + messages: 
[{ role: 'user', content: prompt }], + max_tokens: 20000, // 按 200k context 来,给足输出空间 + stream: true, + thinking: { type: 'enabled' } // 启用 GLM-4.7 深度思考 + }, { signal: controller.signal }); + + let chunkCount = 0; + for await (const chunk of stream) { + chunkCount++; + const delta = chunk.choices?.[0]?.delta; + if (!delta) continue; + + // GLM-4.7: reasoning_content 是思考,content 是输出 + const reasoningContent = delta.reasoning_content || ''; + const content = delta.content || ''; + + if (reasoningContent || content) { + fullContent += content; // 只保存 content + resetTimer(); // 有任何输出就重置计时器 + } + + if (chunkCount <= 3 && content) { + console.log(`[ReviewService] Chunk ${chunkCount} content preview:`, content.slice(0, 50)); + } + } + + console.log(`[ReviewService] Stream finished. Total chunks: ${chunkCount}`); + } catch (apiError) { + if (apiError.name === 'AbortError') { + console.error('[ReviewService] LLM API 调用超时中断'); + throw new Error(`LLM API 生成响应超时,已自动中断(活跃超时限制: ${timeoutMs}ms)`); + } + console.error('[ReviewService] LLM API 流式调用失败:', apiError); + throw new Error(`LLM API 调用失败: ${apiError.message || apiError}`); + } finally { + if (timer) clearTimeout(timer); + } + + console.log('[ReviewService] LLM Stream Completed. 
Full length:', fullContent.length); + + if (!fullContent || !fullContent.trim()) { + throw new Error('LLM response is empty or invalid'); + } + + // 打印简短预览用于调试 + console.log('[ReviewService] LLM Response Preview:', fullContent.slice(0, 100).replace(/\n/g, '\\n') + '...'); + + return this.parseReviewToon(fullContent, nodes); + } + + csvSplit(line) { + const result = []; + let current = ''; + let inQuotes = false; + for (let i = 0; i < line.length; i += 1) { + const char = line[i]; + if (char === '"' && line[i - 1] !== '\\') { + inQuotes = !inQuotes; + continue; + } + if ((char === ',' || char === ',') && !inQuotes) { + result.push(current); + current = ''; + continue; + } + current += char; + } + if (current !== '' || line.endsWith(',') || line.endsWith(',')) { + result.push(current); + } + return result.map(s => s.trim().replace(/^["']|["']$/g, '')); // Trim and unquote + } + + resolveTimeoutMs(config, fallback) { + const raw = config?.timeout_ms; + const parsed = Number(raw); + if (Number.isFinite(parsed) && parsed > 0) { + return Math.round(parsed); + } + return fallback; + } + + parseReviewToon(text, originalNodes) { + if (!text) { + return { + version: "1.0", + has_nodes: originalNodes && originalNodes.length > 0, + summary: {}, + nodes: [] + }; + } + + // 预处理:处理引号内的换行符,将跨行记录合并为单行 + const rawLines = text.split('\n'); + const lines = []; + let buffer = ''; + let inQuotes = false; + + for (const line of rawLines) { + const currentLine = line.trim(); + if (!currentLine && !inQuotes) continue; + + if (buffer) { + buffer += '\n' + line; + } else { + buffer = line; + } + + // 统计未转义的引号 + for (let i = 0; i < line.length; i++) { + if (line[i] === '"' && (i === 0 || line[i - 1] !== '\\')) { + inQuotes = !inQuotes; + } + } + + if (!inQuotes) { + lines.push(buffer.trim()); + buffer = ''; + } + } + if (buffer) lines.push(buffer.trim()); + + const result = { + version: "1.0", + has_nodes: true, + summary: {}, + nodes: [] + }; + + // 解析 summary + const summaryHeaderRegex = 
/^review_summary\[(\d+)\]\{([^}]+)\}:?/; + const summaryHeaderIndex = lines.findIndex(l => l.match(summaryHeaderRegex)); + if (summaryHeaderIndex !== -1) { + const headerLine = lines[summaryHeaderIndex]; + const headerMatch = headerLine.match(summaryHeaderRegex); + const fieldsContent = headerMatch?.[2] || ""; + + // 预期字段顺序 + const expectedFields = [ + 'total_affinity_change', 'title', 'conversation_summary', 'self_evaluation', + 'chat_overview', 'expression_score', 'expression_desc', 'topic_score', + 'topic_desc', 'tags', 'attitude_analysis' + ]; + + let parts = []; + // 启发式判断:如果大括号内包含引号、数字或加减号,认为数据被错误地填入了括号内(模型幻觉) + const hasDataInBraces = fieldsContent.includes('"') || fieldsContent.includes('+') || fieldsContent.includes('-') || /\d/.test(fieldsContent); + + if (hasDataInBraces) { + parts = this.csvSplit(fieldsContent); + } else if (lines[summaryHeaderIndex + 1]) { + parts = this.csvSplit(lines[summaryHeaderIndex + 1]); + } + + if (parts.length > 0) { + const summaryMap = {}; + expectedFields.forEach((field, idx) => { + summaryMap[field] = parts[idx] ?? 
''; + }); + + result.summary.total_affinity_change = parseInt(summaryMap.total_affinity_change) || 0; + result.summary.title = summaryMap.title || ""; + // 优先 chat_overview 填充概要,其次 conversation_summary + result.summary.conversation_summary = summaryMap.conversation_summary || summaryMap.chat_overview || ""; + result.summary.self_evaluation = summaryMap.self_evaluation || ""; + result.summary.chat_overview = summaryMap.chat_overview || result.summary.conversation_summary; + // 解析用户表现评价的评分 + result.summary.performance_evaluation = { + expression_ability: { + score: parseInt(summaryMap.expression_score) || null, + description: summaryMap.expression_desc || "" + }, + topic_selection: { + score: parseInt(summaryMap.topic_score) || null, + description: summaryMap.topic_desc || "" + } + }; + // 解析 Tags + result.summary.tags = (summaryMap.tags || "").split(/[;;]/).map(t => t.trim()).filter(Boolean); + // 解析对象态度分析 + result.summary.attitude_analysis = summaryMap.attitude_analysis || ""; + } + } + + // 解析 nodes + const nodesHeaderRegex = /^(?:review_nodes|review_node)\[(\d+)\]\{([^}]+)\}:?/; + const headerLineIndex = lines.findIndex(l => l.match(nodesHeaderRegex)); + + // 策略:无论有没有 header,都尝试寻找 review_node: 开头的行 + const potentialNodeLines = lines.filter(l => l.trim().startsWith('review_node:')); + + if (headerLineIndex !== -1 || potentialNodeLines.length > 0) { + // 如果有 header,先处理 header 括号里的数据(模型有时会把第一条数据挤在括号里) + if (headerLineIndex !== -1) { + const headerLine = lines[headerLineIndex]; + const headerMatch = headerLine.match(nodesHeaderRegex); + const fieldsContent = headerMatch?.[2] || ""; + + const hasDataInBraces = fieldsContent.includes('"') || fieldsContent.includes('node_'); + if (hasDataInBraces && !fieldsContent.includes('node_id')) { + const parts = this.csvSplit(fieldsContent); + if (parts.length >= 7) { + const nodeId = parts[0]; + const originalNode = originalNodes.find(n => n.node_id === nodeId); + result.nodes.push({ + node_id: nodeId, + node_title: parts[1], + 
timestamp: originalNode ? originalNode.timestamp : 0, + choice_type: parts[2] === 'matched' ? 'matched' : (parts[2] === 'insight' ? 'insight' : 'custom'), + matched_suggestion_id: parts[3] || null, + match_confidence: parseFloat(parts[4]) || 0, + user_description: parts[5], + reasoning: parts[6], + ghost_options: [] + }); + } + } + } + + // 处理所有以 review_node: 开头的行 + potentialNodeLines.forEach(line => { + // 去掉前缀 + const content = line.replace(/^review_node:\s*/, '').trim(); + if (!content.includes(',')) return; + + const parts = this.csvSplit(content); + if (parts.length >= 7) { + let nodeId = parts[0]; + if (nodeId && !nodeId.startsWith('node_') && !isNaN(nodeId)) { + nodeId = `node_${nodeId}`; + } + + // 避免重复(如果 header 处理逻辑已经加过了) + if (result.nodes.some(n => n.node_id === nodeId)) return; + + const originalNode = originalNodes.find(n => n.node_id === nodeId); + + result.nodes.push({ + node_id: nodeId, + node_title: parts[1], + timestamp: originalNode ? originalNode.timestamp : (result.nodes.length > 0 ? result.nodes[result.nodes.length - 1].timestamp + 1000 : 0), + choice_type: parts[2] === 'matched' ? 'matched' : (parts[2] === 'insight' ? 
'insight' : 'custom'), + matched_suggestion_id: parts[3] || null, + match_confidence: parseFloat(parts[4]) || 0, + user_description: parts[5], + reasoning: parts[6], + ghost_options: [] + }); + } + }); + + // 如果连 review_node: 前缀都没写,只是纯 CSV 行(最后兜底) + if (result.nodes.length === 0 && headerLineIndex !== -1) { + for (let i = headerLineIndex + 1; i < lines.length; i++) { + const line = lines[i]; + if (line.match(/^review_summary/)) break; + if (line.startsWith('review_node:')) continue; // 已经处理过了 + if (!line.includes(',')) continue; + + const parts = this.csvSplit(line); + if (parts.length >= 7 && (parts[0].startsWith('node_') || !isNaN(parts[0]))) { + let nodeId = parts[0]; + if (!nodeId.startsWith('node_')) nodeId = `node_${nodeId}`; + + const originalNode = originalNodes.find(n => n.node_id === nodeId); + result.nodes.push({ + node_id: nodeId, + node_title: parts[1], + timestamp: originalNode ? originalNode.timestamp : 0, + choice_type: parts[2] === 'matched' ? 'matched' : (parts[2] === 'insight' ? 
'insight' : 'custom'), + matched_suggestion_id: parts[3] || null, + match_confidence: parseFloat(parts[4]) || 0, + user_description: parts[5], + reasoning: parts[6], + ghost_options: [] + }); + } + } + } + } + + // 去重 + const seenNodes = new Set(); + result.nodes = result.nodes.filter(n => { + if (seenNodes.has(n.node_id)) return false; + seenNodes.add(n.node_id); + return true; + }); + + return result; + } + + buildSimpleSummary(conversation, summaryText) { + return { + version: "1.0", + has_nodes: false, + summary: { + total_affinity_change: conversation?.affinity_change || 0, + node_count: 0, + matched_count: 0, + custom_count: 0, + conversation_summary: summaryText || "本次对话较为顺畅,无需特别决策点。", + self_evaluation: "暂无复盘评价。", + chat_overview: summaryText || "本次对话较为顺畅,无需特别决策点。", + performance_evaluation: { + expression_ability: { + score: null, + description: "" + }, + topic_selection: { + score: null, + description: "" + } + }, + tags: [], + attitude_analysis: "暂无态度分析。" + }, + nodes: [] + }; + } + + ensureReviewDataIntegrity(reviewData, originalNodes, conversation) { + const safe = reviewData || { summary: {}, nodes: [] }; + safe.summary = safe.summary || {}; + safe.nodes = Array.isArray(safe.nodes) ? 
safe.nodes : []; + + const expectedCount = originalNodes?.length || 0; + const parsedCount = safe.nodes.length; + const missingNodes = expectedCount > 0 && parsedCount === 0; + const countMismatch = expectedCount > 0 && parsedCount !== expectedCount; + + if (missingNodes) { + console.warn('[ReviewService] LLM 输出未解析出任何节点,降级为无节点复盘。'); + return this.buildSimpleSummary( + conversation, + safe.summary.conversation_summary || "未能解析关键决策点,已输出简要复盘。" + ); + } + + if (countMismatch) { + console.warn('[ReviewService] LLM 节点数与输入不一致,已继续使用解析结果:', { + expected: expectedCount, + parsed: parsedCount + }); + } + + safe.has_nodes = safe.nodes.length > 0; + safe.summary.node_count = safe.nodes.length; + + // 关键决策:原始输入中存在的节点(带系统建议) + safe.summary.decision_count = safe.nodes.filter(n => n.has_source).length; + // 转折点/Insight:LLM 额外识别的节点 + safe.summary.insight_count = safe.nodes.filter(n => !n.has_source).length; + + safe.summary.matched_count = safe.nodes.filter(n => n.choice_type === 'matched').length; + safe.summary.custom_count = safe.nodes.filter(n => n.choice_type === 'custom' || n.choice_type === 'insight').length; + + if (safe.summary.total_affinity_change === undefined || safe.summary.total_affinity_change === null) { + safe.summary.total_affinity_change = conversation?.affinity_change || 0; + } + if (!safe.summary.conversation_summary) { + safe.summary.conversation_summary = "本次对话复盘已生成。"; + } + if (!safe.summary.chat_overview) { + safe.summary.chat_overview = safe.summary.conversation_summary; + } + if (!safe.summary.self_evaluation) { + safe.summary.self_evaluation = "暂无复盘评价。"; + } + // 确保performance_evaluation字段存在 + if (!safe.summary.performance_evaluation) { + safe.summary.performance_evaluation = { + expression_ability: { + score: null, + description: "" + }, + topic_selection: { + score: null, + description: "" + } + }; + } else { + // 确保子字段存在 + if (!safe.summary.performance_evaluation.expression_ability) { + safe.summary.performance_evaluation.expression_ability = { score: 
null, description: "" }; + } + if (!safe.summary.performance_evaluation.topic_selection) { + safe.summary.performance_evaluation.topic_selection = { score: null, description: "" }; + } + } + if (!safe.summary.tags) { + safe.summary.tags = []; + } + if (!safe.summary.attitude_analysis) { + safe.summary.attitude_analysis = "暂无态度分析。"; + } + + return safe; + } + + enrichGhostOptions(reviewData, originalNodes) { + reviewData.nodes.forEach(reviewNode => { + const original = originalNodes.find(n => n.node_id === reviewNode.node_id); + // Mark if this node originated from a system suggestion (vs pure LLM insight) + reviewNode.has_source = !!original; + + if (original) { + // 使用系统记录的 selected_suggestion_id(优先)或 LLM 的 matched_suggestion_id + const actualSelectedId = reviewNode.selected_suggestion_id || reviewNode.matched_suggestion_id; + reviewNode.ghost_options = original.suggestions + .filter(s => s.id !== actualSelectedId) // 过滤掉实际选中的 + .map(s => ({ + suggestion_id: s.id, + content: s.content || s.title + })); + } + }); + } + + enrichAudioInfo(reviewData, conversationId) { + if (!reviewData.nodes || reviewData.nodes.length === 0) return; + + // 获取该对话的所有录音记录,按时间排序 + const records = this.db.getSpeechRecordsByConversation(conversationId); + if (!records || records.length === 0) return; + + reviewData.nodes.forEach(node => { + if (!node.timestamp) return; + + // 寻找离该节点 timestamp 最近的一个录音记录 (时间差最小) + // 且录音记录通常在节点之前或附近(例如用户说话后产生的节点) + let closest = null; + let minDiff = Infinity; + + for (const record of records) { + // 录音记录的 start_time 或 end_time 与节点 timestamp 的关系 + // 建议使用 end_time,因为识别完成时刻更接近节点触发时刻 + const refTime = record.end_time || record.start_time; + const diff = Math.abs(refTime - node.timestamp); + + // 只有当录音记录有文件路径时才考虑 + if (record.audio_file_path && diff < minDiff) { + minDiff = diff; + closest = record; + } + } + + // 如果找到且时间差距在合理范围内(例如 30秒内),则关联 + if (closest && minDiff < 30000) { + node.audio_record_id = closest.id; + node.audio_file_path = 
// Maximum history entries pulled for context when the caller gives no limit.
const DEFAULT_MESSAGE_LIMIT = 20;
// Hard cap on a single message's length after whitespace normalization.
const MAX_MESSAGE_CHARS = 500;

// Keyword lexicons for the lightweight last-message emotion heuristic below.
const POSITIVE_KEYWORDS = ['谢谢', '感激', '喜欢', '开心', '愉快', '高兴', '满意'];
const NEGATIVE_KEYWORDS = ['生气', '难过', '失望', '烦', '累', '不爽', '吵'];
const QUESTION_KEYWORDS = ['吗', '呢', '?', '?', '怎么', '为何', '可以', '要不要', '愿不愿意', '能否'];
const EXPECTATION_KEYWORDS = ['期待', '希望', '想', '盼', '等你', '一起', '约'];

// Collapse whitespace runs to single spaces, trim, and cap the length.
const sanitizeText = (text = '') => {
  const collapsed = (text || '').replace(/\s+/g, ' ').trim();
  return collapsed.slice(0, MAX_MESSAGE_CHARS);
};

// Map a numeric affinity value to a coarse stage label plus a reply-strategy
// hint. Non-numbers fall back to an "undefined" stage.
const getAffinityStage = (affinity) => {
  if (typeof affinity !== 'number') {
    return { label: '未定义', strategy: '默认保持礼貌与真诚,不冒进' };
  }
  if (affinity < 30) {
    return { label: '低好感(0-30)', strategy: '建立信任,保持礼貌真诚,避免过度暧昧或冒进' };
  }
  if (affinity < 70) {
    return { label: '中好感(30-70)', strategy: '适度暧昧,展示关心与幽默,逐步加深话题' };
  }
  return { label: '高好感(70+)', strategy: '可以更直接和亲密,表达心意,但尊重边界' };
};

// Classify the latest message's emotion via keyword hits.
// Priority order: negative > positive > question > expectation > neutral.
const analyzeLastMessageEmotion = (messages = []) => {
  const latest = messages[messages.length - 1];
  if (!latest || !latest.content) return { label: '中性', reason: '缺少有效内容' };
  const lower = sanitizeText(latest.content || latest.text || '').toLowerCase();

  const hitAny = (keywords) => keywords.some((kw) => lower.includes(kw.toLowerCase()));

  if (hitAny(NEGATIVE_KEYWORDS)) return { label: '负面/不满', reason: '检测到负面情绪词' };
  if (hitAny(POSITIVE_KEYWORDS)) return { label: '正向/愉快', reason: '检测到积极情绪词' };
  if (hitAny(QUESTION_KEYWORDS)) return { label: '疑问/等待回应', reason: '包含疑问/提问词' };
  if (hitAny(EXPECTATION_KEYWORDS)) return { label: '期待/邀请', reason: '包含期待或邀请表达' };

  return { label: '中性', reason: '未命中显著情绪/提问关键词' };
};

// Extract up to `limit` personality keywords from traits data, which may be a
// JSON string, an array (of strings or objects), or an object with `keywords`.
// Any parse failure yields an empty list.
const pickTopTraits = (traitsData, limit = 3) => {
  if (!traitsData) return [];
  try {
    const source = typeof traitsData === 'string' ? JSON.parse(traitsData) : traitsData;

    if (Array.isArray(source)) {
      return source.slice(0, limit).map((item) => {
        if (typeof item === 'string') return item;
        if (item?.name) return item.name;
        return typeof item === 'object' ? Object.values(item).join('/') : String(item);
      });
    }

    if (Array.isArray(source.keywords)) {
      return source.keywords.slice(0, limit);
    }

    return [];
  } catch {
    return [];
  }
};
// Render a timestamp as a coarse Chinese relative-time tag ("5秒前", "3天前").
// Falsy timestamps (0/undefined) yield an empty string.
const formatRelativeTime = (timestamp) => {
  if (!timestamp) return '';
  const elapsedSec = Math.floor((Date.now() - timestamp) / 1000);
  if (elapsedSec < 60) return `${elapsedSec}秒前`;
  if (elapsedSec < 3600) return `${Math.floor(elapsedSec / 60)}分钟前`;
  if (elapsedSec < 86400) return `${Math.floor(elapsedSec / 3600)}小时前`;
  return `${Math.floor(elapsedSec / 86400)}天前`;
};

// Format chat history as "[time] 玩家/角色:content" lines. When the caller
// indicates more messages exist than are shown, a truncation header is added.
//
// @param {Array<Object>} messages - messages to render (already limited)
// @param {number|null} totalCount - full count for the truncation header
// @returns {string} joined multi-line history text
export function formatMessageHistory(messages = [], totalCount = null) {
  if (!messages.length) return '暂无历史消息。';

  const lines = [];
  const truncated = totalCount && totalCount > messages.length;
  if (truncated) {
    lines.push(`【对话历史(共 ${totalCount} 条,显示最近 ${messages.length} 条)】`);
  }

  for (const msg of messages) {
    const role = msg.sender === 'user' ? '玩家' : '角色';
    const body = sanitizeText(msg.content || msg.text || '');
    const tag = formatRelativeTime(msg.timestamp);
    lines.push(`${tag ? `[${tag}] ` : ''}${role}:${body}`);
  }

  return lines.join('\n');
}
// Header line shape: `suggestions[N]{field,field}:` ending the line.
const HEADER_REGEX = /^suggestions\[(\d+)\]\{([^}]+)\}:\s*$/i;

// Variant some models emit: `suggestions[N]{...}` on a data row, no colon.
const INLINE_ROW_REGEX = /^suggestions\[(\d+)\]\{([^}]+)\}\s*;?\s*$/i;

// One leading and/or trailing ASCII or curly quote character.
const STRING_QUOTES = /^["'“”]|["'“”]$/g;

// Field layout assumed until a header declares otherwise.
const DEFAULT_FIELDS = ['suggestion', 'tags'];

// Trim a raw cell value; strip one matched pair of full-width Chinese quotes,
// then any single stray leading/trailing quote, and trim again.
const normalizeValue = (value = '') => {
  let text = value.trim();
  const isWrapped = (open, close) => text.startsWith(open) && text.endsWith(close);
  if (isWrapped('“', '”') || isWrapped('‘', '’')) {
    text = text.slice(1, -1);
  }
  return text.replace(STRING_QUOTES, '').trim();
};
// Normalize a raw tag cell and split it on ASCII/fullwidth commas or 、,
// dropping empty fragments.
const parseTags = (raw) => {
  if (!raw) return [];
  const cleaned = normalizeValue(raw);
  if (!cleaned) return [];
  const tags = [];
  for (const piece of cleaned.split(/[,,、]/)) {
    const tag = piece.trim();
    if (tag) tags.push(tag);
  }
  return tags;
};

// Split a record on unquoted commas (ASCII or fullwidth). Unescaped ASCII or
// curly double quotes toggle the quoted state and are dropped from the output;
// a curly open quote is closed by its matching close quote or any ASCII `"`.
const csvSplit = (line) => {
  const cells = [];
  let cell = '';
  let open = null; // currently-open quote char, or null when unquoted

  for (let i = 0; i < line.length; i += 1) {
    const ch = line[i];
    const isQuote = ch === '"' || ch === '“' || ch === '”';

    if (isQuote && line[i - 1] !== '\\') {
      if (open === null) {
        open = ch;
      } else if (ch === '"' || ch === open || (open === '“' && ch === '”')) {
        open = null;
      }
      // Quote characters themselves never reach the output.
      continue;
    }

    if (open === null && (ch === ',' || ch === ',')) {
      cells.push(cell);
      cell = '';
    } else {
      cell += ch;
    }
  }

  // A trailing delimiter yields one final empty cell.
  if (cell !== '' || line.endsWith(',') || line.endsWith(',')) {
    cells.push(cell);
  }
  return cells;
};

// Parse an affinity-delta cell into an integer clamped to [-10, 10].
// Missing, blank, or non-numeric input yields null.
const parseAffinityDelta = (raw) => {
  if (raw === undefined || raw === null) return null;
  const text = normalizeValue(String(raw));
  if (!text) return null;
  const n = Number.parseInt(text, 10);
  return Number.isNaN(n) ? null : Math.min(10, Math.max(-10, n));
};
null; + this.fields = DEFAULT_FIELDS; + this.headerSkipCount = 0; + this.MAX_HEADER_SKIP = 5; + } + + push(chunk) { + if (!chunk) { + console.log('[ToonSuggestionStreamParser] Received empty chunk, skipping'); + return; + } + + console.log(`[ToonSuggestionStreamParser] Received chunk (${chunk.length} chars): "${chunk.replace(/\n/g, '\\n')}"`); + this.buffer += chunk; + console.log(`[ToonSuggestionStreamParser] Buffer length: ${this.buffer.length}`); + + let newlineIndex = this.buffer.indexOf('\n'); + let lineCount = 0; + while (newlineIndex >= 0) { + const line = this.buffer.slice(0, newlineIndex).trim(); + this.buffer = this.buffer.slice(newlineIndex + 1); + console.log(`[ToonSuggestionStreamParser] Processing line ${++lineCount}: "${line}"`); + this.processLine(line); + newlineIndex = this.buffer.indexOf('\n'); + } + // 将当前未结束的行以 partial 形式暴露,便于前端流式展示 + // 只有在 header 已解析后才发送 partial,避免把 header 内容误当作 suggestion + if (typeof this.onPartialSuggestion === 'function' && this.headerParsed) { + const partialLine = this.buffer.trim(); + if (partialLine) { + this.onPartialSuggestion({ + suggestion: normalizeValue(partialLine), + tags: [] + }); + } + } + + console.log(`[ToonSuggestionStreamParser] Remaining buffer (${this.buffer.length} chars): "${this.buffer}"`); + } + + end() { + console.log('[ToonSuggestionStreamParser] Stream ended, processing remaining buffer'); + const remaining = this.buffer.trim(); + if (remaining) { + console.log(`[ToonSuggestionStreamParser] Processing remaining line: "${remaining}"`); + this.processLine(remaining); + } else { + console.log('[ToonSuggestionStreamParser] No remaining content in buffer'); + } + this.buffer = ''; + console.log('[ToonSuggestionStreamParser] Parser finished'); + } + + processLine(line) { + if (!line) { + console.log('[ToonSuggestionStreamParser] Skipping empty line'); + return; + } + + // 检测 SKIP 信号(不需要建议) + const trimmed = line.trim(); + if (trimmed === 'SKIP' || trimmed.toUpperCase() === 'SKIP') { + 
console.log('[ToonSuggestionStreamParser] Detected SKIP signal'); + this.skipDetected = true; + if (typeof this.onSkip === 'function') { + this.onSkip({ reason: 'no_suggestion_needed' }); + } + return; + } + + // 如果已经检测到SKIP,忽略后续所有内容 + if (this.skipDetected) { + return; + } + + // 处理某些模型可能出现的每行都带 suggestions[N]{...} 的变体格式 + const inlineMatch = line.match(INLINE_ROW_REGEX); + if (inlineMatch && !line.match(HEADER_REGEX)) { + console.log('[ToonSuggestionStreamParser] Detected inline row format'); + const count = Number(inlineMatch[1]); + const content = inlineMatch[2]; + + if (!this.headerParsed) { + console.log('[ToonSuggestionStreamParser] Implicitly parsing header from inline row'); + this.expectedCount = count; + this.fields = ['suggestion', 'affinity_delta', 'tags']; + this.headerParsed = true; + if (typeof this.onHeader === 'function') { + this.onHeader({ expectedCount: this.expectedCount, fields: this.fields }); + } + } + + this.parseRow(content); + return; + } + + const lower = line.toLowerCase(); + const normalizedToon = lower.replace(/[`]/g, '').replace(/\s/g, ''); + if ( + normalizedToon === 'toon' || + (lower.startsWith('```') && lower.includes('toon')) || + lower === '```toon' || + lower.startsWith('toon```') + ) { + console.log('[ToonSuggestionStreamParser] Skipping code fence/toon marker line:', line); + return; + } + if (!this.headerParsed) { + console.log('[ToonSuggestionStreamParser] Header not parsed yet, parsing header'); + this.parseHeader(line); + return; + } + console.log('[ToonSuggestionStreamParser] Parsing data row'); + this.parseRow(line); + } + + parseHeader(line) { + console.log(`[ToonSuggestionStreamParser] Attempting to parse header: "${line}"`); + const match = line.match(HEADER_REGEX); + if (!match) { + // 容忍若干行非表头文本(模型可能输出客套或提示语) + const shouldSkip = + /^```/.test(line) || + /^(好的|以下|这里|结果|建议|总结|输出)/i.test(line) || + /^(toon:?)$/i.test(line); + if (shouldSkip && this.headerSkipCount < this.MAX_HEADER_SKIP) { + this.headerSkipCount += 
1; + console.warn( + `[ToonSuggestionStreamParser] Skipping non-header line (${this.headerSkipCount}/${this.MAX_HEADER_SKIP}): "${line}"` + ); + return; + } + + this.headerSkipCount += 1; + if (this.headerSkipCount <= this.MAX_HEADER_SKIP) { + console.warn( + `[ToonSuggestionStreamParser] Non-header line tolerated (${this.headerSkipCount}/${this.MAX_HEADER_SKIP}): "${line}"` + ); + return; + } + + console.error(`[ToonSuggestionStreamParser] Header format invalid after tolerance: "${line}"`); + this.emitError(new Error(`TOON 表头格式不正确:${line}`)); + return; + } + + this.headerParsed = true; + this.expectedCount = Number(match[1]); + console.log(`[ToonSuggestionStreamParser] Parsed expected count: ${this.expectedCount}`); + + const fieldList = match[2] + .split(',') + .map((field) => { + const f = field.trim(); + // 映射常见的中文表头到标准字段名 + if (f === '建议内容' || f === '建议') return 'suggestion'; + if (f === '好感度变化' || f === '好感变化' || f === '好感变化预测') return 'affinity_delta'; + if (f === '标签' || f === '策略标签') return 'tags'; + return f; + }) + .filter(Boolean); + console.log(`[ToonSuggestionStreamParser] Parsed fields: [${fieldList.join(', ')}]`); + + if (fieldList.length) { + this.fields = fieldList; + } + + if (typeof this.onHeader === 'function') { + console.log('[ToonSuggestionStreamParser] Calling onHeader callback'); + this.onHeader({ + expectedCount: this.expectedCount, + fields: this.fields + }); + } else { + console.warn('[ToonSuggestionStreamParser] No onHeader callback provided'); + } + } + + parseRow(line) { + console.log(`[ToonSuggestionStreamParser] Parsing row: "${line}"`); + + // 跳过纯分隔符/占位符行(如 ..., …, ---, ``` 等) + const symbolicOnly = line.trim().replace(/\s/g, ''); + if ( + !symbolicOnly || + /^(```|---|—{2,}|\.{2,}|…+|={2,})$/i.test(symbolicOnly) + ) { + console.log('[ToonSuggestionStreamParser] Skipping non-content row:', line); + return; + } + + const isSuggestionOnly = + this.fields.length === 2 && + this.fields.includes('suggestion') && + 
this.fields.includes('tags'); + + const isSuggestionAffinityTags = + this.fields.length === 3 && + this.fields[0] === 'suggestion' && + this.fields[1] === 'affinity_delta' && + this.fields[2] === 'tags'; + + let values = []; + if (isSuggestionOnly) { + values = splitByLastDelimiter(line); + } else if (isSuggestionAffinityTags) { + // 更加稳健的切分策略:寻找符合好感度变化(数字)的部分作为锚点 + const allParts = csvSplit(line); + let affinityIndex = -1; + for (let i = allParts.length - 1; i >= 0; i -= 1) { + const val = normalizeValue(allParts[i]); + if (/^[+-]?\d+$/.test(val)) { + affinityIndex = i; + break; + } + } + + if (affinityIndex !== -1) { + const suggestionRaw = allParts.slice(0, affinityIndex).join(','); + const affinityRaw = allParts[affinityIndex]; + const tagsRaw = allParts.slice(affinityIndex + 1).join(','); + values = [suggestionRaw, affinityRaw, tagsRaw]; + } else { + // 退化方案 + const parts = splitByLastDelimiter(line); + const left = parts[0] ?? ''; + const tagsRaw = parts[1] ?? ''; + const parts2 = splitByLastDelimiter(left); + const suggestionRaw = parts2[0] ?? ''; + const affinityRaw = parts2[1] ?? ''; + values = [suggestionRaw, affinityRaw, tagsRaw]; + } + } else { + values = csvSplit(line); + } + + console.log(`[ToonSuggestionStreamParser] Parsed CSV values: [${values.map(v => `"${v}"`).join(', ')}]`); + + if (!values.length) { + console.log('[ToonSuggestionStreamParser] No values parsed, skipping'); + return; + } + + const suggestion = {}; + this.fields.forEach((field, index) => { + suggestion[field] = values[index] !== undefined ? 
normalizeValue(values[index]) : ''; + }); + + console.log(`[ToonSuggestionStreamParser] Mapped suggestion:`, suggestion); + + const normalized = { + suggestion: suggestion.suggestion || suggestion.title || suggestion.content || `选项`, + tags: parseTags(suggestion.tags || suggestion.tag_list || ''), + affinity_delta: parseAffinityDelta(suggestion.affinity_delta) + }; + + console.log(`[ToonSuggestionStreamParser] Normalized suggestion:`, normalized); + + if (typeof this.onSuggestion === 'function') { + console.log('[ToonSuggestionStreamParser] Calling onSuggestion callback'); + this.onSuggestion(normalized); + } else { + console.warn('[ToonSuggestionStreamParser] No onSuggestion callback provided'); + } + } + + emitError(error) { + if (typeof this.onError === 'function') { + this.onError(error); + } else { + console.warn('[ToonSuggestionStreamParser]', error); + } + } +} + +export const createToonSuggestionStreamParser = (options) => + new ToonSuggestionStreamParser(options); diff --git a/desktop/src/core/modules/window-manager.js b/desktop/src/core/modules/window-manager.js index ae442ee..4b2efc1 100644 --- a/desktop/src/core/modules/window-manager.js +++ b/desktop/src/core/modules/window-manager.js @@ -2,12 +2,23 @@ import electron from 'electron'; import path from 'path'; import { fileURLToPath } from 'url'; -const { BrowserWindow, screen } = electron; +const { app, BrowserWindow, screen, dialog } = electron; // 获取 __dirname 的 ESM 等效方式 const __filename = fileURLToPath(import.meta.url); const __dirname = path.dirname(__filename); +/** + * 生产环境下获取打包后的渲染进程入口文件路径 + * 开发环境下不会使用(dev 走 loadURL) + * @param {string} fileName + */ +function getRendererFilePath(fileName) { + // app.getAppPath() 会指向 app.asar 根目录,dist/renderer 位于其下一级 + const appRoot = app.getAppPath(); + return path.join(appRoot, 'dist', 'renderer', fileName); +} + /** * 窗口管理器 - 负责创建和管理应用窗口 */ @@ -15,6 +26,8 @@ export class WindowManager { constructor() { this.mainWindow = null; this.hudWindow = null; + 
this.hudCreating = false; // 防止重复创建 HUD + this.hudCreateNotified = false; // 避免反复弹窗 this.hudDragState = { isDragging: false, startPos: { x: 0, y: 0 }, @@ -69,9 +82,21 @@ export class WindowManager { this.mainWindow.loadURL('http://localhost:5173'); } else { // 生产环境:加载构建后的文件 - this.mainWindow.loadFile(path.join(__dirname, '../../dist/renderer/index.html')); + this.mainWindow.loadFile(getRendererFilePath('index.html')); } + // 主窗口加载错误/渲染日志(生产环境排查白屏很关键) + this.mainWindow.webContents.on('did-fail-load', (event, errorCode, errorDescription, validatedURL) => { + console.error('[MainWindow] did-fail-load:', { errorCode, errorDescription, validatedURL }); + }); + this.mainWindow.webContents.on('render-process-gone', (_event, details) => { + console.error('[MainWindow] render-process-gone:', details); + }); + this.mainWindow.webContents.on('console-message', (_event, level, message, line, sourceId) => { + const levelName = ['debug', 'info', 'warn', 'error'][level] || String(level); + console.log(`[Renderer:${levelName}] ${message} (${sourceId}:${line})`); + }); + // 窗口准备就绪后显示 this.mainWindow.once('ready-to-show', () => { this.mainWindow.show(); @@ -98,24 +123,69 @@ export class WindowManager { * @param {Function} onHudClosed - HUD关闭回调 */ async createHUDWindow(checkASRReady, onHudClosed) { + if (this.hudCreating) { + return; + } + this.hudCreating = true; + this.hudCreateNotified = false; + try { + const parentWindow = this.getMainWindow(); + const notifyHudLoading = (payload) => { + if (parentWindow) { + parentWindow.webContents.send('hud-loading', payload); + } + }; + // 检查ASR模型是否就绪,如果未就绪则等待 console.log('[HUD] 检查ASR模型状态...'); let checkAttempts = 0; - const maxAttempts = 120; // 最多等待12秒(120 * 100ms) + const maxAttempts = 1800; // 最多等待180秒(1800 * 100ms),覆盖首次模型下载场景 while (checkAttempts < maxAttempts) { const status = await checkASRReady(); + const waitedSeconds = Number(((checkAttempts * 100) / 1000).toFixed(1)); if (status.ready) { console.log('[HUD] ASR模型已就绪:', 
status.message); + if (parentWindow) { + parentWindow.webContents.send('hud-ready', { + message: 'ASR模型已就绪,正在打开HUD...', + waitedSeconds, + from: 'hud' + }); + } break; } if (checkAttempts === 0) { console.log('[HUD] ASR模型未就绪,等待加载:', status.message); + notifyHudLoading({ + message: status.message || 'ASR模型正在加载,请稍候...', + waitedSeconds, + downloading: status.downloading, + from: 'hud' + }); } else if (checkAttempts % 10 === 0) { // 每1秒输出一次状态 - console.log(`[HUD] 等待ASR模型加载中... (${checkAttempts * 100}ms)`); + const waitedMs = checkAttempts * 100; + console.log(`[HUD] 等待ASR模型加载中... (${(waitedMs / 1000).toFixed(1)}s)`); + notifyHudLoading({ + message: status.message || 'ASR模型正在加载,请稍候...', + waitedSeconds, + downloading: status.downloading, + from: 'hud' + }); + } else if (checkAttempts % 100 === 0) { + // 每10秒输出一次详细提示 + const waitedSec = (checkAttempts * 100) / 1000; + console.log(`[HUD] ASR仍在加载,可能正在下载模型,请稍候... 已等待 ${waitedSec.toFixed(0)}s`); + notifyHudLoading({ + message: status.message || 'ASR模型正在加载,请稍候...', + waitedSeconds, + downloading: status.downloading, + from: 'hud', + detailed: true + }); } checkAttempts++; @@ -124,8 +194,14 @@ export class WindowManager { if (checkAttempts >= maxAttempts) { console.warn('[HUD] ASR模型加载超时,但继续创建HUD窗口'); + notifyHudLoading({ + message: '等待ASR模型加载超时,仍尝试打开HUD,请稍候', + waitedSeconds: Number(((checkAttempts * 100) / 1000).toFixed(1)), + from: 'hud', + timedOut: true + }); } else { - console.log(`[HUD] ASR模型就绪,等待时间: ${checkAttempts * 100}ms`); + console.log(`[HUD] ASR模型就绪,等待时间: ${(checkAttempts * 100 / 1000).toFixed(1)}s`); } const primaryDisplay = screen.getPrimaryDisplay(); @@ -177,7 +253,7 @@ export class WindowManager { this.hudWindow.loadURL('http://localhost:5173/hud.html'); } else { // 生产环境:从构建后的文件加载 - this.hudWindow.loadFile(path.join(__dirname, '../../dist/renderer/hud.html')); + this.hudWindow.loadFile(getRendererFilePath('hud.html')); } // 页面加载完成后再显示 @@ -208,6 +284,8 @@ export class WindowManager { } catch (error) { 
console.error('Failed to create HUD window:', error); } + this.hudCreating = false; + this.hudCreateNotified = false; } /** @@ -357,4 +435,4 @@ export class WindowManager { getHUDWindow() { return this.hudWindow; } -} \ No newline at end of file +} diff --git a/desktop/src/core/prompts/README.md b/desktop/src/core/prompts/README.md new file mode 100644 index 0000000..dfda803 --- /dev/null +++ b/desktop/src/core/prompts/README.md @@ -0,0 +1,29 @@ +# Prompt Templates + +本目录存放 LiveGalGame Desktop 的 **LLM Prompt 模板**(纯文本/Markdown),用于将 Prompt 从业务 JS 逻辑中解耦,便于: + +- 开源协作:Prompt diff 更直观,review 更容易 +- 迭代维护:改文案不必改业务代码结构 +- 版本管理:可对 Prompt 做独立演进与回滚 + +## 使用方式 + +Prompt 由 `src/core/modules/prompt-manager.js` 加载并渲染,支持最小化的变量替换: + +- 变量语法:`{{key}}` 或 `{{nested.key}}` +- 未提供的变量默认替换为空字符串(保持运行时鲁棒) +- 如需新增模板:在本目录新增文件,并在 `prompt-manager.js` 的 `PROMPT_FILES` 注册 + +## 现有模板 + +- `suggestion.prompt.md`:对话建议生成(TOON suggestions) +- `situation.prompt.md`:时机判断(TOON situation) +- `review.with_nodes.prompt.md`:复盘(有决策节点) +- `review.no_nodes.prompt.md`:复盘(无决策节点) + +## 约定与注意事项 + +- **输出格式优先级最高**:TOON 表头/字段定义必须稳定,否则解析器会失败。 +- **好感度评估采用 Rubric**:复盘类 Prompt 使用论文评审式 5 档(reject/weak reject/weak accept/accept/strong accept)来约束 `total_affinity_change` 的取值区间,避免评分漂移。 +- Prompt 里尽量避免与运行时变量同名的 `{{...}}` 文本;如果确实需要,考虑改写文案。 +- 模板文件会随 Electron 打包(因为位于 `src/core/**/*` 范围内),无需额外配置。 diff --git a/desktop/src/core/prompts/review.no_nodes.prompt.md b/desktop/src/core/prompts/review.no_nodes.prompt.md new file mode 100644 index 0000000..d6fbdbe --- /dev/null +++ b/desktop/src/core/prompts/review.no_nodes.prompt.md @@ -0,0 +1,38 @@ +# Role +你是恋爱对话复盘分析师。 + +# Task +根据对话记录,总结对话内容并评估好感度变化。 +补充以下内容: +1. 用户表现评价:对用户在本次对话中的表现做详细评价,包括: + - 表述能力评分(0-100分)和一句话评价(10~30字) + - 话题选择评分(0-100分)和一句话评价(10~30字) +2. 标题与概要: + - 为本次对话生成一个标题(title),6-15字,吸引人且概括核心内容。 + - 用1-2句话概述对话主题/走向(conversation_summary),适合直接展示给用户。 + - 整体表现评价(10~40字) +3. 对话标签(Tag):生成3-5个简短的标签(如:破冰、分享、幽默、关心),概括对话特点。 +4. 
对象态度分析:详细分析对象对用户的好感度变化和态度倾向(20~50字)。 + +# Input + +## 对话记录 +{{transcript}} + +## 关键节点及当时的选项 +无关键决策节点 + +# Output (TOON 格式) +输出分为两部分,请严格遵守格式,不要输出其他废话: + +第一部分:整体总结(单独一行) +review_summary[1]{total_affinity_change,title,conversation_summary,self_evaluation,chat_overview,expression_score,expression_desc,topic_score,topic_desc,tags,attitude_analysis}: +<好感度变化整数>,<对话标题>,<对话整体概述>,<用户整体表现评价>,<对话概要>,<表述能力评分0-100>,<表述能力描述>,<话题选择评分0-100>,<话题选择描述>,<标签列表(分号分隔)>,<对象态度分析> + +# 规则 +- total_affinity_change: 填写 0(由系统根据用户显式选择的建议计算真实好感度变化并覆盖该值) +- 字段用英文逗号分隔,如内容含逗号请用引号包裹 + +# 示例 +review_summary[1]{total_affinity_change,title,conversation_summary,self_evaluation,chat_overview,expression_score,expression_desc,topic_score,topic_desc,tags,attitude_analysis}: +0,初次见面寒暄,双方进行了简单的日常寒暄,氛围和谐。,回复自然有礼,能给予积极反馈,聊了日常和兴趣,气氛温和友善。,82,表达自然有礼,85,话题选择合适,日常;寒暄;温和,对方态度友善,回应积极,但尚未深入交流,保持礼貌距离。 diff --git a/desktop/src/core/prompts/review.with_nodes.prompt.md b/desktop/src/core/prompts/review.with_nodes.prompt.md new file mode 100644 index 0000000..fcc4034 --- /dev/null +++ b/desktop/src/core/prompts/review.with_nodes.prompt.md @@ -0,0 +1,46 @@ +# Role +你是恋爱对话复盘分析师。 + +# Task +根据对话记录和已知的"关键节点"(系统当时生成选项的时刻),分析每个节点用户的实际表现,并总结对话。 + +# Input + +## 对话记录 +{{transcript}} + +## 关键节点及当时的选项(共 {{nodesCount}} 个节点) +{{nodeInfo}} + +# Output (TOON 格式) +请严格按照以下格式输出,不要添加任何额外说明: + +## 1. 节点分析(每个节点一行) +review_nodes[{{nodesCount}}]{node_id,node_title,choice_type,matched_suggestion_id,match_confidence,user_description,reasoning}: +<节点ID>,<节点标题10-20字>,<选择类型>,<匹配的建议ID或空>,<匹配置信度0.0-1.0>,<用户实际表现描述20-50字>,<分析推理30-80字> + +## 2. 整体总结(单独一行) +review_summary[1]{total_affinity_change,title,conversation_summary,self_evaluation,chat_overview,expression_score,expression_desc,topic_score,topic_desc,tags,attitude_analysis}: +<好感度变化整数>,<对话标题>,<对话整体概述>,<用户整体表现评价>,<对话概要>,<表述能力评分0-100>,<表述能力描述>,<话题选择评分0-100>,<话题选择描述>,<标签列表(分号分隔)>,<对象态度分析> + +# 规则 +1. 
**节点分析**: + - node_id: 使用输入中提供的节点ID(如 node_1, node_2) + - node_title: 为该决策点生成简洁有趣的标题 + - choice_type: 填 "matched"(用户采纳了某个建议)或 "custom"(用户自由发挥) + - matched_suggestion_id: 如果是 matched,填写最匹配的建议ID;否则留空 + - match_confidence: 0.0-1.0,表示用户话语与该建议的匹配程度 + - user_description: 用20-50字描述用户在该节点的实际表现 + - reasoning: 30-80字分析为什么这样判断,包括话语风格、内容相似度等 + +2. **整体总结**: + - total_affinity_change: 填写 0(系统会根据实际选择覆盖) + - 字段用英文逗号分隔,如内容含逗号请用引号包裹 + +# 示例 +review_nodes[2]{node_id,node_title,choice_type,matched_suggestion_id,match_confidence,user_description,reasoning}: +node_1,积极回应露营邀约,matched,llm-suggestion-1766583092177-0,0.85,"用户主动询问活动细节,表现出浓厚兴趣","用户的询问与建议高度一致,都聚焦在活动氛围和趣味点上,语气积极主动" +node_2,自由表达氛围感受,custom,,0.3,"用户用个性化语言描述篝火晚会的氛围感","虽然话题延续了篝火晚会,但表达方式完全是用户个人风格,未采纳任何建议模板" + +review_summary[1]{total_affinity_change,title,conversation_summary,self_evaluation,chat_overview,expression_score,expression_desc,topic_score,topic_desc,tags,attitude_analysis}: +0,露营活动探讨,用户主动询问露营活动细节,展现出对户外活动的兴趣,双方围绕篝火晚会等话题展开愉快交流,回应自然主动,善于通过追问深化话题,围绕共同兴趣展开,话题选择恰当,82,表达流畅自然,善于延续话题,85,能抓住对方兴趣点深入交流,兴趣;探索;户外;互动,对方表现出积极分享的态度,主动介绍活动细节,好感度稳步提升。 diff --git a/desktop/src/core/prompts/situation.prompt.md b/desktop/src/core/prompts/situation.prompt.md new file mode 100644 index 0000000..5cd3159 --- /dev/null +++ b/desktop/src/core/prompts/situation.prompt.md @@ -0,0 +1,44 @@ +# Role +你是恋爱对话的“交互时机决策系统”,唯一任务是判断此刻是否需要立刻向玩家推送“回复建议”。 + +# Task +分析【角色信息】【对话历史】【实时信号】,在“需要帮助/推进”时果断介入,在“无关紧要/自然流”时保持安静。 + +# Decision Logic +1) need_options=true: + - 关键交互:角色提问/邀约/二选一/期待表态。 + - 打破冷场:冷场时间较长(参考信号)。 + - 切入对话:连续角色消息较多(参考信号),需要给玩家回复选项。 +2) need_options=false: + - 仅日常陈述、感叹,无明确期待;对话流畅无需辅助。 + +# Output (Strict TOON Format) +必须输出两行,禁止 JSON/代码块/解释/前缀/后缀: + +第一行(表头):situation[1]{need_options,trigger,reason,confidence}: +第二行(数据):值1,值2,值3,值4 + +【格式要求】 +- 表头和数据行必须分开,不能在同一行 +- 表头必须以冒号结尾 +- 数据行用英文逗号分隔,顺序对应表头字段 +- reason字段如果包含逗号,请用引号包裹,如:"理由,包含逗号" + +【字段说明】 +- need_options: true 或 false(是否介入) +- trigger: question | invite | message_burst | silence | other +- 
reason: 简短中文决策理由,如"角色提问等待回答""冷场超10秒需破冰" +- confidence: 0.0-1.0 的数值 + +【示例】 +situation[1]{need_options,trigger,reason,confidence}: +true,silence,冷场超3秒需破冰,0.8 + +# Context Data +【角色信息】{{characterProfile}} +【对话历史】 +{{historyText}} +{{signalLines}} + +请严格按照上述格式输出,表头和数据行必须分开,不要在同一行。 + diff --git a/desktop/src/core/prompts/suggestion.prompt.md b/desktop/src/core/prompts/suggestion.prompt.md new file mode 100644 index 0000000..6793827 --- /dev/null +++ b/desktop/src/core/prompts/suggestion.prompt.md @@ -0,0 +1,106 @@ +# 角色定位 +你是"恋爱互动教练",职责包括两部分: +1. **判断时机**:分析对话状态,决定是否需要给用户提供回复建议 +2. **生成建议**:如果需要,生成具体的回复策略和话术 + +# 输出规则 +{{skipRule}} +- 如果需要建议,输出 TOON 格式的建议列表 +- 禁止输出任何解释性文字如"好的"/"以下是"/"让我分析" + +--- + +<触发方式>{{triggerLabel}} +<触发策略指导>{{triggerGuidance}} +<角色档案>{{characterProfile}} +<好感阶段策略>{{affinityStageText}} +<对话历史> +{{historyText}} +<情感分析>{{emotionText}} +{{previousSuggestionText}} + +--- + +# Few-Shot 示例 + +## 示例1:不需要建议 - 话未说完 +``` +【对话历史】 +[10秒前] 角色:哈哈哈今天遇到一件超搞笑的事 +[5秒前] 角色:我跟你说... +[2秒前] 角色:(还在打字中) +【情感分析】兴奋/期待分享 +``` +**输出:** +SKIP + +## 示例2:需要建议 - 角色提问/邀约 +``` +【对话历史】 +[30秒前] 角色:最近工作还好吗? +[20秒前] 玩家:还行吧 +[5秒前] 角色:那...周末有空吗? +【情感分析】期待/试探 +【触发方式】静默/被动(上一条消息后沉默 8 秒) +``` +**输出:** +suggestions[3]{suggestion,affinity_delta,tags}: +积极回应"有空!"然后问她"有什么安排吗?",表现出期待感,+4,积极回应、推进 +如果真的不确定,可以说"要看情况,怎么了?"再问清楚她的意图,+2,稳妥确认、礼貌 +用幽默的方式回"周末在等你约"配合一个调皮表情(适合暧昧期), +5,幽默暧昧、撩 + +## 示例3:需要建议 - 尴尬冷场 +``` +【对话历史】 +[3分钟前] 角色:今天上班好累啊... +[3分钟前] 玩家:辛苦啦~ +[2分钟前] 角色:嗯... +[沉默 2 分钟] +【情感分析】疲惫/需要安慰 +【触发方式】静默检测(沉默超过 2 分钟) +``` +**输出:** +suggestions[3]{suggestion,affinity_delta,tags}: +问她"要不要视频聊聊?"或"需要我陪你吗?",展现关心,+4,关心体贴、推进 +分享你今天的趣事转移话题,如"我今天也遇到个搞笑的事...",打破沉默,+2,破冰、分享 +直接问她"是不是有什么烦心事?",鼓励她倾诉,+3,共情倾听、深入 + +## 示例4:需要建议 - 换一批(去重) +``` +【对话历史】 +[1分钟前] 角色:你觉得我怎么样? +【上一批建议】 +1. 诚实夸奖她的优点,如"我觉得你很善良" +2. 用开玩笑的方式回避,如"你钓鱼执法呢?" +3. 反问她"你想听真话还是假话?" 
+【触发方式】用户点击"换一批" +``` +**输出:** +suggestions[3]{suggestion,affinity_delta,tags}: +主动示好说"我喜欢和你聊天",然后问她为什么突然这么问,+4,表达好感、推进 +分享具体观察如"我发现你特别细心,上次还记得我说过的...",用细节打动她,+3,细节共情、真诚 +稍微撩一下说"你想听我夸你吗?那要做好心理准备哦",营造暧昧氛围,+5,暧昧撩拨、幽默 + +--- + +# 正式任务 +请根据上述【角色档案】和【对话历史】,执行任务: +1. 判断是否需要建议。注意:{{skipRule}} +2. **如无需建议**,仅输出:SKIP +3. **如需要建议**,输出 {{count}} 条 TOON 格式建议,覆盖不同策略维度 + +【格式要求】 +- TOON 格式表头:suggestions[{{count}}]{suggestion,affinity_delta,tags}: +- 每行一个建议,格式:建议内容,好感度变化预测,标签列表 +- suggestion(建议内容):1-2句话的详细可执行思路/话术,结合角色喜好/忌讳与情感状态 +- affinity_delta(好感度变化预测):-10 到 +10 的整数(只能输出整数;正数表示好感上升,负数表示下降) +- tags(策略标签):2-3个策略标签,用逗号分隔(如"积极回应,推进") + +【策略要求】 +- 选项必须覆盖不同策略维度:保守稳妥、积极进取、轻松幽默、共情等,不要同质化 +- 严格结合触发方式:静默→破冰延续;消息累积→综合回应;话题转折→先回应再推进;主动→多元供选 +- 严格结合角色档案:投其所好、避开忌讳,符合性格与好感阶段边界 +- 如果提供了上一批建议,务必生成不同方向的新选项,避免与列表雷同或轻微改写 +- 不要直接代替玩家发言;不要输出泛化空话(如"多聊聊""继续沟通");不要复述历史;不编造不存在的事实 + +请开始输出: diff --git a/desktop/src/db/LiveGalGame.code-workspace b/desktop/src/db/LiveGalGame.code-workspace new file mode 100644 index 0000000..0461619 --- /dev/null +++ b/desktop/src/db/LiveGalGame.code-workspace @@ -0,0 +1,8 @@ +{ + "folders": [ + { + "path": "../../.." + } + ], + "settings": {} +} \ No newline at end of file diff --git a/desktop/src/db/database.js b/desktop/src/db/database.js index 4643a02..0567d2d 100644 --- a/desktop/src/db/database.js +++ b/desktop/src/db/database.js @@ -1,1848 +1,4 @@ -import Database from 'better-sqlite3'; -import path from 'path'; -import fs from 'fs'; -import { fileURLToPath } from 'url'; +// 模块化版本的数据库管理器 +// 原 database.js 已拆分为多个模块文件,位于 ./modules/ 目录下 -// 获取 __dirname 的 ESM 等效方式 -const __filename = fileURLToPath(import.meta.url); -const __dirname = path.dirname(__filename); - -class DatabaseManager { - constructor(options = {}) { - // 数据库文件路径 - const customPath = options.dbPath || process.env.LIVEGALGAME_DB_PATH; - const resolvedPath = customPath - ? 
path.resolve(customPath) - : path.join(__dirname, '../../data/livegalgame.db'); - this.dbPath = resolvedPath; - - // 确保data目录存在 - const dataDir = path.dirname(this.dbPath); - if (!fs.existsSync(dataDir)) { - fs.mkdirSync(dataDir, { recursive: true }); - } - - // 创建数据库连接 - this.db = new Database(this.dbPath, { - verbose: console.log // 关闭 SQL 语句打印,避免每次启动都输出数据库 schema - }); - - // 启用外键约束 - this.db.pragma('foreign_keys = ON'); - - // 初始化数据库表 - this.initialize(); - - console.log('Database initialized at:', this.dbPath); - } - - // 初始化数据库表 - initialize() { - console.log('Initializing database schema...'); - const schemaPath = path.join(__dirname, 'schema.sql'); - const schema = fs.readFileSync(schemaPath, 'utf-8'); - - // 执行SQL语句(分割并逐条执行) - const statements = schema.split(';').filter(stmt => stmt.trim()); - - // 开始事务 - const transaction = this.db.transaction(() => { - for (const statement of statements) { - if (statement.trim()) { - this.db.exec(statement); - } - } - }); - - transaction(); - console.log('Database schema initialized'); - - // 初始化示例数据(如果数据库为空) - this.seedSampleData(); - - // 初始化默认 ASR 配置(如果没有) - this.seedDefaultASRConfig(); - - // 修复 ASR 配置(迁移旧的/错误的模型名称) - this.fixASRConfig(); - - // 初始化默认音频源(如果没有) - this.seedDefaultAudioSources(); - } - - // 关闭数据库连接 - close() { - if (this.db) { - this.db.close(); - } - } - - // ========== 角色相关方法 ========== - - // 创建角色 - createCharacter(characterData) { - const stmt = this.db.prepare(` - INSERT INTO characters (id, name, nickname, relationship_label, avatar_color, affinity, created_at, updated_at, notes) - VALUES (@id, @name, @nickname, @relationship_label, @avatar_color, @affinity, @created_at, @updated_at, @notes) - `); - - const info = stmt.run({ - id: characterData.id || this.generateId(), - name: characterData.name, - nickname: characterData.nickname || null, - relationship_label: characterData.relationship_label || null, - avatar_color: characterData.avatar_color || '#ff6b6b', - affinity: characterData.affinity || 
50, - created_at: Date.now(), - updated_at: Date.now(), - notes: characterData.notes || null - }); - - return this.getCharacterById(characterData.id || info.lastInsertRowid); - } - - // 获取所有角色 - getAllCharacters() { - const stmt = this.db.prepare(` - SELECT c.*, - GROUP_CONCAT(t.name) as tags - FROM characters c - LEFT JOIN character_tags ct ON c.id = ct.character_id - LEFT JOIN tags t ON ct.tag_id = t.id - GROUP BY c.id - ORDER BY c.updated_at DESC - `); - - return stmt.all().map(row => ({ - ...row, - tags: row.tags ? row.tags.split(',') : [] - })); - } - - // 获取单个角色 - getCharacterById(id) { - const stmt = this.db.prepare(` - SELECT c.*, - GROUP_CONCAT(t.name) as tags - FROM characters c - LEFT JOIN character_tags ct ON c.id = ct.character_id - LEFT JOIN tags t ON ct.tag_id = t.id - WHERE c.id = ? - GROUP BY c.id - `); - - const row = stmt.get(id); - if (!row) return null; - - return { - ...row, - tags: row.tags ? row.tags.split(',') : [] - }; - } - - // 更新角色 - updateCharacter(id, updates) { - const fields = []; - const values = { id }; - - for (const [key, value] of Object.entries(updates)) { - if (key !== 'id' && key !== 'tags') { - fields.push(`${key} = @${key}`); - values[key] = value; - } - } - - fields.push('updated_at = @updated_at'); - values.updated_at = Date.now(); - - const stmt = this.db.prepare(` - UPDATE characters - SET ${fields.join(', ')} - WHERE id = @id - `); - - stmt.run(values); - return this.getCharacterById(id); - } - - // ========== 对话相关方法 ========== - - // 创建对话 - createConversation(conversationData) { - const stmt = this.db.prepare(` - INSERT INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) - VALUES (@id, @character_id, @title, @date, @affinity_change, @summary, @tags, @created_at, @updated_at) - `); - - const info = stmt.run({ - id: conversationData.id || this.generateId(), - character_id: conversationData.character_id, - title: conversationData.title || null, - date: 
conversationData.date || Date.now(), - affinity_change: conversationData.affinity_change || 0, - summary: conversationData.summary || null, - tags: conversationData.tags || null, - created_at: Date.now(), - updated_at: Date.now() - }); - - return this.getConversationById(conversationData.id || info.lastInsertRowid); - } - - // 获取角色的所有对话(带角色信息和消息数) - getConversationsByCharacter(characterId) { - const stmt = this.db.prepare(` - SELECT - c.*, - char.name as character_name, - char.avatar_color as character_avatar_color, - char.id as character_id, - COUNT(m.id) as message_count - FROM conversations c - INNER JOIN characters char ON c.character_id = char.id - LEFT JOIN messages m ON c.id = m.conversation_id - WHERE c.character_id = ? - GROUP BY c.id - ORDER BY c.created_at DESC - `); - - return stmt.all(characterId); - } - - // 获取单个对话 - getConversationById(id) { - const stmt = this.db.prepare('SELECT * FROM conversations WHERE id = ?'); - return stmt.get(id); - } - - // 更新对话 - updateConversation(id, updates) { - const fields = []; - const values = { id }; - - for (const [key, value] of Object.entries(updates)) { - if (key !== 'id') { - fields.push(`${key} = @${key}`); - values[key] = value; - } - } - - fields.push('updated_at = @updated_at'); - values.updated_at = Date.now(); - - const stmt = this.db.prepare(` - UPDATE conversations - SET ${fields.join(', ')} - WHERE id = @id - `); - - stmt.run(values); - return this.getConversationById(id); - } - - // ========== 消息相关方法 ========== - - // 创建消息 - createMessage(messageData) { - const stmt = this.db.prepare(` - INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) - VALUES (@id, @conversation_id, @sender, @content, @timestamp, @is_ai_generated) - `); - - const messageId = messageData.id || this.generateId(); - - stmt.run({ - id: messageId, - conversation_id: messageData.conversation_id, - sender: messageData.sender, // 'user' or 'character' - content: messageData.content, - timestamp: 
messageData.timestamp || Date.now(), - is_ai_generated: messageData.is_ai_generated ? 1 : 0 - }); - - return this.getMessageById(messageId); - } - - // 获取对话的所有消息 - getMessagesByConversation(conversationId) { - const stmt = this.db.prepare(` - SELECT * FROM messages - WHERE conversation_id = ? - ORDER BY timestamp ASC - `); - - return stmt.all(conversationId); - } - - // 获取单个消息 - getMessageById(id) { - const stmt = this.db.prepare('SELECT * FROM messages WHERE id = ?'); - return stmt.get(id); - } - - // 更新消息 - updateMessage(id, updates) { - const allowedFields = ['content']; - const updateFields = Object.keys(updates).filter(key => allowedFields.includes(key)); - - if (updateFields.length === 0) { - return this.getMessageById(id); - } - - const setClause = updateFields.map(field => `${field} = @${field}`).join(', '); - const stmt = this.db.prepare(` - UPDATE messages - SET ${setClause} - WHERE id = @id - `); - - const params = { id }; - updateFields.forEach(field => { - params[field] = updates[field]; - }); - - stmt.run(params); - return this.getMessageById(id); - } - - // ========== 删除方法 ========== - - // 删除对话(级联删除相关消息、AI分析、AI建议) - deleteConversation(conversationId) { - const stmt = this.db.prepare('DELETE FROM conversations WHERE id = ?'); - const info = stmt.run(conversationId); - return info.changes > 0; - } - - // 删除角色(级联删除相关对话、消息、标签关联、角色详情) - deleteCharacter(characterId) { - const stmt = this.db.prepare('DELETE FROM characters WHERE id = ?'); - const info = stmt.run(characterId); - return info.changes > 0; - } - - // ========== 标签相关方法 ========== - - // 创建标签 - createTag(tagData) { - const stmt = this.db.prepare(` - INSERT OR IGNORE INTO tags (id, name, color) - VALUES (@id, @name, @color) - `); - - const info = stmt.run({ - id: tagData.id || this.generateId(), - name: tagData.name, - color: tagData.color || 'primary' - }); - - return this.getTagById(tagData.id || info.lastInsertRowid); - } - - // 获取所有标签 - getAllTags() { - const stmt = this.db.prepare('SELECT * 
FROM tags ORDER BY name'); - return stmt.all(); - } - - // 获取单个标签 - getTagById(id) { - const stmt = this.db.prepare('SELECT * FROM tags WHERE id = ?'); - return stmt.get(id); - } - - // 为角色添加标签 - addTagToCharacter(characterId, tagId) { - const stmt = this.db.prepare(` - INSERT OR IGNORE INTO character_tags (character_id, tag_id) - VALUES (?, ?) - `); - - stmt.run(characterId, tagId); - } - - // ========== 统计相关方法 ========== - - // 获取对话总数 - getConversationCount() { - const stmt = this.db.prepare('SELECT COUNT(*) as count FROM conversations'); - return stmt.get().count; - } - - // 获取消息总数 - getMessageCount() { - const stmt = this.db.prepare('SELECT COUNT(*) as count FROM messages'); - return stmt.get().count; - } - - // 获取角色的对话统计 - getCharacterStats(characterId) { - const stmt = this.db.prepare(` - SELECT - COUNT(DISTINCT c.id) as conversation_count, - COUNT(m.id) as message_count, - MAX(c.date) as last_conversation_date - FROM conversations c - LEFT JOIN messages m ON c.id = m.conversation_id - WHERE c.character_id = ? 
- `); - - return stmt.get(characterId); - } - - // 获取统计数据 - getStatistics() { - const characterCount = this.db.prepare('SELECT COUNT(*) as count FROM characters').get().count; - const conversationCount = this.db.prepare('SELECT COUNT(*) as count FROM conversations').get().count; - const messageCount = this.db.prepare('SELECT COUNT(*) as count FROM messages').get().count; - - // 计算平均好感度 - const avgAffinity = this.db.prepare('SELECT AVG(affinity) as avg FROM characters').get().avg || 0; - - return { - characterCount, - conversationCount, - messageCount, - avgAffinity: Math.round(avgAffinity) - }; - } - - // 获取角色页面的统计数据 - getCharacterPageStatistics() { - // 总计攻略对象 - const characterCount = this.db.prepare('SELECT COUNT(*) as count FROM characters').get().count; - - // 活跃对话:两天内创建的新对话 - const twoDaysAgo = Date.now() - (2 * 24 * 60 * 60 * 1000); - const activeConversationCount = this.db.prepare(` - SELECT COUNT(*) as count - FROM conversations - WHERE created_at >= ? - `).get(twoDaysAgo).count; - - // 计算平均好感度 - const avgAffinity = this.db.prepare('SELECT AVG(affinity) as avg FROM characters').get().avg || 0; - - return { - characterCount, - activeConversationCount, - avgAffinity: Math.round(avgAffinity) - }; - } - - // 获取最近对话(带角色信息) - getRecentConversations(limit = 10) { - const stmt = this.db.prepare(` - SELECT - c.*, - char.name as character_name, - char.avatar_color as character_avatar_color, - char.id as character_id, - COUNT(m.id) as message_count - FROM conversations c - INNER JOIN characters char ON c.character_id = char.id - LEFT JOIN messages m ON c.id = m.conversation_id - GROUP BY c.id - ORDER BY c.updated_at DESC - LIMIT ? 
- `); - - return stmt.all(limit); - } - - // 获取所有对话(带角色信息) - getAllConversations() { - const stmt = this.db.prepare(` - SELECT - c.*, - char.name as character_name, - char.avatar_color as character_avatar_color, - char.id as character_id, - COUNT(m.id) as message_count - FROM conversations c - INNER JOIN characters char ON c.character_id = char.id - LEFT JOIN messages m ON c.id = m.conversation_id - GROUP BY c.id - ORDER BY c.created_at DESC - `); - - return stmt.all(); - } - - // 获取对话的消息 - getMessagesByConversation(conversationId) { - const stmt = this.db.prepare(` - SELECT * FROM messages - WHERE conversation_id = ? - ORDER BY timestamp ASC - `); - - return stmt.all(conversationId); - } - - // ========== AI分析相关方法 ========== - - // 获取对话的AI分析报告 - getConversationAnalysis(conversationId) { - try { - const stmt = this.db.prepare(` - SELECT * FROM ai_analysis - WHERE conversation_id = ? AND insight_type = 'analysis_report' - ORDER BY created_at DESC - LIMIT 1 - `); - const result = stmt.get(conversationId); - console.log(`[DB] getConversationAnalysis for ${conversationId}:`, result ? 'found' : 'not found'); - if (result) { - console.log(`[DB] Analysis report content:`, result.content); - } - return result || null; - } catch (error) { - console.error('Error getting conversation analysis:', error); - return null; - } - } - - // 获取对话的关键时刻回放 - getKeyMoments(conversationId) { - try { - const stmt = this.db.prepare(` - SELECT - a.*, - m.content as message_content, - m.timestamp as message_timestamp, - m.sender - FROM ai_analysis a - LEFT JOIN messages m ON a.message_id = m.id - WHERE a.conversation_id = ? AND a.insight_type = 'key_moment' - ORDER BY a.created_at ASC - `); - return stmt.all(conversationId) || []; - } catch (error) { - console.error('Error getting key moments:', error); - return []; - } - } - - // 获取对话的行动建议 - getActionSuggestions(conversationId) { - try { - const stmt = this.db.prepare(` - SELECT * FROM ai_suggestions - WHERE conversation_id = ? 
- ORDER BY created_at DESC - `); - return stmt.all(conversationId) || []; - } catch (error) { - console.error('Error getting action suggestions:', error); - return []; - } - } - - // 获取对话的完整AI分析数据 - getConversationAIData(conversationId) { - console.log(`[DB] Getting AI data for conversation: ${conversationId}`); - - // 获取分析报告 - const analysisReport = this.getConversationAnalysis(conversationId); - console.log(`[DB] Analysis report found:`, analysisReport ? 'yes' : 'no'); - - // 获取关键时刻 - const keyMoments = this.getKeyMoments(conversationId); - console.log(`[DB] Key moments found: ${keyMoments.length}`); - - // 获取行动建议 - const actionSuggestions = this.getActionSuggestions(conversationId); - console.log(`[DB] Action suggestions found: ${actionSuggestions.length}`); - - // 获取对话信息以获取角色ID - const conversation = this.getConversationById(conversationId); - - // 获取本轮对话的表现态度分析(从ai_analysis表获取) - let attitudeAnalysis = null; - try { - const attitudeStmt = this.db.prepare(` - SELECT content FROM ai_analysis - WHERE conversation_id = ? AND insight_type = 'attitude_analysis' - ORDER BY created_at DESC - LIMIT 1 - `); - const attitudeData = attitudeStmt.get(conversationId); - if (attitudeData && attitudeData.content) { - // 如果content是JSON,解析它;否则直接使用 - try { - const parsed = JSON.parse(attitudeData.content); - const affinityChange = parsed.affinityChange || conversation?.affinity_change || 0; - attitudeAnalysis = { - description: parsed.description || parsed.content || attitudeData.content, - affinityChange: affinityChange, - trend: parsed.trend || (affinityChange > 0 ? '上升' : affinityChange < 0 ? '下降' : '持平') - }; - } catch (e) { - // 如果不是JSON,直接使用字符串,从conversation获取affinity_change - const affinityChange = conversation?.affinity_change || 0; - attitudeAnalysis = { - description: attitudeData.content, - affinityChange: affinityChange, - trend: affinityChange > 0 ? '上升' : affinityChange < 0 ? 
'下降' : '持平' - }; - } - } else if (conversation) { - // 如果没有专门的attitude_analysis,使用conversation的affinity_change作为基础 - const affinityChange = conversation.affinity_change || 0; - attitudeAnalysis = { - description: '本轮对话中,对方表现积极,互动良好。', - affinityChange: affinityChange, - trend: affinityChange > 0 ? '上升' : affinityChange < 0 ? '下降' : '持平' - }; - } - } catch (error) { - console.error('Error getting attitude analysis:', error); - } - - // 解析分析报告 - let parsedReport = null; - if (analysisReport && analysisReport.content) { - try { - parsedReport = JSON.parse(analysisReport.content); - } catch (e) { - console.error('Failed to parse analysis report:', e); - } - } - - // 解析关键时刻评价 - const parsedKeyMoments = keyMoments.map(km => { - let evaluation = null; - if (km.content) { - try { - evaluation = JSON.parse(km.content); - } catch (e) { - evaluation = km.content; // 如果不是JSON,直接使用字符串 - } - } - return { - id: km.id, - timestamp: km.message_timestamp, - messageContent: km.message_content, - sender: km.sender, - evaluation: evaluation - }; - }); - - const result = { - analysisReport: parsedReport, - keyMoments: parsedKeyMoments, - attitudeAnalysis, - actionSuggestions: actionSuggestions.map(as => ({ - id: as.id, - title: as.title, - content: as.content, - tags: as.tags ? 
as.tags.split(',').map(t => t.trim()) : [] - })) - }; - - console.log(`[DB] Returning AI data:`, { - hasAnalysisReport: !!result.analysisReport, - keyMomentsCount: result.keyMoments.length, - hasAttitudeAnalysis: !!result.attitudeAnalysis, - actionSuggestionsCount: result.actionSuggestions.length - }); - - return result; - } - - // ========== 角色详情相关方法 ========== - - // 获取角色详情 - getCharacterDetails(characterId) { - try { - const stmt = this.db.prepare('SELECT * FROM character_details WHERE character_id = ?'); - const row = stmt.get(characterId); - - if (!row) { - // 如果没有详情记录,尝试从会话中生成 - return this.generateCharacterDetailsFromConversations(characterId); - } - - // 解析JSON字段 - return { - character_id: row.character_id, - profile: row.profile ? JSON.parse(row.profile) : null, - personality_traits: row.personality_traits ? JSON.parse(row.personality_traits) : null, - likes_dislikes: row.likes_dislikes ? JSON.parse(row.likes_dislikes) : null, - important_events: row.important_events ? JSON.parse(row.important_events) : null, - conversation_summary: row.conversation_summary, - custom_fields: row.custom_fields ? 
JSON.parse(row.custom_fields) : {}, - updated_at: row.updated_at - }; - } catch (error) { - console.error('Error getting character details:', error); - return null; - } - } - - // 从会话中生成角色详情 - generateCharacterDetailsFromConversations(characterId) { - try { - // 获取角色的所有对话 - const conversations = this.getConversationsByCharacter(characterId); - - if (conversations.length === 0) { - return { - character_id: characterId, - profile: null, - personality_traits: null, - likes_dislikes: null, - important_events: null, - conversation_summary: '暂无对话记录', - custom_fields: {}, - updated_at: Date.now() - }; - } - - // 收集所有消息 - const allMessages = []; - const allSummaries = []; - const allTags = []; - const affinityChanges = []; - - for (const conv of conversations) { - const messages = this.getMessagesByConversation(conv.id); - allMessages.push(...messages); - - if (conv.summary) { - allSummaries.push(conv.summary); - } - - if (conv.tags) { - allTags.push(...conv.tags.split(',').map(t => t.trim())); - } - - if (conv.affinity_change) { - affinityChanges.push(conv.affinity_change); - } - } - - // 提取角色消息(sender = 'character') - const characterMessages = allMessages - .filter(msg => msg.sender === 'character') - .map(msg => msg.content); - - // 生成性格特点(从消息中提取关键词和模式) - const personalityTraits = this.extractPersonalityTraits(characterMessages, allTags); - - // 生成喜好厌恶(从消息中提取) - const likesDislikes = this.extractLikesDislikes(characterMessages); - - // 生成重要事件(从对话标题和摘要中提取) - const importantEvents = this.extractImportantEvents(conversations); - - // 生成对话总结 - const conversationSummary = this.generateConversationSummary(conversations, allSummaries, affinityChanges); - - // 生成角色档案(基本信息) - const character = this.getCharacterById(characterId); - const profile = character ? 
{ - name: character.name, - nickname: character.nickname, - relationship_label: character.relationship_label, - affinity: character.affinity, - tags: character.tags || [], - created_at: character.created_at, - notes: character.notes - } : null; - - const details = { - character_id: characterId, - profile: profile, - personality_traits: personalityTraits, - likes_dislikes: likesDislikes, - important_events: importantEvents, - conversation_summary: conversationSummary, - custom_fields: {}, - updated_at: Date.now() - }; - - // 保存到数据库 - this.saveCharacterDetails(characterId, details); - - return details; - } catch (error) { - console.error('Error generating character details:', error); - return null; - } - } - - // 提取性格特点 - extractPersonalityTraits(messages, tags) { - const traits = { - keywords: [], - descriptions: [] - }; - - // 从标签中提取 - if (tags && tags.length > 0) { - traits.keywords = [...new Set(tags)]; - } - - // 从消息中分析(简单关键词匹配) - const traitKeywords = { - '温柔': ['温柔', '体贴', '关心', '照顾'], - '活泼': ['开心', '快乐', '兴奋', '活泼', '活跃'], - '认真': ['认真', '负责', '仔细', '专注'], - '内向': ['安静', '内向', '害羞', '沉默'], - '外向': ['外向', '开朗', '健谈', '热情'], - '幽默': ['有趣', '幽默', '搞笑', '玩笑'], - '真诚': ['真诚', '诚实', '真实', '坦率'] - }; - - const foundTraits = new Set(); - const messageText = messages.join(' '); - - for (const [trait, keywords] of Object.entries(traitKeywords)) { - if (keywords.some(keyword => messageText.includes(keyword))) { - foundTraits.add(trait); - } - } - - traits.keywords = [...new Set([...traits.keywords, ...foundTraits])]; - - // 生成描述 - if (traits.keywords.length > 0) { - traits.descriptions = [ - `从对话中可以看出,${traits.keywords.slice(0, 3).join('、')}是主要特点。`, - `在互动中表现出${traits.keywords[0]}的一面。` - ]; - } - - return traits; - } - - // 提取喜好厌恶 - extractLikesDislikes(messages) { - const likes = []; - const dislikes = []; - - const messageText = messages.join(' '); - - // 简单的关键词匹配(实际应用中可以使用更复杂的NLP) - const likeKeywords = ['喜欢', '爱好', '感兴趣', '爱', '享受', '享受', '享受']; - const 
dislikeKeywords = ['不喜欢', '讨厌', '厌恶', '反感', '不感兴趣']; - - // 提取包含"喜欢"的句子片段 - const likePatterns = messageText.match(/喜欢[^,。!?]*/g) || []; - likePatterns.forEach(pattern => { - const cleaned = pattern.replace(/喜欢/g, '').trim(); - if (cleaned && cleaned.length < 20) { - likes.push(cleaned); - } - }); - - // 提取包含"不喜欢"的句子片段 - const dislikePatterns = messageText.match(/不(喜欢|感兴趣)[^,。!?]*/g) || []; - dislikePatterns.forEach(pattern => { - const cleaned = pattern.replace(/不(喜欢|感兴趣)/g, '').trim(); - if (cleaned && cleaned.length < 20) { - dislikes.push(cleaned); - } - }); - - return { - likes: [...new Set(likes)].slice(0, 10), // 最多10个 - dislikes: [...new Set(dislikes)].slice(0, 10) - }; - } - - // 提取重要事件 - extractImportantEvents(conversations) { - const events = []; - - conversations.forEach(conv => { - if (conv.title || conv.summary) { - events.push({ - title: conv.title || '对话', - summary: conv.summary || '', - date: conv.date, - affinity_change: conv.affinity_change || 0 - }); - } - }); - - // 按日期排序,最新的在前 - events.sort((a, b) => b.date - a.date); - - return events.slice(0, 10); // 最多10个重要事件 - } - - // 生成对话总结 - generateConversationSummary(conversations, summaries, affinityChanges) { - const totalConversations = conversations.length; - const totalAffinityChange = affinityChanges.reduce((sum, change) => sum + change, 0); - const avgAffinityChange = affinityChanges.length > 0 - ? Math.round(totalAffinityChange / affinityChanges.length) - : 0; - - let summary = `共进行了 ${totalConversations} 次对话。`; - - if (summaries.length > 0) { - summary += `主要话题包括:${summaries.slice(0, 3).join('、')}。`; - } - - if (totalAffinityChange !== 0) { - const trend = totalAffinityChange > 0 ? 
'上升' : '下降'; - summary += `好感度总体${trend}了 ${Math.abs(totalAffinityChange)} 点。`; - } - - return summary; - } - - // 保存角色详情 - saveCharacterDetails(characterId, details) { - try { - const stmt = this.db.prepare(` - INSERT OR REPLACE INTO character_details - (character_id, profile, personality_traits, likes_dislikes, important_events, conversation_summary, custom_fields, updated_at) - VALUES (@character_id, @profile, @personality_traits, @likes_dislikes, @important_events, @conversation_summary, @custom_fields, @updated_at) - `); - - stmt.run({ - character_id: characterId, - profile: details.profile ? JSON.stringify(details.profile) : null, - personality_traits: details.personality_traits ? JSON.stringify(details.personality_traits) : null, - likes_dislikes: details.likes_dislikes ? JSON.stringify(details.likes_dislikes) : null, - important_events: details.important_events ? JSON.stringify(details.important_events) : null, - conversation_summary: details.conversation_summary || null, - custom_fields: details.custom_fields ? 
JSON.stringify(details.custom_fields) : '{}', - updated_at: details.updated_at || Date.now() - }); - - return true; - } catch (error) { - console.error('Error saving character details:', error); - return false; - } - } - - // 更新角色详情的自定义字段 - updateCharacterDetailsCustomFields(characterId, customFields) { - try { - const currentDetails = this.getCharacterDetails(characterId); - if (!currentDetails) { - return false; - } - - const updatedCustomFields = { - ...(currentDetails.custom_fields || {}), - ...customFields - }; - - const stmt = this.db.prepare(` - UPDATE character_details - SET custom_fields = @custom_fields, updated_at = @updated_at - WHERE character_id = @character_id - `); - - stmt.run({ - character_id: characterId, - custom_fields: JSON.stringify(updatedCustomFields), - updated_at: Date.now() - }); - - return true; - } catch (error) { - console.error('Error updating custom fields:', error); - return false; - } - } - - // ========== 工具方法 ========== - - // 生成ID - generateId() { - return Date.now().toString(36) + Math.random().toString(36).substr(2); - } - - // 批量插入示例数据(从SQL文件加载) - seedSampleData() { - console.log('Seeding sample data...'); - - // 检查对话数据是否存在 - const conversationCount = this.db.prepare('SELECT COUNT(*) as count FROM conversations').get().count; - const characterCount = this.db.prepare('SELECT COUNT(*) as count FROM characters').get().count; - const aiAnalysisCount = this.db.prepare('SELECT COUNT(*) as count FROM ai_analysis').get().count; - - console.log(`Current database state: ${characterCount} characters, ${conversationCount} conversations, ${aiAnalysisCount} AI analyses`); - - // 如果对话数据已存在,检查是否需要插入AI分析数据 - if (conversationCount > 0) { - // 如果AI分析数据不存在,只插入AI分析相关的数据 - if (aiAnalysisCount === 0) { - console.log('Conversation data exists but AI analysis data missing, inserting AI analysis data only...'); - this.seedAIDataOnly(); - } else { - console.log(`Conversation data already exists (${aiAnalysisCount} AI analyses found), skipping 
seed...`); - // 即使有数据,也检查一下是否有分析报告数据 - const reportCount = this.db.prepare('SELECT COUNT(*) as count FROM ai_analysis WHERE insight_type = ?').get('analysis_report').count; - console.log(`Found ${reportCount} analysis reports in database`); - } - return; - } - - // 如果没有角色数据,需要先插入角色 - if (characterCount === 0) { - console.log('No characters found, will insert all data including characters'); - } else { - console.log('Characters exist, will only insert conversations and messages'); - } - - // 如果角色数据不存在,需要先插入角色数据 - const needCharacters = characterCount === 0; - - try { - // 读取并执行SQL种子文件 - const seedPath = path.join(__dirname, 'seed.sql'); - if (fs.existsSync(seedPath)) { - const seedSQL = fs.readFileSync(seedPath, 'utf-8'); - - // 改进SQL语句分割:先移除注释行,然后按分号分割 - const lines = seedSQL.split('\n'); - let cleanedLines = []; - let inMultiLineStatement = false; - let currentStatement = ''; - - for (let i = 0; i < lines.length; i++) { - let line = lines[i].trim(); - - // 跳过空行和纯注释行 - if (!line || line.startsWith('--')) { - continue; - } - - // 移除行内注释(-- 后面的内容) - const commentIndex = line.indexOf('--'); - if (commentIndex >= 0) { - line = line.substring(0, commentIndex).trim(); - if (!line) continue; - } - - // 累积到当前语句 - currentStatement += (currentStatement ? 
' ' : '') + line; - - // 如果行以分号结尾,说明语句完整 - if (line.endsWith(';')) { - const statement = currentStatement.slice(0, -1).trim(); // 移除末尾的分号 - if (statement) { - cleanedLines.push(statement); - } - currentStatement = ''; - } - } - - // 处理最后可能没有分号的语句 - if (currentStatement.trim()) { - cleanedLines.push(currentStatement.trim()); - } - - console.log(`Found ${cleanedLines.length} SQL statements to execute`); - - const transaction = this.db.transaction(() => { - for (let i = 0; i < cleanedLines.length; i++) { - const statement = cleanedLines[i]; - - // 如果角色数据已存在,跳过角色相关的INSERT语句 - if (!needCharacters && statement.toUpperCase().includes('INSERT') && - (statement.includes('INSERT INTO characters') || - statement.includes('INSERT INTO tags') || - statement.includes('INSERT INTO character_tags'))) { - console.log(`Skipping statement ${i + 1}: character data (already exists)`); - continue; - } - - try { - // 执行SQL语句(添加分号) - this.db.exec(statement + ';'); - if (statement.includes('INSERT INTO conversations')) { - console.log(`✓ Executed conversation INSERT statement ${i + 1}`); - } - } catch (err) { - // 忽略重复插入的错误(INSERT OR IGNORE 会处理) - if (err.message.includes('UNIQUE constraint') || err.message.includes('already exists')) { - console.log(`Statement ${i + 1}: skipped (duplicate)`); - } else { - console.error(`Error executing statement ${i + 1}:`, err.message); - console.error('Statement preview:', statement.substring(0, 150) + '...'); - // 继续执行其他语句,不中断 - } - } - } - }); - - transaction(); - console.log('Sample data seeded successfully from SQL file'); - - // 验证数据插入 - const finalConvCount = this.db.prepare('SELECT COUNT(*) as count FROM conversations').get().count; - const finalMsgCount = this.db.prepare('SELECT COUNT(*) as count FROM messages').get().count; - const finalCharCount = this.db.prepare('SELECT COUNT(*) as count FROM characters').get().count; - console.log(`Data verification: ${finalCharCount} characters, ${finalConvCount} conversations, ${finalMsgCount} messages`); 
- - if (finalConvCount === 0) { - console.warn('⚠️ Warning: No conversations were inserted!'); - console.warn('This might indicate a SQL parsing or execution issue.'); - } else { - console.log('✅ Data seeding completed successfully'); - } - } else { - console.warn('Seed SQL file not found, skipping data seeding'); - } - } catch (error) { - console.error('Error seeding sample data:', error); - console.error(error.stack); - // 不抛出错误,允许应用继续运行 - } - } - - // ========== LLM配置相关方法 ========== - - // 创建或更新LLM配置 - saveLLMConfig(configData) { - const now = Date.now(); - - // 如果设置为默认配置,先取消其他默认配置 - if (configData.is_default) { - const clearDefaultStmt = this.db.prepare('UPDATE llm_configs SET is_default = 0 WHERE is_default = 1'); - clearDefaultStmt.run(); - } - - // 检查是否已存在(通过id或name) - const existingStmt = this.db.prepare('SELECT * FROM llm_configs WHERE id = ? OR name = ?'); - const existing = existingStmt.get(configData.id || '', configData.name || ''); - - if (existing) { - // 更新现有配置 - const updateStmt = this.db.prepare(` - UPDATE llm_configs - SET name = @name, - provider = @provider, - api_key = @api_key, - base_url = @base_url, - is_default = @is_default, - updated_at = @updated_at - WHERE id = @id - `); - - updateStmt.run({ - id: existing.id, - name: configData.name || existing.name, - provider: configData.provider || existing.provider || 'openai', - api_key: configData.api_key || existing.api_key, - base_url: configData.base_url !== undefined ? configData.base_url : existing.base_url, - is_default: configData.is_default !== undefined ? (configData.is_default ? 
1 : 0) : existing.is_default, - updated_at: now - }); - - return this.getLLMConfigById(existing.id); - } else { - // 创建新配置 - const insertStmt = this.db.prepare(` - INSERT INTO llm_configs (id, name, provider, api_key, base_url, is_default, created_at, updated_at) - VALUES (@id, @name, @provider, @api_key, @base_url, @is_default, @created_at, @updated_at) - `); - - const id = configData.id || this.generateId(); - insertStmt.run({ - id, - name: configData.name || '默认配置', - provider: configData.provider || 'openai', - api_key: configData.api_key, - base_url: configData.base_url || null, - is_default: configData.is_default ? 1 : 0, - created_at: now, - updated_at: now - }); - - return this.getLLMConfigById(id); - } - } - - // 获取所有LLM配置 - getAllLLMConfigs() { - const stmt = this.db.prepare('SELECT * FROM llm_configs ORDER BY is_default DESC, updated_at DESC'); - return stmt.all(); - } - - // 获取默认LLM配置 - getDefaultLLMConfig() { - const stmt = this.db.prepare('SELECT * FROM llm_configs WHERE is_default = 1 LIMIT 1'); - return stmt.get(); - } - - // 根据ID获取LLM配置 - getLLMConfigById(id) { - const stmt = this.db.prepare('SELECT * FROM llm_configs WHERE id = ?'); - return stmt.get(id); - } - - // 删除LLM配置 - deleteLLMConfig(id) { - const stmt = this.db.prepare('DELETE FROM llm_configs WHERE id = ?'); - return stmt.run(id); - } - - // 设置默认LLM配置 - setDefaultLLMConfig(id) { - // 先取消所有默认配置 - const clearDefaultStmt = this.db.prepare('UPDATE llm_configs SET is_default = 0 WHERE is_default = 1'); - clearDefaultStmt.run(); - - // 设置新的默认配置 - const setDefaultStmt = this.db.prepare('UPDATE llm_configs SET is_default = 1, updated_at = ? 
WHERE id = ?'); - setDefaultStmt.run(Date.now(), id); - - return this.getLLMConfigById(id); - } - - // 测试LLM连接(ping) - async testLLMConnection(configData) { - try { - // 动态导入openai(因为它是可选依赖) - const { default: OpenAI } = await import('openai'); - - const config = { - apiKey: configData.api_key, - }; - - // 如果提供了base_url,使用自定义URL - if (configData.base_url) { - config.baseURL = configData.base_url; - } - - const client = new OpenAI(config); - - // 使用models.list()来测试连接(这是一个轻量级的API调用) - await client.models.list(); - - return { success: true, message: '连接成功' }; - } catch (error) { - console.error('LLM connection test failed:', error); - - // 提供更友好的错误信息 - let errorMessage = '连接失败'; - if (error.status === 401) { - errorMessage = 'API密钥无效'; - } else if (error.status === 404) { - errorMessage = 'API端点不存在'; - } else if (error.message) { - errorMessage = error.message; - } - - return { success: false, message: errorMessage, error: error.message }; - } - } - - // 只插入AI分析数据(当对话数据已存在但AI分析数据缺失时) - seedAIDataOnly() { - console.log('Seeding AI analysis data only...'); - - try { - const seedPath = path.join(__dirname, 'seed.sql'); - if (!fs.existsSync(seedPath)) { - console.warn('Seed SQL file not found, skipping AI data seeding'); - return; - } - - const seedSQL = fs.readFileSync(seedPath, 'utf-8'); - const lines = seedSQL.split('\n'); - let cleanedLines = []; - let currentStatement = ''; - - for (let i = 0; i < lines.length; i++) { - let line = lines[i]; - const originalLine = line; - - // 跳过空行 - if (!line.trim()) { - continue; - } - - // 跳过纯注释行(整行都是注释) - // 但如果currentStatement已经有内容,说明这是多行语句中的注释,应该跳过但不清空currentStatement - if (line.trim().startsWith('--')) { - continue; // 跳过注释行,但保留currentStatement - } - - // 移除行内注释(但保留SQL代码) - const commentIndex = line.indexOf('--'); - if (commentIndex >= 0) { - // 检查--是否在字符串内(简单检查) - const beforeComment = line.substring(0, commentIndex); - const singleQuotes = (beforeComment.match(/'/g) || []).length; - // 如果单引号数量是偶数,说明--不在字符串内,可以移除注释 - if 
(singleQuotes % 2 === 0) { - line = line.substring(0, commentIndex).trim(); - if (!line) continue; - } - } - - line = line.trim(); - if (!line) continue; - - // 累积到当前语句 - if (currentStatement) { - currentStatement += ' ' + line; - } else { - currentStatement = line; - } - - // 如果行以分号结尾,说明语句完整 - if (line.endsWith(';')) { - const statement = currentStatement.slice(0, -1).trim(); // 移除末尾的分号 - if (statement) { - // 只处理AI分析相关的INSERT语句 - const upperStatement = statement.toUpperCase(); - const isAIAnalysis = upperStatement.includes('INSERT') && upperStatement.includes('AI_ANALYSIS'); - const isAISuggestions = upperStatement.includes('INSERT') && upperStatement.includes('AI_SUGGESTIONS'); - - if (isAIAnalysis || isAISuggestions) { - cleanedLines.push(statement); - console.log(`[SQL Parser] Found AI statement (line ${i + 1}): ${statement.substring(0, 150)}...`); - } - } - currentStatement = ''; - } - } - - if (currentStatement.trim()) { - const statement = currentStatement.trim(); - const upperStatement = statement.toUpperCase(); - if (upperStatement.includes('INSERT') && - (upperStatement.includes('INSERT INTO AI_ANALYSIS') || - upperStatement.includes('INSERT INTO AI_SUGGESTIONS'))) { - cleanedLines.push(statement); - console.log(`[SQL Parser] Found AI statement (final): ${statement.substring(0, 100)}...`); - } - } - - console.log(`Found ${cleanedLines.length} AI-related SQL statements to execute`); - - // 如果没找到,打印一些调试信息 - if (cleanedLines.length === 0) { - console.log('[SQL Parser] Debug: Checking seed.sql content...'); - const seedSQL = fs.readFileSync(path.join(__dirname, 'seed.sql'), 'utf-8'); - const hasAIAnalysis = seedSQL.includes('INSERT') && seedSQL.includes('ai_analysis'); - const hasAISuggestions = seedSQL.includes('INSERT') && seedSQL.includes('ai_suggestions'); - console.log(`[SQL Parser] seed.sql contains ai_analysis: ${hasAIAnalysis}, ai_suggestions: ${hasAISuggestions}`); - - // 尝试直接查找包含ai_analysis的行 - const lines = seedSQL.split('\n'); - let 
aiAnalysisLines = 0; - let aiSuggestionLines = 0; - for (let i = 0; i < lines.length; i++) { - if (lines[i].includes('ai_analysis')) aiAnalysisLines++; - if (lines[i].includes('ai_suggestions')) aiSuggestionLines++; - } - console.log(`[SQL Parser] Lines containing ai_analysis: ${aiAnalysisLines}, ai_suggestions: ${aiSuggestionLines}`); - } - - if (cleanedLines.length === 0) { - console.log('No AI analysis data found in seed file'); - return; - } - - // 打印前几个语句用于调试 - if (cleanedLines.length > 0) { - console.log('First statement preview:', cleanedLines[0].substring(0, 200) + '...'); - } - - const transaction = this.db.transaction(() => { - let successCount = 0; - let errorCount = 0; - for (let i = 0; i < cleanedLines.length; i++) { - const statement = cleanedLines[i]; - try { - this.db.exec(statement + ';'); - successCount++; - if (statement.includes('INSERT INTO ai_analysis')) { - console.log(`✓ Executed AI analysis INSERT statement ${i + 1}/${cleanedLines.length}`); - } else if (statement.includes('INSERT INTO ai_suggestions')) { - console.log(`✓ Executed AI suggestion INSERT statement ${i + 1}/${cleanedLines.length}`); - } - } catch (err) { - errorCount++; - if (err.message.includes('UNIQUE constraint') || err.message.includes('already exists')) { - console.log(`Statement ${i + 1}: skipped (duplicate)`); - } else { - console.error(`Error executing AI statement ${i + 1}:`, err.message); - console.error('Statement preview:', statement.substring(0, 200) + '...'); - } - } - } - console.log(`AI data insertion summary: ${successCount} succeeded, ${errorCount} errors`); - }); - - transaction(); - - // 验证数据插入 - const finalAICount = this.db.prepare('SELECT COUNT(*) as count FROM ai_analysis').get().count; - const finalSuggestionCount = this.db.prepare('SELECT COUNT(*) as count FROM ai_suggestions').get().count; - console.log(`AI data verification: ${finalAICount} AI analyses, ${finalSuggestionCount} AI suggestions`); - console.log('✅ AI analysis data seeding completed 
successfully'); - - } catch (error) { - console.error('Error seeding AI analysis data:', error); - console.error(error.stack); - } - } - - // ========== ASR(语音识别)相关方法 ========== - - // 获取所有音频源配置 - getAudioSources() { - const stmt = this.db.prepare('SELECT * FROM audio_sources ORDER BY created_at ASC'); - return stmt.all(); - } - - // 获取单个音频源配置 - getAudioSourceById(id) { - const stmt = this.db.prepare('SELECT * FROM audio_sources WHERE id = ?'); - return stmt.get(id); - } - - // 创建音频源配置 - createAudioSource(sourceData) { - const stmt = this.db.prepare(` - INSERT INTO audio_sources (id, name, is_active, device_id, device_name, created_at, updated_at) - VALUES (@id, @name, @is_active, @device_id, @device_name, @created_at, @updated_at) - `); - - const info = stmt.run({ - id: sourceData.id || this.generateId(), - name: sourceData.name, - is_active: sourceData.is_active !== undefined ? sourceData.is_active : 0, - device_id: sourceData.device_id || null, - device_name: sourceData.device_name || null, - created_at: Date.now(), - updated_at: Date.now() - }); - - return this.getAudioSourceById(sourceData.id || info.lastInsertRowid); - } - - // 更新音频源配置 - updateAudioSource(id, updates) { - const fields = []; - const values = { id }; - - if (updates.name !== undefined) { - fields.push('name = @name'); - values.name = updates.name; - } - if (updates.is_active !== undefined) { - fields.push('is_active = @is_active'); - values.is_active = updates.is_active; - } - if (updates.device_id !== undefined) { - fields.push('device_id = @device_id'); - values.device_id = updates.device_id; - } - if (updates.device_name !== undefined) { - fields.push('device_name = @device_name'); - values.device_name = updates.device_name; - } - - fields.push('updated_at = @updated_at'); - values.updated_at = Date.now(); - - const stmt = this.db.prepare(` - UPDATE audio_sources - SET ${fields.join(', ')} - WHERE id = @id - `); - - const info = stmt.run(values); - return info.changes > 0 ? 
this.getAudioSourceById(id) : null; - } - - // 获取所有 ASR 配置 - getASRConfigs() { - const stmt = this.db.prepare('SELECT * FROM asr_configs ORDER BY created_at ASC'); - return stmt.all(); - } - - // 获取默认 ASR 配置 - getDefaultASRConfig() { - const stmt = this.db.prepare('SELECT * FROM asr_configs WHERE is_default = 1 LIMIT 1'); - return stmt.get(); - } - - // 获取指定 ASR 配置 - getASRConfigById(id) { - const stmt = this.db.prepare('SELECT * FROM asr_configs WHERE id = ?'); - return stmt.get(id); - } - - // 创建 ASR 配置 - createASRConfig(configData) { - const stmt = this.db.prepare(` - INSERT INTO asr_configs ( - id, model_name, language, enable_vad, sentence_pause_threshold, - retain_audio_files, audio_retention_days, audio_storage_path, - is_default, created_at, updated_at - ) - VALUES ( - @id, @model_name, @language, @enable_vad, @sentence_pause_threshold, - @retain_audio_files, @audio_retention_days, @audio_storage_path, - @is_default, @created_at, @updated_at - ) - `); - - const info = stmt.run({ - id: configData.id || this.generateId(), - model_name: configData.model_name || 'whisper-base', - language: configData.language || 'zh', - enable_vad: configData.enable_vad !== undefined ? configData.enable_vad : 1, - sentence_pause_threshold: configData.sentence_pause_threshold || 1.0, - retain_audio_files: configData.retain_audio_files !== undefined ? configData.retain_audio_files : 0, - audio_retention_days: configData.audio_retention_days || 30, - audio_storage_path: configData.audio_storage_path || null, - is_default: configData.is_default !== undefined ? 
configData.is_default : 0, - created_at: Date.now(), - updated_at: Date.now() - }); - - return this.getASRConfigById(configData.id || info.lastInsertRowid); - } - - // 更新 ASR 配置 - updateASRConfig(id, updates) { - const fields = []; - const values = { id }; - - if (updates.model_name !== undefined) { - fields.push('model_name = @model_name'); - values.model_name = updates.model_name; - } - if (updates.language !== undefined) { - fields.push('language = @language'); - values.language = updates.language; - } - if (updates.enable_vad !== undefined) { - fields.push('enable_vad = @enable_vad'); - values.enable_vad = updates.enable_vad; - } - if (updates.sentence_pause_threshold !== undefined) { - fields.push('sentence_pause_threshold = @sentence_pause_threshold'); - values.sentence_pause_threshold = updates.sentence_pause_threshold; - } - if (updates.retain_audio_files !== undefined) { - fields.push('retain_audio_files = @retain_audio_files'); - values.retain_audio_files = updates.retain_audio_files; - } - if (updates.audio_retention_days !== undefined) { - fields.push('audio_retention_days = @audio_retention_days'); - values.audio_retention_days = updates.audio_retention_days; - } - if (updates.audio_storage_path !== undefined) { - fields.push('audio_storage_path = @audio_storage_path'); - values.audio_storage_path = updates.audio_storage_path; - } - - fields.push('updated_at = @updated_at'); - values.updated_at = Date.now(); - - const stmt = this.db.prepare(` - UPDATE asr_configs - SET ${fields.join(', ')} - WHERE id = @id - `); - - const info = stmt.run(values); - return info.changes > 0 ? 
this.getASRConfigById(id) : null; - } - - // 设置默认 ASR 配置 - setDefaultASRConfig(id) { - const transaction = this.db.transaction(() => { - // 先将所有配置设为非默认 - this.db.prepare('UPDATE asr_configs SET is_default = 0').run(); - // 然后设置指定配置为默认 - const stmt = this.db.prepare('UPDATE asr_configs SET is_default = 1 WHERE id = ?'); - return stmt.run(id); - }); - - const info = transaction(); - return info.changes > 0; - } - - // 保存语音识别记录 - saveSpeechRecord(recordData) { - // 验证外键约束:检查对话是否存在 - if (recordData.conversation_id) { - const conversation = this.getConversationById(recordData.conversation_id); - if (!conversation) { - throw new Error(`Conversation not found: ${recordData.conversation_id}. Cannot save speech record.`); - } - } else { - throw new Error('conversation_id is required for speech record'); - } - - // 验证外键约束:检查音频源是否存在,如果不存在则自动创建 - let audioSource = this.getAudioSourceById(recordData.source_id); - if (!audioSource) { - console.warn(`Audio source not found: ${recordData.source_id}, creating it automatically...`); - // 自动创建音频源 - audioSource = this.createAudioSource({ - id: recordData.source_id, - name: recordData.source_id === 'speaker1' ? 'Speaker 1' : recordData.source_id === 'speaker2' ? 
'Speaker 2' : `Audio Source ${recordData.source_id}`, - is_active: 1, - device_id: null, - device_name: null - }); - console.log(`Auto-created audio source: ${recordData.source_id}`); - } - - // 生成或使用提供的ID - const recordId = recordData.id || this.generateId(); - - const stmt = this.db.prepare(` - INSERT INTO speech_recognition_records ( - id, conversation_id, source_id, message_id, - audio_data, audio_file_path, audio_duration, - recognized_text, confidence, start_time, end_time, - status, error_message, created_at, updated_at - ) - VALUES ( - @id, @conversation_id, @source_id, @message_id, - @audio_data, @audio_file_path, @audio_duration, - @recognized_text, @confidence, @start_time, @end_time, - @status, @error_message, @created_at, @updated_at - ) - `); - - const info = stmt.run({ - id: recordId, - conversation_id: recordData.conversation_id, - source_id: recordData.source_id, - message_id: recordData.message_id || null, - audio_data: recordData.audio_data || null, - audio_file_path: recordData.audio_file_path || null, - audio_duration: recordData.audio_duration || null, - recognized_text: recordData.recognized_text || null, - confidence: recordData.confidence || null, - start_time: recordData.start_time, - end_time: recordData.end_time || null, - status: recordData.status || 'recording', - error_message: recordData.error_message || null, - created_at: Date.now(), - updated_at: Date.now() - }); - - return this.getSpeechRecordById(recordId); - } - - // 获取语音识别记录 - getSpeechRecordById(id) { - const stmt = this.db.prepare(` - SELECT sr.*, asrc.name as source_name - FROM speech_recognition_records sr - LEFT JOIN audio_sources asrc ON sr.source_id = asrc.id - WHERE sr.id = ? 
- `); - return stmt.get(id); - } - - // 更新语音识别记录 - updateSpeechRecord(id, updates) { - const fields = []; - const values = { id }; - - if (updates.message_id !== undefined) { - fields.push('message_id = @message_id'); - values.message_id = updates.message_id; - } - if (updates.audio_data !== undefined) { - fields.push('audio_data = @audio_data'); - values.audio_data = updates.audio_data; - } - if (updates.audio_file_path !== undefined) { - fields.push('audio_file_path = @audio_file_path'); - values.audio_file_path = updates.audio_file_path; - } - if (updates.audio_duration !== undefined) { - fields.push('audio_duration = @audio_duration'); - values.audio_duration = updates.audio_duration; - } - if (updates.recognized_text !== undefined) { - fields.push('recognized_text = @recognized_text'); - values.recognized_text = updates.recognized_text; - } - if (updates.confidence !== undefined) { - fields.push('confidence = @confidence'); - values.confidence = updates.confidence; - } - if (updates.end_time !== undefined) { - fields.push('end_time = @end_time'); - values.end_time = updates.end_time; - } - if (updates.status !== undefined) { - fields.push('status = @status'); - values.status = updates.status; - } - if (updates.error_message !== undefined) { - fields.push('error_message = @error_message'); - values.error_message = updates.error_message; - } - - fields.push('updated_at = @updated_at'); - values.updated_at = Date.now(); - - const stmt = this.db.prepare(` - UPDATE speech_recognition_records - SET ${fields.join(', ')} - WHERE id = @id - `); - - const info = stmt.run(values); - return info.changes > 0 ? this.getSpeechRecordById(id) : null; - } - - // 获取对话的语音识别记录 - getSpeechRecordsByConversation(conversationId) { - const stmt = this.db.prepare(` - SELECT sr.*, asrc.name as source_name - FROM speech_recognition_records sr - LEFT JOIN audio_sources asrc ON sr.source_id = asrc.id - WHERE sr.conversation_id = ? 
- ORDER BY sr.start_time ASC - `); - return stmt.all(conversationId); - } - - // 删除过期的语音识别记录(清理音频文件) - cleanupExpiredAudioFiles(retentionDays) { - const cutoffTime = Date.now() - (retentionDays * 24 * 60 * 60 * 1000); - - const stmt = this.db.prepare(` - SELECT id, audio_file_path - FROM speech_recognition_records - WHERE audio_file_path IS NOT NULL - AND created_at < ? - `); - - const expiredRecords = stmt.all(cutoffTime); - - // 删除文件和数据库记录 - const deleteStmt = this.db.prepare(` - UPDATE speech_recognition_records - SET audio_file_path = NULL, audio_data = NULL - WHERE id = ? - `); - - let deletedCount = 0; - for (const record of expiredRecords) { - if (record.audio_file_path && fs.existsSync(record.audio_file_path)) { - try { - fs.unlinkSync(record.audio_file_path); - deleteStmt.run(record.id); - deletedCount++; - } catch (err) { - console.error(`Error deleting audio file ${record.audio_file_path}:`, err); - } - } - } - - return deletedCount; - } - - // 初始化默认 ASR 配置(如果没有配置的话) - seedDefaultASRConfig() { - try { - const count = this.db.prepare('SELECT COUNT(*) as count FROM asr_configs').get().count; - - if (count === 0) { - console.log('No ASR config found, creating default config...'); - - const defaultConfig = { - model_name: 'funasr-paraformer', - language: 'zh', - enable_vad: 1, - sentence_pause_threshold: 1.0, - retain_audio_files: 0, - audio_retention_days: 30, - audio_storage_path: null, - is_default: 1 - }; - - const config = this.createASRConfig(defaultConfig); - console.log('Default ASR config created:', config); - return config; - } else { - console.log(`ASR configs already exist (${count} configs found), skipping default config creation`); - return null; - } - } catch (error) { - console.error('Error seeding default ASR config:', error); - return null; - } - } - - // 修复 ASR 配置(迁移旧的/错误的模型名称) - fixASRConfig() { - try { - // 将 'base' 或旧的 whisper.cpp 模型名称更新为 'medium' - const stmt = this.db.prepare(` - UPDATE asr_configs - SET model_name = 'medium', 
updated_at = ? - WHERE model_name = 'base' OR model_name LIKE 'ggml%' - `); - - const info = stmt.run(Date.now()); - - if (info.changes > 0) { - console.log(`Migrated ${info.changes} ASR configs to 'medium' model`); - } - } catch (error) { - console.error('Error fixing ASR config:', error); - } - } - - // 初始化默认音频源 - seedDefaultAudioSources() { - try { - // 检查并创建 speaker1 - let speaker1 = this.getAudioSourceById('speaker1'); - if (!speaker1) { - console.log('Creating default audio source: speaker1'); - speaker1 = this.createAudioSource({ - id: 'speaker1', - name: 'Speaker 1', - is_active: 1, - device_id: null, - device_name: null - }); - console.log('Default audio source speaker1 created:', speaker1); - } - - // 检查并创建 speaker2 - let speaker2 = this.getAudioSourceById('speaker2'); - if (!speaker2) { - console.log('Creating default audio source: speaker2'); - speaker2 = this.createAudioSource({ - id: 'speaker2', - name: 'Speaker 2', - is_active: 0, - device_id: null, - device_name: null - }); - console.log('Default audio source speaker2 created:', speaker2); - } - - return { speaker1, speaker2 }; - } catch (error) { - console.error('Error seeding default audio sources:', error); - return null; - } - } -} - -export default DatabaseManager; +export { default } from './modules/index.js'; \ No newline at end of file diff --git a/desktop/src/db/modules/ai-analysis.js b/desktop/src/db/modules/ai-analysis.js new file mode 100644 index 0000000..8065417 --- /dev/null +++ b/desktop/src/db/modules/ai-analysis.js @@ -0,0 +1,465 @@ +export default function AIAnalysisManager(BaseClass) { + return class extends BaseClass { + ensureSuggestionDecisionSchema() { + if (this._suggestionDecisionSchemaEnsured) return; + + // 1) 确保新表存在(即使老库里没有) + this.db.exec(` + CREATE TABLE IF NOT EXISTS decision_points ( + id TEXT PRIMARY KEY, + conversation_id TEXT NOT NULL, + anchor_message_id TEXT, + created_at INTEGER NOT NULL, + FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE 
CASCADE, + FOREIGN KEY (anchor_message_id) REFERENCES messages(id) ON DELETE SET NULL + ); + + CREATE TABLE IF NOT EXISTS suggestion_batches ( + id TEXT PRIMARY KEY, + decision_point_id TEXT NOT NULL, + trigger TEXT, + reason TEXT, + created_at INTEGER NOT NULL, + FOREIGN KEY (decision_point_id) REFERENCES decision_points(id) ON DELETE CASCADE + ); + + CREATE INDEX IF NOT EXISTS idx_decision_points_conversation_id ON decision_points(conversation_id); + CREATE INDEX IF NOT EXISTS idx_suggestion_batches_decision_point_id ON suggestion_batches(decision_point_id); + `); + + // 2) 迁移 ai_suggestions:需要新增列,并删除 is_used(SQLite 需重建表) + const columns = this.db.prepare('PRAGMA table_info(ai_suggestions)').all(); + if (columns && columns.length > 0) { + const names = columns.map((c) => c.name); + const hasIsUsed = names.includes('is_used'); + const hasDecisionPointId = names.includes('decision_point_id'); + const hasBatchId = names.includes('batch_id'); + const hasSuggestionIndex = names.includes('suggestion_index'); + const hasIsSelected = names.includes('is_selected'); + const hasSelectedAt = names.includes('selected_at'); + + const needsRebuild = + hasIsUsed || !hasDecisionPointId || !hasBatchId || !hasSuggestionIndex; + + if (needsRebuild) { + const transaction = this.db.transaction(() => { + this.db.prepare('ALTER TABLE ai_suggestions RENAME TO ai_suggestions_backup').run(); + + this.db.exec(` + CREATE TABLE IF NOT EXISTS ai_suggestions ( + id TEXT PRIMARY KEY, + conversation_id TEXT NOT NULL, + message_id TEXT, + decision_point_id TEXT, + batch_id TEXT, + suggestion_index INTEGER, + title TEXT NOT NULL, + content TEXT NOT NULL, + affinity_prediction INTEGER, + tags TEXT, + is_selected INTEGER DEFAULT 0, + selected_at INTEGER, + created_at INTEGER NOT NULL, + FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE, + FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE + ); + `); + + const backupColumns = this.db.prepare('PRAGMA 
table_info(ai_suggestions_backup)').all(); + const backupNames = backupColumns.map((c) => c.name); + const hasAffinity = backupNames.includes('affinity_prediction'); + const hasTags = backupNames.includes('tags'); + const hasSelected = backupNames.includes('is_selected'); + const hasSelectedAt = backupNames.includes('selected_at'); + + // 旧表没有 suggestion_index / decision_point_id / batch_id,统一置 NULL + const affinitySelect = hasAffinity ? 'affinity_prediction' : 'NULL'; + const tagsSelect = hasTags ? 'tags' : "''"; + const selectedSelect = hasSelected ? 'is_selected' : '0'; + const selectedAtSelect = hasSelectedAt ? 'selected_at' : 'NULL'; + + this.db + .prepare(` + INSERT INTO ai_suggestions ( + id, conversation_id, message_id, + decision_point_id, batch_id, suggestion_index, + title, content, affinity_prediction, tags, + is_selected, selected_at, + created_at + ) + SELECT + id, conversation_id, message_id, + NULL, NULL, NULL, + title, content, ${affinitySelect}, ${tagsSelect}, ${selectedSelect}, ${selectedAtSelect}, created_at + FROM ai_suggestions_backup + `) + .run(); + + this.db.prepare('DROP TABLE ai_suggestions_backup').run(); + + // 索引补齐 + this.db.prepare('CREATE INDEX IF NOT EXISTS idx_ai_suggestions_conversation_id ON ai_suggestions(conversation_id)').run(); + this.db.prepare('CREATE INDEX IF NOT EXISTS idx_ai_suggestions_decision_point_id ON ai_suggestions(decision_point_id)').run(); + this.db.prepare('CREATE INDEX IF NOT EXISTS idx_ai_suggestions_batch_id ON ai_suggestions(batch_id)').run(); + }); + transaction(); + } else { + // 仅补齐索引 + this.db.prepare('CREATE INDEX IF NOT EXISTS idx_ai_suggestions_decision_point_id ON ai_suggestions(decision_point_id)').run(); + this.db.prepare('CREATE INDEX IF NOT EXISTS idx_ai_suggestions_batch_id ON ai_suggestions(batch_id)').run(); + + // 新增字段(向前兼容) + if (!hasIsSelected) { + this.db.prepare('ALTER TABLE ai_suggestions ADD COLUMN is_selected INTEGER DEFAULT 0').run(); + } + if (!hasSelectedAt) { + 
this.db.prepare('ALTER TABLE ai_suggestions ADD COLUMN selected_at INTEGER').run(); + } + } + } + + this._suggestionDecisionSchemaEnsured = true; + } + + createDecisionPoint({ conversationId, anchorMessageId = null, createdAt = null } = {}) { + this.ensureSuggestionDecisionSchema(); + if (!conversationId) throw new Error('conversationId is required'); + const now = createdAt || Date.now(); + const id = `dp-${now}-${Math.random().toString(36).slice(2, 8)}`; + this.db + .prepare( + ` + INSERT INTO decision_points (id, conversation_id, anchor_message_id, created_at) + VALUES (@id, @conversation_id, @anchor_message_id, @created_at) + ` + ) + .run({ + id, + conversation_id: conversationId, + anchor_message_id: anchorMessageId, + created_at: now + }); + return id; + } + + createSuggestionBatch({ decisionPointId, trigger = null, reason = null, createdAt = null } = {}) { + this.ensureSuggestionDecisionSchema(); + if (!decisionPointId) throw new Error('decisionPointId is required'); + const now = createdAt || Date.now(); + const id = `batch-${now}-${Math.random().toString(36).slice(2, 8)}`; + this.db + .prepare( + ` + INSERT INTO suggestion_batches (id, decision_point_id, trigger, reason, created_at) + VALUES (@id, @decision_point_id, @trigger, @reason, @created_at) + ` + ) + .run({ + id, + decision_point_id: decisionPointId, + trigger, + reason, + created_at: now + }); + return id; + } + + getDecisionPointById(id) { + this.ensureSuggestionDecisionSchema(); + if (!id) return null; + try { + const stmt = this.db.prepare('SELECT * FROM decision_points WHERE id = ?'); + return stmt.get(id) || null; + } catch (error) { + console.error('Error getting decision point:', error); + return null; + } + } + + getSuggestionBatchById(id) { + this.ensureSuggestionDecisionSchema(); + if (!id) return null; + try { + const stmt = this.db.prepare('SELECT * FROM suggestion_batches WHERE id = ?'); + return stmt.get(id) || null; + } catch (error) { + console.error('Error getting suggestion 
batch:', error); + return null; + } + } + + // 获取对话的AI分析报告 + getConversationAnalysis(conversationId) { + try { + const stmt = this.db.prepare(` + SELECT * FROM ai_analysis + WHERE conversation_id = ? AND insight_type = 'analysis_report' + ORDER BY created_at DESC + LIMIT 1 + `); + const result = stmt.get(conversationId); + console.log(`[DB] getConversationAnalysis for ${conversationId}:`, result ? 'found' : 'not found'); + if (result) { + console.log(`[DB] Analysis report content:`, result.content); + } + return result || null; + } catch (error) { + console.error('Error getting conversation analysis:', error); + return null; + } + } + + // 获取对话的关键时刻回放 + getKeyMoments(conversationId) { + try { + const stmt = this.db.prepare(` + SELECT + a.*, + m.content as message_content, + m.timestamp as message_timestamp, + m.sender + FROM ai_analysis a + LEFT JOIN messages m ON a.message_id = m.id + WHERE a.conversation_id = ? AND a.insight_type = 'key_moment' + ORDER BY a.created_at ASC + `); + return stmt.all(conversationId) || []; + } catch (error) { + console.error('Error getting key moments:', error); + return []; + } + } + + // 获取对话的行动建议 + getActionSuggestions(conversationId) { + try { + this.ensureSuggestionDecisionSchema(); + const stmt = this.db.prepare(` + SELECT * FROM ai_suggestions + WHERE conversation_id = ? 
+ ORDER BY created_at DESC + `); + return stmt.all(conversationId) || []; + } catch (error) { + console.error('Error getting action suggestions:', error); + return []; + } + } + + // 保存行动建议到数据库 + saveActionSuggestion(suggestion, conversationId, messageId = null) { + try { + this.ensureSuggestionDecisionSchema(); + if (!suggestion || !conversationId) { + console.warn('[DB] saveActionSuggestion: Missing required fields', { suggestion, conversationId }); + return null; + } + + const stmt = this.db.prepare(` + INSERT INTO ai_suggestions ( + id, conversation_id, message_id, + decision_point_id, batch_id, suggestion_index, + title, content, + affinity_prediction, tags, created_at + ) VALUES ( + @id, @conversation_id, @message_id, + @decision_point_id, @batch_id, @suggestion_index, + @title, @content, + @affinity_prediction, @tags, @created_at + ) + `); + + const tagsStr = Array.isArray(suggestion.tags) + ? suggestion.tags.join(',') + : (suggestion.tags || ''); + + const now = Date.now(); + const suggestionId = suggestion.id || `suggestion-${now}-${Math.random().toString(36).substr(2, 9)}`; + + stmt.run({ + id: suggestionId, + conversation_id: conversationId, + message_id: messageId, + decision_point_id: suggestion.decision_point_id || suggestion.decisionPointId || null, + batch_id: suggestion.batch_id || suggestion.batchId || null, + suggestion_index: + suggestion.suggestion_index !== undefined + ? suggestion.suggestion_index + : suggestion.index !== undefined + ? 
suggestion.index + : null, + title: suggestion.title || suggestion.content || '未命名建议', + content: suggestion.content || suggestion.title || '', + affinity_prediction: suggestion.affinity_prediction || null, + tags: tagsStr, + created_at: suggestion.created_at || now + }); + + console.log(`[DB] Saved action suggestion: ${suggestionId} for conversation: ${conversationId}`); + return suggestionId; + } catch (error) { + console.error('Error saving action suggestion:', error); + return null; + } + } + + /** + * 用户显式确认“采用了哪个建议” + * - 默认按 batch_id 互斥(同一批次只能选一个) + * - 如果没有 batch_id,则回退按 decision_point_id 互斥 + */ + selectActionSuggestion({ suggestionId, selected = true, selectedAt = null } = {}) { + try { + this.ensureSuggestionDecisionSchema(); + if (!suggestionId) throw new Error('suggestionId is required'); + + const row = this.db + .prepare('SELECT id, batch_id, decision_point_id FROM ai_suggestions WHERE id = ? LIMIT 1') + .get(suggestionId); + if (!row) { + throw new Error(`Suggestion not found: ${suggestionId}`); + } + + const ts = selectedAt || Date.now(); + const scope = row.batch_id || row.decision_point_id || null; + const scopeField = row.batch_id ? 'batch_id' : (row.decision_point_id ? 'decision_point_id' : null); + + const tx = this.db.transaction(() => { + if (scopeField && scope) { + // 互斥:同一 scope 先全部清空 + this.db + .prepare(`UPDATE ai_suggestions SET is_selected = 0, selected_at = NULL WHERE ${scopeField} = ?`) + .run(scope); + } else { + // 无 scope:至少确保当前项能被正确更新 + this.db.prepare('UPDATE ai_suggestions SET is_selected = 0, selected_at = NULL WHERE id = ?').run(suggestionId); + } + + if (selected) { + this.db + .prepare('UPDATE ai_suggestions SET is_selected = 1, selected_at = ? 
WHERE id = ?') + .run(ts, suggestionId); + } + }); + + tx(); + return true; + } catch (error) { + console.error('[DB] selectActionSuggestion failed:', error); + return false; + } + } + + // 获取对话的完整AI分析数据 + getConversationAIData(conversationId) { + console.log(`[DB] Getting AI data for conversation: ${conversationId}`); + + // 获取分析报告 + const analysisReport = this.getConversationAnalysis(conversationId); + console.log(`[DB] Analysis report found:`, analysisReport ? 'yes' : 'no'); + + // 获取关键时刻 + const keyMoments = this.getKeyMoments(conversationId); + console.log(`[DB] Key moments found: ${keyMoments.length}`); + + // 获取行动建议 + const actionSuggestions = this.getActionSuggestions(conversationId); + console.log(`[DB] Action suggestions found: ${actionSuggestions.length}`); + + // 获取对话信息以获取角色ID + const conversation = this.getConversationById(conversationId); + + // 获取本轮对话的表现态度分析(从ai_analysis表获取) + let attitudeAnalysis = null; + try { + const attitudeStmt = this.db.prepare(` + SELECT content FROM ai_analysis + WHERE conversation_id = ? AND insight_type = 'attitude_analysis' + ORDER BY created_at DESC + LIMIT 1 + `); + const attitudeData = attitudeStmt.get(conversationId); + if (attitudeData && attitudeData.content) { + // 如果content是JSON,解析它;否则直接使用 + try { + const parsed = JSON.parse(attitudeData.content); + const affinityChange = parsed.affinityChange || conversation?.affinity_change || 0; + attitudeAnalysis = { + description: parsed.description || parsed.content || attitudeData.content, + affinityChange: affinityChange, + trend: parsed.trend || (affinityChange > 0 ? '上升' : affinityChange < 0 ? '下降' : '持平') + }; + } catch (e) { + // 如果不是JSON,直接使用字符串,从conversation获取affinity_change + const affinityChange = conversation?.affinity_change || 0; + attitudeAnalysis = { + description: attitudeData.content, + affinityChange: affinityChange, + trend: affinityChange > 0 ? '上升' : affinityChange < 0 ? 
'下降' : '持平' + }; + } + } else if (conversation) { + // 如果没有专门的attitude_analysis,使用conversation的affinity_change作为基础 + const affinityChange = conversation.affinity_change || 0; + attitudeAnalysis = { + description: '本轮对话中,对方表现积极,互动良好。', + affinityChange: affinityChange, + trend: affinityChange > 0 ? '上升' : affinityChange < 0 ? '下降' : '持平' + }; + } + } catch (error) { + console.error('Error getting attitude analysis:', error); + } + + // 解析分析报告 + let parsedReport = null; + if (analysisReport && analysisReport.content) { + try { + parsedReport = JSON.parse(analysisReport.content); + } catch (e) { + console.error('Failed to parse analysis report:', e); + } + } + + // 解析关键时刻评价 + const parsedKeyMoments = keyMoments.map(km => { + let evaluation = null; + if (km.content) { + try { + evaluation = JSON.parse(km.content); + } catch (e) { + evaluation = km.content; // 如果不是JSON,直接使用字符串 + } + } + return { + id: km.id, + timestamp: km.message_timestamp, + messageContent: km.message_content, + sender: km.sender, + evaluation: evaluation + }; + }); + + const result = { + analysisReport: parsedReport, + keyMoments: parsedKeyMoments, + attitudeAnalysis, + actionSuggestions: actionSuggestions.map(as => ({ + id: as.id, + title: as.title, + content: as.content, + tags: as.tags ? 
as.tags.split(',').map(t => t.trim()) : [] + })) + }; + + console.log(`[DB] Returning AI data:`, { + hasAnalysisReport: !!result.analysisReport, + keyMomentsCount: result.keyMoments.length, + hasAttitudeAnalysis: !!result.attitudeAnalysis, + actionSuggestionsCount: result.actionSuggestions.length + }); + + return result; + } + }; +} diff --git a/desktop/src/db/modules/asr.js b/desktop/src/db/modules/asr.js new file mode 100644 index 0000000..115c827 --- /dev/null +++ b/desktop/src/db/modules/asr.js @@ -0,0 +1,470 @@ +import fs from 'fs'; + +export default function ASRManager(BaseClass) { + return class extends BaseClass { + // 获取所有音频源配置 + getAudioSources() { + const stmt = this.db.prepare('SELECT * FROM audio_sources ORDER BY created_at ASC'); + return stmt.all(); + } + + // 获取单个音频源配置 + getAudioSourceById(id) { + const stmt = this.db.prepare('SELECT * FROM audio_sources WHERE id = ?'); + return stmt.get(id); + } + + // 创建音频源配置 + createAudioSource(sourceData) { + const stmt = this.db.prepare(` + INSERT INTO audio_sources (id, name, is_active, device_id, device_name, created_at, updated_at) + VALUES (@id, @name, @is_active, @device_id, @device_name, @created_at, @updated_at) + `); + + const id = sourceData.id || this.generateId(); + + stmt.run({ + id, + name: sourceData.name, + is_active: sourceData.is_active !== undefined ? 
sourceData.is_active : 0, + device_id: sourceData.device_id || null, + device_name: sourceData.device_name || null, + created_at: Date.now(), + updated_at: Date.now() + }); + + return this.getAudioSourceById(id); + } + + // 更新音频源配置 + updateAudioSource(id, updates) { + const fields = []; + const values = { id }; + + if (updates.name !== undefined) { + fields.push('name = @name'); + values.name = updates.name; + } + if (updates.is_active !== undefined) { + fields.push('is_active = @is_active'); + values.is_active = updates.is_active; + } + if (updates.device_id !== undefined) { + fields.push('device_id = @device_id'); + values.device_id = updates.device_id; + } + if (updates.device_name !== undefined) { + fields.push('device_name = @device_name'); + values.device_name = updates.device_name; + } + + fields.push('updated_at = @updated_at'); + values.updated_at = Date.now(); + + const stmt = this.db.prepare(` + UPDATE audio_sources + SET ${fields.join(', ')} + WHERE id = @id + `); + + const info = stmt.run(values); + return info.changes > 0 ? 
this.getAudioSourceById(id) : null; + } + + // 获取所有 ASR 配置 + getASRConfigs() { + const stmt = this.db.prepare('SELECT * FROM asr_configs ORDER BY created_at ASC'); + return stmt.all(); + } + + // 获取默认 ASR 配置 + getDefaultASRConfig() { + const stmt = this.db.prepare('SELECT * FROM asr_configs WHERE is_default = 1 LIMIT 1'); + return stmt.get(); + } + + // 获取指定 ASR 配置 + getASRConfigById(id) { + const stmt = this.db.prepare('SELECT * FROM asr_configs WHERE id = ?'); + return stmt.get(id); + } + + // 创建 ASR 配置 + createASRConfig(configData) { + const stmt = this.db.prepare(` + INSERT INTO asr_configs ( + id, model_name, language, enable_vad, sentence_pause_threshold, + retain_audio_files, audio_retention_days, audio_storage_path, + is_default, created_at, updated_at + ) + VALUES ( + @id, @model_name, @language, @enable_vad, @sentence_pause_threshold, + @retain_audio_files, @audio_retention_days, @audio_storage_path, + @is_default, @created_at, @updated_at + ) + `); + + const id = configData.id || this.generateId(); + + stmt.run({ + id, + model_name: configData.model_name || 'whisper-base', + language: configData.language || 'zh', + enable_vad: configData.enable_vad !== undefined ? configData.enable_vad : 1, + sentence_pause_threshold: configData.sentence_pause_threshold || 1.0, + retain_audio_files: configData.retain_audio_files !== undefined ? configData.retain_audio_files : 0, + audio_retention_days: configData.audio_retention_days || 30, + audio_storage_path: configData.audio_storage_path || null, + is_default: configData.is_default !== undefined ? 
configData.is_default : 0, + created_at: Date.now(), + updated_at: Date.now() + }); + + return this.getASRConfigById(id); + } + + // 更新 ASR 配置 + updateASRConfig(id, updates) { + const fields = []; + const values = { id }; + + if (updates.model_name !== undefined) { + fields.push('model_name = @model_name'); + values.model_name = updates.model_name; + } + if (updates.language !== undefined) { + fields.push('language = @language'); + values.language = updates.language; + } + if (updates.enable_vad !== undefined) { + fields.push('enable_vad = @enable_vad'); + values.enable_vad = updates.enable_vad; + } + if (updates.sentence_pause_threshold !== undefined) { + fields.push('sentence_pause_threshold = @sentence_pause_threshold'); + values.sentence_pause_threshold = updates.sentence_pause_threshold; + } + if (updates.retain_audio_files !== undefined) { + fields.push('retain_audio_files = @retain_audio_files'); + values.retain_audio_files = updates.retain_audio_files; + } + if (updates.audio_retention_days !== undefined) { + fields.push('audio_retention_days = @audio_retention_days'); + values.audio_retention_days = updates.audio_retention_days; + } + if (updates.audio_storage_path !== undefined) { + fields.push('audio_storage_path = @audio_storage_path'); + values.audio_storage_path = updates.audio_storage_path; + } + + fields.push('updated_at = @updated_at'); + values.updated_at = Date.now(); + + const stmt = this.db.prepare(` + UPDATE asr_configs + SET ${fields.join(', ')} + WHERE id = @id + `); + + const info = stmt.run(values); + return info.changes > 0 ? 
this.getASRConfigById(id) : null; + } + + // 设置默认 ASR 配置 + setDefaultASRConfig(id) { + const transaction = this.db.transaction(() => { + // 先将所有配置设为非默认 + this.db.prepare('UPDATE asr_configs SET is_default = 0').run(); + // 然后设置指定配置为默认 + const stmt = this.db.prepare('UPDATE asr_configs SET is_default = 1 WHERE id = ?'); + return stmt.run(id); + }); + + const info = transaction(); + return info.changes > 0; + } + + // 保存语音识别记录 + saveSpeechRecord(recordData) { + // 验证外键约束:检查对话是否存在 + if (recordData.conversation_id) { + const conversation = this.getConversationById(recordData.conversation_id); + if (!conversation) { + throw new Error(`Conversation not found: ${recordData.conversation_id}. Cannot save speech record.`); + } + } else { + throw new Error('conversation_id is required for speech record'); + } + + // 验证外键约束:检查音频源是否存在,如果不存在则自动创建 + let audioSource = this.getAudioSourceById(recordData.source_id); + if (!audioSource) { + console.warn(`Audio source not found: ${recordData.source_id}, creating it automatically...`); + // 自动创建音频源 + audioSource = this.createAudioSource({ + id: recordData.source_id, + name: recordData.source_id === 'speaker1' ? 'Speaker 1' : recordData.source_id === 'speaker2' ? 
'Speaker 2' : `Audio Source ${recordData.source_id}`, + is_active: 1, + device_id: null, + device_name: null + }); + console.log(`Auto-created audio source: ${recordData.source_id}`); + } + + // 生成或使用提供的ID + const recordId = recordData.id || this.generateId(); + + const stmt = this.db.prepare(` + INSERT INTO speech_recognition_records ( + id, conversation_id, source_id, message_id, + audio_data, audio_file_path, audio_duration, + recognized_text, confidence, start_time, end_time, + status, error_message, created_at, updated_at + ) + VALUES ( + @id, @conversation_id, @source_id, @message_id, + @audio_data, @audio_file_path, @audio_duration, + @recognized_text, @confidence, @start_time, @end_time, + @status, @error_message, @created_at, @updated_at + ) + `); + + const info = stmt.run({ + id: recordId, + conversation_id: recordData.conversation_id, + source_id: recordData.source_id, + message_id: recordData.message_id || null, + audio_data: recordData.audio_data || null, + audio_file_path: recordData.audio_file_path || null, + audio_duration: recordData.audio_duration || null, + recognized_text: recordData.recognized_text || null, + confidence: recordData.confidence || null, + start_time: recordData.start_time, + end_time: recordData.end_time || null, + status: recordData.status || 'recording', + error_message: recordData.error_message || null, + created_at: Date.now(), + updated_at: Date.now() + }); + + return this.getSpeechRecordById(recordId); + } + + // 删除语音识别记录关联的音频文件路径 + deleteSpeechRecordAudio(id) { + const stmt = this.db.prepare(` + UPDATE speech_recognition_records + SET audio_file_path = NULL, updated_at = ? + WHERE id = ? + `); + return stmt.run(Date.now(), id); + } + + // 获取语音识别记录 + getSpeechRecordById(id) { + const stmt = this.db.prepare(` + SELECT sr.*, asrc.name as source_name + FROM speech_recognition_records sr + LEFT JOIN audio_sources asrc ON sr.source_id = asrc.id + WHERE sr.id = ? 
+ `); + return stmt.get(id); + } + + // 更新语音识别记录 + updateSpeechRecord(id, updates) { + const fields = []; + const values = { id }; + + if (updates.message_id !== undefined) { + fields.push('message_id = @message_id'); + values.message_id = updates.message_id; + } + if (updates.audio_data !== undefined) { + fields.push('audio_data = @audio_data'); + values.audio_data = updates.audio_data; + } + if (updates.audio_file_path !== undefined) { + fields.push('audio_file_path = @audio_file_path'); + values.audio_file_path = updates.audio_file_path; + } + if (updates.audio_duration !== undefined) { + fields.push('audio_duration = @audio_duration'); + values.audio_duration = updates.audio_duration; + } + if (updates.recognized_text !== undefined) { + fields.push('recognized_text = @recognized_text'); + values.recognized_text = updates.recognized_text; + } + if (updates.confidence !== undefined) { + fields.push('confidence = @confidence'); + values.confidence = updates.confidence; + } + if (updates.end_time !== undefined) { + fields.push('end_time = @end_time'); + values.end_time = updates.end_time; + } + if (updates.status !== undefined) { + fields.push('status = @status'); + values.status = updates.status; + } + if (updates.error_message !== undefined) { + fields.push('error_message = @error_message'); + values.error_message = updates.error_message; + } + + fields.push('updated_at = @updated_at'); + values.updated_at = Date.now(); + + const stmt = this.db.prepare(` + UPDATE speech_recognition_records + SET ${fields.join(', ')} + WHERE id = @id + `); + + const info = stmt.run(values); + return info.changes > 0 ? this.getSpeechRecordById(id) : null; + } + + // 获取对话的语音识别记录 + getSpeechRecordsByConversation(conversationId) { + const stmt = this.db.prepare(` + SELECT sr.*, asrc.name as source_name + FROM speech_recognition_records sr + LEFT JOIN audio_sources asrc ON sr.source_id = asrc.id + WHERE sr.conversation_id = ? 
+ ORDER BY sr.start_time ASC + `); + return stmt.all(conversationId); + } + + // 删除过期的语音识别记录(清理音频文件) + cleanupExpiredAudioFiles(retentionDays) { + const cutoffTime = Date.now() - (retentionDays * 24 * 60 * 60 * 1000); + + const stmt = this.db.prepare(` + SELECT id, audio_file_path + FROM speech_recognition_records + WHERE audio_file_path IS NOT NULL + AND created_at < ? + `); + + const expiredRecords = stmt.all(cutoffTime); + + // 删除文件和数据库记录 + const deleteStmt = this.db.prepare(` + UPDATE speech_recognition_records + SET audio_file_path = NULL, audio_data = NULL + WHERE id = ? + `); + + let deletedCount = 0; + for (const record of expiredRecords) { + if (record.audio_file_path && fs.existsSync(record.audio_file_path)) { + try { + fs.unlinkSync(record.audio_file_path); + deleteStmt.run(record.id); + deletedCount++; + } catch (err) { + console.error(`Error deleting audio file ${record.audio_file_path}:`, err); + } + } + } + + return deletedCount; + } + + // 初始化默认 ASR 配置(如果没有配置的话) + seedDefaultASRConfig() { + try { + const count = this.db.prepare('SELECT COUNT(*) as count FROM asr_configs').get().count; + + if (count === 0) { + console.log('No ASR config found, creating default config...'); + + // 默认使用 SiliconFlow Cloud(无需下载) + const defaultModelName = 'siliconflow-cloud'; + const defaultConfig = { + model_name: defaultModelName, + language: 'zh', + enable_vad: 1, + // 云端模式更适合更灵敏的停顿分句(仅影响 cloud 分句策略) + sentence_pause_threshold: 0.6, + retain_audio_files: 1, + audio_retention_days: 30, + audio_storage_path: null, + is_default: 1 + }; + + const config = this.createASRConfig(defaultConfig); + console.log('Default ASR config created:', config); + return config; + } else { + console.log(`ASR configs already exist (${count} configs found), skipping default config creation`); + return null; + } + } catch (error) { + console.error('Error seeding default ASR config:', error); + return null; + } + } + + // 修复 ASR 配置(迁移旧的/错误的模型名称) + fixASRConfig() { + try { + // 仅迁移旧的 
whisper.cpp ggml 名称,不再强制把 base 覆盖成 medium,避免用户选择丢失 + const stmt = this.db.prepare(` + UPDATE asr_configs + SET model_name = 'medium', updated_at = ? + WHERE model_name LIKE 'ggml%' + `); + + const info = stmt.run(Date.now()); + + if (info.changes > 0) { + console.log(`Migrated ${info.changes} ASR configs from ggml* to 'medium' model`); + } + } catch (error) { + console.error('Error fixing ASR config:', error); + } + } + + // 初始化默认音频源 + seedDefaultAudioSources() { + try { + // 检查并创建 speaker1 + let speaker1 = this.getAudioSourceById('speaker1'); + if (!speaker1) { + console.log('Creating default audio source: speaker1'); + speaker1 = this.createAudioSource({ + id: 'speaker1', + name: 'Speaker 1', + is_active: 1, + device_id: null, + device_name: null + }); + console.log('Default audio source speaker1 created:', speaker1); + } + + // 检查并创建 speaker2 + let speaker2 = this.getAudioSourceById('speaker2'); + if (!speaker2) { + console.log('Creating default audio source: speaker2'); + speaker2 = this.createAudioSource({ + id: 'speaker2', + name: 'Speaker 2', + is_active: 0, + device_id: null, + device_name: null + }); + console.log('Default audio source speaker2 created:', speaker2); + } + + return { speaker1, speaker2 }; + } catch (error) { + console.error('Error seeding default audio sources:', error); + return null; + } + } + }; +} diff --git a/desktop/src/db/modules/base.js b/desktop/src/db/modules/base.js new file mode 100644 index 0000000..3a10880 --- /dev/null +++ b/desktop/src/db/modules/base.js @@ -0,0 +1,185 @@ +import Database from 'better-sqlite3'; +import path from 'path'; +import fs from 'fs'; +import { app } from 'electron'; +import { fileURLToPath } from 'url'; + +// 获取 __dirname 的 ESM 等效方式 +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +function isDirWritable(dirPath) { + try { + fs.mkdirSync(dirPath, { recursive: true }); + fs.accessSync(dirPath, fs.constants.W_OK); + return true; + } catch { + return 
false; + } +} + +export default class DatabaseBase { + constructor(options = {}) { + // 数据库文件路径优先级: + // 1) 显式传入 options.dbPath + // 2) 环境变量 LIVEGALGAME_DB_PATH + // 3) Electron userData 目录下的 livegalgame.db(可写) + // 4) 回落到仓库内 data 目录(开发模式) + const customPath = options.dbPath || process.env.LIVEGALGAME_DB_PATH; + const isPackaged = app?.isPackaged; + const repoDefaultPath = path.join(__dirname, '../../data/livegalgame.db'); + const userDataDir = app?.getPath ? app.getPath('userData') : null; + const userDbPath = userDataDir ? path.join(userDataDir, 'livegalgame.db') : null; + + // 备选路径按照可写优先级排序 + const candidates = [ + customPath ? path.resolve(customPath) : null, + userDbPath, + repoDefaultPath, + ].filter(Boolean); + + let resolvedPath = candidates.find((p) => isDirWritable(path.dirname(p))); + if (!resolvedPath) { + // 最后兜底:临时目录 + const tmpPath = path.join(app?.getPath?.('temp') || '/tmp', 'livegalgame.db'); + if (isDirWritable(path.dirname(tmpPath))) { + resolvedPath = tmpPath; + console.warn('[DB] All preferred locations not writable, falling back to temp DB:', resolvedPath); + } else { + throw new Error('No writable location found for database file'); + } + } + + // 如目标是用户目录且不存在,优先尝试从打包资源或仓库模板拷贝 + try { + if (resolvedPath === userDbPath && !fs.existsSync(resolvedPath)) { + // 打包场景:resources/data/livegalgame.db 或 app.asar/data/livegalgame.db + const resourceSeed = process.resourcesPath + ? path.join(process.resourcesPath, 'data', 'livegalgame.db') + : null; + const asarSeed = path.join(app.getAppPath(), 'data', 'livegalgame.db'); + const seedDb = (resourceSeed && fs.existsSync(resourceSeed)) + ? resourceSeed + : fs.existsSync(asarSeed) + ? asarSeed + : null; + + const fallbackSeed = fs.existsSync(repoDefaultPath) ? 
repoDefaultPath : null; + const source = seedDb || fallbackSeed; + + if (source) { + fs.mkdirSync(path.dirname(resolvedPath), { recursive: true }); + fs.copyFileSync(source, resolvedPath); + console.log(`[DB] seeded database to ${resolvedPath} from ${seedDb ? 'package' : 'repo'}`); + } + } + } catch (copyErr) { + console.error('[DB] Failed to bootstrap userData database:', copyErr); + } + + this.dbPath = resolvedPath; + + // 确保数据库目录存在 + const dataDir = path.dirname(this.dbPath); + if (!fs.existsSync(dataDir)) { + fs.mkdirSync(dataDir, { recursive: true }); + } + + // 创建数据库连接 + this.db = new Database(this.dbPath, { + // 过滤掉注释和空行,减少乱码可能性并保持日志整洁 + verbose: (sql) => { + if (sql.trim().startsWith('--')) return; + console.log(sql); + } + }); + + // 启用 WAL 模式以提高性能和并发 + this.db.pragma('journal_mode = WAL'); + this.db.pragma('synchronous = NORMAL'); + + try { + fs.accessSync(this.dbPath, fs.constants.W_OK); + console.log('[DB] Using writable database at:', this.dbPath); + } catch { + console.warn('[DB] Database path may be read-only:', this.dbPath); + } + + // 启用外键约束 + this.db.pragma('foreign_keys = ON'); + + // 初始化数据库表 + this.initialize(); + + console.log('Database initialized at:', this.dbPath); + } + + // 初始化数据库表 + initialize() { + console.log('Initializing database schema...'); + const schemaPath = path.join(__dirname, '../schema.sql'); + const schema = fs.readFileSync(schemaPath, 'utf-8'); + + // 执行SQL语句(分割并逐条执行) + const statements = schema.split(';').filter(stmt => stmt.trim()); + + // 开始事务 + const transaction = this.db.transaction(() => { + for (const statement of statements) { + if (statement.trim()) { + try { + this.db.exec(statement); + } catch (err) { + const msg = String(err?.message || err); + const upper = statement.trim().toUpperCase(); + const ignorableIndexError = + upper.startsWith('CREATE INDEX') && + (msg.includes('no such column') || msg.includes('has no column')); + if (ignorableIndexError) { + // 兼容老库:表已存在但列还未迁移时,schema.sql 的新索引会失败。后续 
ensureSuggestionDecisionSchema 会补齐。 + console.warn('[DB] Ignoring index creation error (will be fixed by migration):', msg); + continue; + } + throw err; + } + } + } + }); + + transaction(); + console.log('Database schema initialized'); + + // 预先执行关键迁移,避免 seed.sql 因旧表结构导致插入失败 + try { + if (typeof this.ensureSuggestionDecisionSchema === 'function') { + this.ensureSuggestionDecisionSchema(); + } + } catch (err) { + console.warn('[DB] ensureSuggestionDecisionSchema failed (will retry lazily on access):', err); + } + + // 初始化示例数据(如果数据库为空) + this.seedSampleData(); + + // 初始化默认 ASR 配置(如果没有) + this.seedDefaultASRConfig(); + + // 修复 ASR 配置(迁移旧的/错误的模型名称) + this.fixASRConfig(); + + // 初始化默认音频源(如果没有) + this.seedDefaultAudioSources(); + + // 初始化默认对话建议配置 + if (typeof this.seedDefaultSuggestionConfig === 'function') { + this.seedDefaultSuggestionConfig(); + } + } + + // 关闭数据库连接 + close() { + if (this.db) { + this.db.close(); + } + } +} diff --git a/desktop/src/db/modules/character-details.js b/desktop/src/db/modules/character-details.js new file mode 100644 index 0000000..b29f56b --- /dev/null +++ b/desktop/src/db/modules/character-details.js @@ -0,0 +1,305 @@ +export default function CharacterDetailsManager(BaseClass) { + return class extends BaseClass { + // 获取角色详情 + getCharacterDetails(characterId) { + try { + const stmt = this.db.prepare('SELECT * FROM character_details WHERE character_id = ?'); + const row = stmt.get(characterId); + + if (!row) { + // 如果没有详情记录,尝试从会话中生成 + return this.generateCharacterDetailsFromConversations(characterId); + } + + // 解析JSON字段 + return { + character_id: row.character_id, + profile: row.profile ? JSON.parse(row.profile) : null, + personality_traits: row.personality_traits ? JSON.parse(row.personality_traits) : null, + likes_dislikes: row.likes_dislikes ? JSON.parse(row.likes_dislikes) : null, + important_events: row.important_events ? 
JSON.parse(row.important_events) : null, + conversation_summary: row.conversation_summary, + custom_fields: row.custom_fields ? JSON.parse(row.custom_fields) : {}, + updated_at: row.updated_at + }; + } catch (error) { + console.error('Error getting character details:', error); + return null; + } + } + + // 从会话中生成角色详情 + generateCharacterDetailsFromConversations(characterId) { + try { + // 获取角色的所有对话 + const conversations = this.getConversationsByCharacter(characterId); + + if (conversations.length === 0) { + return { + character_id: characterId, + profile: null, + personality_traits: null, + likes_dislikes: null, + important_events: null, + conversation_summary: '暂无对话记录', + custom_fields: {}, + updated_at: Date.now() + }; + } + + // 收集所有消息 + const allMessages = []; + const allSummaries = []; + const allTags = []; + const affinityChanges = []; + + for (const conv of conversations) { + const messages = this.getMessagesByConversation(conv.id); + allMessages.push(...messages); + + if (conv.summary) { + allSummaries.push(conv.summary); + } + + if (conv.tags) { + allTags.push(...conv.tags.split(',').map(t => t.trim())); + } + + if (conv.affinity_change) { + affinityChanges.push(conv.affinity_change); + } + } + + // 提取角色消息(sender = 'character') + const characterMessages = allMessages + .filter(msg => msg.sender === 'character') + .map(msg => msg.content); + + // 生成性格特点(从消息中提取关键词和模式) + const personalityTraits = this.extractPersonalityTraits(characterMessages, allTags); + + // 生成喜好厌恶(从消息中提取) + const likesDislikes = this.extractLikesDislikes(characterMessages); + + // 生成重要事件(从对话标题和摘要中提取) + const importantEvents = this.extractImportantEvents(conversations); + + // 生成对话总结 + const conversationSummary = this.generateConversationSummary(conversations, allSummaries, affinityChanges); + + // 生成角色档案(基本信息) + const character = this.getCharacterById(characterId); + const profile = character ? 
{ + name: character.name, + nickname: character.nickname, + relationship_label: character.relationship_label, + affinity: character.affinity, + tags: character.tags || [], + created_at: character.created_at, + notes: character.notes + } : null; + + const details = { + character_id: characterId, + profile: profile, + personality_traits: personalityTraits, + likes_dislikes: likesDislikes, + important_events: importantEvents, + conversation_summary: conversationSummary, + custom_fields: {}, + updated_at: Date.now() + }; + + // 保存到数据库 + this.saveCharacterDetails(characterId, details); + + return details; + } catch (error) { + console.error('Error generating character details:', error); + return null; + } + } + + // 提取性格特点 + extractPersonalityTraits(messages, tags) { + const traits = { + keywords: [], + descriptions: [] + }; + + // 从标签中提取 + if (tags && tags.length > 0) { + traits.keywords = [...new Set(tags)]; + } + + // 从消息中分析(简单关键词匹配) + const traitKeywords = { + '温柔': ['温柔', '体贴', '关心', '照顾'], + '活泼': ['开心', '快乐', '兴奋', '活泼', '活跃'], + '认真': ['认真', '负责', '仔细', '专注'], + '内向': ['安静', '内向', '害羞', '沉默'], + '外向': ['外向', '开朗', '健谈', '热情'], + '幽默': ['有趣', '幽默', '搞笑', '玩笑'], + '真诚': ['真诚', '诚实', '真实', '坦率'] + }; + + const foundTraits = new Set(); + const messageText = messages.join(' '); + + for (const [trait, keywords] of Object.entries(traitKeywords)) { + if (keywords.some(keyword => messageText.includes(keyword))) { + foundTraits.add(trait); + } + } + + traits.keywords = [...new Set([...traits.keywords, ...foundTraits])]; + + // 生成描述 + if (traits.keywords.length > 0) { + traits.descriptions = [ + `从对话中可以看出,${traits.keywords.slice(0, 3).join('、')}是主要特点。`, + `在互动中表现出${traits.keywords[0]}的一面。` + ]; + } + + return traits; + } + + // 提取喜好厌恶 + extractLikesDislikes(messages) { + const likes = []; + const dislikes = []; + + const messageText = messages.join(' '); + + // 简单的关键词匹配(实际应用中可以使用更复杂的NLP) + const likeKeywords = ['喜欢', '爱好', '感兴趣', '爱', '享受', '享受', '享受']; + const 
dislikeKeywords = ['不喜欢', '讨厌', '厌恶', '反感', '不感兴趣']; + + // 提取包含"喜欢"的句子片段 + const likePatterns = messageText.match(/喜欢[^,。!?]*/g) || []; + likePatterns.forEach(pattern => { + const cleaned = pattern.replace(/喜欢/g, '').trim(); + if (cleaned && cleaned.length < 20) { + likes.push(cleaned); + } + }); + + // 提取包含"不喜欢"的句子片段 + const dislikePatterns = messageText.match(/不(喜欢|感兴趣)[^,。!?]*/g) || []; + dislikePatterns.forEach(pattern => { + const cleaned = pattern.replace(/不(喜欢|感兴趣)/g, '').trim(); + if (cleaned && cleaned.length < 20) { + dislikes.push(cleaned); + } + }); + + return { + likes: [...new Set(likes)].slice(0, 10), // 最多10个 + dislikes: [...new Set(dislikes)].slice(0, 10) + }; + } + + // 提取重要事件 + extractImportantEvents(conversations) { + const events = []; + + conversations.forEach(conv => { + if (conv.title || conv.summary) { + events.push({ + title: conv.title || '对话', + summary: conv.summary || '', + date: conv.date, + affinity_change: conv.affinity_change || 0 + }); + } + }); + + // 按日期排序,最新的在前 + events.sort((a, b) => b.date - a.date); + + return events.slice(0, 10); // 最多10个重要事件 + } + + // 生成对话总结 + generateConversationSummary(conversations, summaries, affinityChanges) { + const totalConversations = conversations.length; + const totalAffinityChange = affinityChanges.reduce((sum, change) => sum + change, 0); + const avgAffinityChange = affinityChanges.length > 0 + ? Math.round(totalAffinityChange / affinityChanges.length) + : 0; + + let summary = `共进行了 ${totalConversations} 次对话。`; + + if (summaries.length > 0) { + summary += `主要话题包括:${summaries.slice(0, 3).join('、')}。`; + } + + if (totalAffinityChange !== 0) { + const trend = totalAffinityChange > 0 ? 
'上升' : '下降'; + summary += `好感度总体${trend}了 ${Math.abs(totalAffinityChange)} 点。`; + } + + return summary; + } + + // 保存角色详情 + saveCharacterDetails(characterId, details) { + try { + const stmt = this.db.prepare(` + INSERT OR REPLACE INTO character_details + (character_id, profile, personality_traits, likes_dislikes, important_events, conversation_summary, custom_fields, updated_at) + VALUES (@character_id, @profile, @personality_traits, @likes_dislikes, @important_events, @conversation_summary, @custom_fields, @updated_at) + `); + + stmt.run({ + character_id: characterId, + profile: details.profile ? JSON.stringify(details.profile) : null, + personality_traits: details.personality_traits ? JSON.stringify(details.personality_traits) : null, + likes_dislikes: details.likes_dislikes ? JSON.stringify(details.likes_dislikes) : null, + important_events: details.important_events ? JSON.stringify(details.important_events) : null, + conversation_summary: details.conversation_summary || null, + custom_fields: details.custom_fields ? 
JSON.stringify(details.custom_fields) : '{}', + updated_at: details.updated_at || Date.now() + }); + + return true; + } catch (error) { + console.error('Error saving character details:', error); + return false; + } + } + + // 更新角色详情的自定义字段 + updateCharacterDetailsCustomFields(characterId, customFields) { + try { + const currentDetails = this.getCharacterDetails(characterId); + if (!currentDetails) { + return false; + } + + const updatedCustomFields = { + ...(currentDetails.custom_fields || {}), + ...customFields + }; + + const stmt = this.db.prepare(` + UPDATE character_details + SET custom_fields = @custom_fields, updated_at = @updated_at + WHERE character_id = @character_id + `); + + stmt.run({ + character_id: characterId, + custom_fields: JSON.stringify(updatedCustomFields), + updated_at: Date.now() + }); + + return true; + } catch (error) { + console.error('Error updating custom fields:', error); + return false; + } + } + }; +} \ No newline at end of file diff --git a/desktop/src/db/modules/character.js b/desktop/src/db/modules/character.js new file mode 100644 index 0000000..de23f26 --- /dev/null +++ b/desktop/src/db/modules/character.js @@ -0,0 +1,113 @@ +export default function CharacterManager(BaseClass) { + return class extends BaseClass { + // 创建角色 + createCharacter(characterData) { + const stmt = this.db.prepare(` + INSERT INTO characters (id, name, nickname, relationship_label, avatar_color, affinity, created_at, updated_at, notes) + VALUES (@id, @name, @nickname, @relationship_label, @avatar_color, @affinity, @created_at, @updated_at, @notes) + `); + + // 在 TEXT 主键表上 lastInsertRowid 不可用于取回 ID,必须使用实际写入的 id + const id = characterData.id || this.generateId(); + stmt.run({ + id, + name: characterData.name, + nickname: characterData.nickname || null, + relationship_label: characterData.relationship_label || null, + avatar_color: characterData.avatar_color || '#ff6b6b', + affinity: characterData.affinity || 50, + created_at: Date.now(), + updated_at: 
Date.now(), + notes: characterData.notes || null + }); + + return this.getCharacterById(id); + } + + // 获取所有角色 + getAllCharacters() { + const stmt = this.db.prepare(` + SELECT c.*, + GROUP_CONCAT(t.name) as tags + FROM characters c + LEFT JOIN character_tags ct ON c.id = ct.character_id + LEFT JOIN tags t ON ct.tag_id = t.id + GROUP BY c.id + ORDER BY c.updated_at DESC + `); + + return stmt.all().map(row => ({ + ...row, + tags: row.tags ? row.tags.split(',') : [] + })); + } + + // 获取单个角色 + getCharacterById(id) { + const stmt = this.db.prepare(` + SELECT c.*, + GROUP_CONCAT(t.name) as tags + FROM characters c + LEFT JOIN character_tags ct ON c.id = ct.character_id + LEFT JOIN tags t ON ct.tag_id = t.id + WHERE c.id = ? + GROUP BY c.id + `); + + const row = stmt.get(id); + if (!row) return null; + + return { + ...row, + tags: row.tags ? row.tags.split(',') : [] + }; + } + + // 更新角色 + updateCharacter(id, updates) { + const fields = []; + const values = { id }; + + for (const [key, value] of Object.entries(updates)) { + if (key !== 'id' && key !== 'tags') { + fields.push(`${key} = @${key}`); + values[key] = value; + } + } + + fields.push('updated_at = @updated_at'); + values.updated_at = Date.now(); + + const stmt = this.db.prepare(` + UPDATE characters + SET ${fields.join(', ')} + WHERE id = @id + `); + + stmt.run(values); + return this.getCharacterById(id); + } + + // 删除角色(级联删除相关对话、消息、标签关联、角色详情) + deleteCharacter(characterId) { + const stmt = this.db.prepare('DELETE FROM characters WHERE id = ?'); + const info = stmt.run(characterId); + return info.changes > 0; + } + + // 获取角色的对话统计 + getCharacterStats(characterId) { + const stmt = this.db.prepare(` + SELECT + COUNT(DISTINCT c.id) as conversation_count, + COUNT(m.id) as message_count, + MAX(c.date) as last_conversation_date + FROM conversations c + LEFT JOIN messages m ON c.id = m.conversation_id + WHERE c.character_id = ? 
+    `);
+
+      return stmt.get(characterId);
+    }
+  };
+}
diff --git a/desktop/src/db/modules/conversation-review.js b/desktop/src/db/modules/conversation-review.js
new file mode 100644
index 0000000..7def8a8
--- /dev/null
+++ b/desktop/src/db/modules/conversation-review.js
+export default function ConversationReviewManager(BaseClass) {
+  return class extends BaseClass {
+    // 保存复盘报告
+    // Insert or update (upsert on primary key `id`) a post-conversation review report.
+    // data: { id?, conversation_id, review_data (string | JSON-serializable object),
+    //         created_at? (epoch ms), model_used? }
+    // Objects are stringified before storage; strings are stored as-is.
+    // Returns the review id that was written.
+    // NOTE(review): the fallback id `review-${Date.now()}` can collide when two
+    // saves land in the same millisecond — callers that pass data.id avoid this.
+    saveConversationReview(data) {
+      if (!data.conversation_id || !data.review_data) {
+        throw new Error('Missing required fields: conversation_id or review_data');
+      }
+
+      const stmt = this.db.prepare(`
+        INSERT INTO conversation_reviews (id, conversation_id, review_data, created_at, model_used)
+        VALUES (@id, @conversation_id, @review_data, @created_at, @model_used)
+        ON CONFLICT(id) DO UPDATE SET
+          review_data = excluded.review_data,
+          created_at = excluded.created_at,
+          model_used = excluded.model_used
+      `);
+
+      const reviewId = data.id || `review-${Date.now()}`;
+      // run() result (changes/lastInsertRowid) is not needed here — the caller
+      // only gets the id back, so don't capture it.
+      stmt.run({
+        id: reviewId,
+        conversation_id: data.conversation_id,
+        review_data: typeof data.review_data === 'string' ? data.review_data : JSON.stringify(data.review_data),
+        created_at: data.created_at || Date.now(),
+        model_used: data.model_used || null
+      });
+
+      return reviewId;
+    }
+
+    // 获取复盘报告
+    // Fetch the most recent review for a conversation. review_data is JSON-parsed;
+    // when the stored payload is not valid JSON, the row is still returned with
+    // review_data set to null. Returns null when no review exists.
+    getConversationReview(conversationId) {
+      const stmt = this.db.prepare(`
+        SELECT * FROM conversation_reviews
+        WHERE conversation_id = ?
+ ORDER BY created_at DESC + LIMIT 1 + `); + + const result = stmt.get(conversationId); + if (!result) return null; + + try { + return { + ...result, + review_data: JSON.parse(result.review_data) + }; + } catch (e) { + console.error('Failed to parse review data:', e); + return { + ...result, + review_data: null + }; + } + } + }; +} diff --git a/desktop/src/db/modules/conversation.js b/desktop/src/db/modules/conversation.js new file mode 100644 index 0000000..b8abe80 --- /dev/null +++ b/desktop/src/db/modules/conversation.js @@ -0,0 +1,132 @@ +export default function ConversationManager(BaseClass) { + return class extends BaseClass { + // 创建对话 + createConversation(conversationData) { + // 统一使用写入时的主键值,避免 TEXT 主键返回 rowid 导致二次查询为空 + const id = conversationData.id || this.generateId(); + const now = Date.now(); + + const stmt = this.db.prepare(` + INSERT INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) + VALUES (@id, @character_id, @title, @date, @affinity_change, @summary, @tags, @created_at, @updated_at) + `); + + stmt.run({ + id, + character_id: conversationData.character_id, + title: conversationData.title || null, + date: conversationData.date || now, + affinity_change: conversationData.affinity_change || 0, + summary: conversationData.summary || null, + tags: conversationData.tags || null, + created_at: now, + updated_at: now + }); + + return this.getConversationById(id); + } + + // 获取角色的所有对话(带角色信息和消息数) + getConversationsByCharacter(characterId) { + const stmt = this.db.prepare(` + SELECT + c.*, + char.name as character_name, + char.avatar_color as character_avatar_color, + char.id as character_id, + COUNT(m.id) as message_count + FROM conversations c + INNER JOIN characters char ON c.character_id = char.id + LEFT JOIN messages m ON c.id = m.conversation_id + WHERE c.character_id = ? 
+        GROUP BY c.id
+        ORDER BY c.created_at DESC
+      `);
+
+      return stmt.all(characterId);
+    }
+
+    // 获取单个对话
+    // Fetch one conversation row by primary key; returns undefined when absent.
+    getConversationById(id) {
+      const stmt = this.db.prepare('SELECT * FROM conversations WHERE id = ?');
+      return stmt.get(id);
+    }
+
+    // 更新对话
+    // Partial update: every key in `updates` except `id` becomes a SET column,
+    // and updated_at is always refreshed. Returns the updated row.
+    // NOTE(review): column names are interpolated directly into the SQL text —
+    // callers must only pass trusted, known column keys (never user-supplied
+    // field names), otherwise this is a SQL-injection vector.
+    updateConversation(id, updates) {
+      const fields = [];
+      const values = { id };
+
+      for (const [key, value] of Object.entries(updates)) {
+        if (key !== 'id') {
+          fields.push(`${key} = @${key}`);
+          values[key] = value;
+        }
+      }
+
+      fields.push('updated_at = @updated_at');
+      values.updated_at = Date.now();
+
+      const stmt = this.db.prepare(`
+        UPDATE conversations
+        SET ${fields.join(', ')}
+        WHERE id = @id
+      `);
+
+      stmt.run(values);
+      return this.getConversationById(id);
+    }
+
+    // 删除对话(级联删除相关消息、AI分析、AI建议)
+    // Returns true when a row was actually deleted.
+    // NOTE(review): the cascade relies on ON DELETE CASCADE foreign keys in
+    // schema.sql (not visible here); foreign_keys PRAGMA is enabled in base.js.
+    deleteConversation(conversationId) {
+      const stmt = this.db.prepare('DELETE FROM conversations WHERE id = ?');
+      const info = stmt.run(conversationId);
+      return info.changes > 0;
+    }
+
+    // 获取对话总数
+    // Total row count of the conversations table.
+    getConversationCount() {
+      const stmt = this.db.prepare('SELECT COUNT(*) as count FROM conversations');
+      return stmt.get().count;
+    }
+
+    // 获取最近对话(带角色信息)
+    // Most recently updated conversations joined with their character
+    // (name, avatar color) and an aggregate message count per conversation.
+    getRecentConversations(limit = 10) {
+      const stmt = this.db.prepare(`
+        SELECT
+          c.*,
+          char.name as character_name,
+          char.avatar_color as character_avatar_color,
+          char.id as character_id,
+          COUNT(m.id) as message_count
+        FROM conversations c
+        INNER JOIN characters char ON c.character_id = char.id
+        LEFT JOIN messages m ON c.id = m.conversation_id
+        GROUP BY c.id
+        ORDER BY c.updated_at DESC
+        LIMIT ?
+ `); + + return stmt.all(limit); + } + + // 获取所有对话(带角色信息) + getAllConversations() { + const stmt = this.db.prepare(` + SELECT + c.*, + char.name as character_name, + char.avatar_color as character_avatar_color, + char.id as character_id, + COUNT(m.id) as message_count + FROM conversations c + INNER JOIN characters char ON c.character_id = char.id + LEFT JOIN messages m ON c.id = m.conversation_id + GROUP BY c.id + ORDER BY c.created_at DESC + `); + + return stmt.all(); + } + }; +} diff --git a/desktop/src/db/modules/index.js b/desktop/src/db/modules/index.js new file mode 100644 index 0000000..9199b90 --- /dev/null +++ b/desktop/src/db/modules/index.js @@ -0,0 +1,40 @@ +import DatabaseBase from './base.js'; +import CharacterManager from './character.js'; +import ConversationManager from './conversation.js'; +import MessageManager from './message.js'; +import TagManager from './tag.js'; +import AIAnalysisManager from './ai-analysis.js'; +import CharacterDetailsManager from './character-details.js'; +import LLMConfigManager from './llm-config.js'; +import SuggestionConfigManager from './suggestion-config.js'; +import ASRManager from './asr.js'; +import ConversationReviewManager from './conversation-review.js'; +import Utils from './utils.js'; + +class DatabaseManager extends Utils( + ConversationReviewManager( + ASRManager( + SuggestionConfigManager( + LLMConfigManager( + CharacterDetailsManager( + AIAnalysisManager( + TagManager( + MessageManager( + ConversationManager( + CharacterManager(DatabaseBase) + ) + ) + ) + ) + ) + ) + ) + ) + ) +) { + constructor(options = {}) { + super(options); + } +} + +export default DatabaseManager; \ No newline at end of file diff --git a/desktop/src/db/modules/llm-config.js b/desktop/src/db/modules/llm-config.js new file mode 100644 index 0000000..412b0e6 --- /dev/null +++ b/desktop/src/db/modules/llm-config.js @@ -0,0 +1,435 @@ +export default function LLMConfigManager(BaseClass) { + return class extends BaseClass { + 
normalizeFeatureKey(feature) { + if (typeof feature !== 'string') return null; + const trimmed = feature.trim().toLowerCase(); + return trimmed || null; + } + + // 创建或更新LLM配置 + saveLLMConfig(configData) { + this.ensureLLMSchema(); + const now = Date.now(); + + if (configData.is_default) { + const clearDefaultStmt = this.db.prepare('UPDATE llm_configs SET is_default = 0 WHERE is_default = 1'); + clearDefaultStmt.run(); + } + + const existingStmt = this.db.prepare('SELECT * FROM llm_configs WHERE id = ? OR name = ?'); + const existing = existingStmt.get(configData.id || '', configData.name || ''); + + if (existing) { + const updateStmt = this.db.prepare(` + UPDATE llm_configs + SET name = @name, + api_key = @api_key, + base_url = @base_url, + model_name = @model_name, + timeout_ms = @timeout_ms, + is_default = @is_default, + updated_at = @updated_at + WHERE id = @id + `); + + updateStmt.run({ + id: existing.id, + name: configData.name || existing.name, + api_key: configData.api_key || existing.api_key, + base_url: configData.base_url !== undefined ? configData.base_url : existing.base_url, + model_name: this.normalizeModelName( + configData.model_name ?? configData.modelName, + existing.model_name || 'gpt-4o-mini' + ), + timeout_ms: configData.timeout_ms !== undefined + ? this.normalizeTimeoutMs(configData.timeout_ms, null) + : existing.timeout_ms, + is_default: configData.is_default !== undefined ? (configData.is_default ? 
1 : 0) : existing.is_default, + updated_at: now + }); + + return this.getLLMConfigById(existing.id); + } + + const insertStmt = this.db.prepare(` + INSERT INTO llm_configs (id, name, api_key, base_url, model_name, timeout_ms, is_default, created_at, updated_at) + VALUES (@id, @name, @api_key, @base_url, @model_name, @timeout_ms, @is_default, @created_at, @updated_at) + `); + + const id = configData.id || this.generateId(); + insertStmt.run({ + id, + name: configData.name || '默认配置', + api_key: configData.api_key, + base_url: configData.base_url || null, + model_name: this.normalizeModelName(configData.model_name ?? configData.modelName), + timeout_ms: this.normalizeTimeoutMs(configData.timeout_ms, null), + is_default: configData.is_default ? 1 : 0, + created_at: now, + updated_at: now + }); + + return this.getLLMConfigById(id); + } + + getAllLLMConfigs() { + this.ensureLLMSchema(); + const stmt = this.db.prepare('SELECT * FROM llm_configs ORDER BY is_default DESC, updated_at DESC'); + return stmt.all(); + } + + getDefaultLLMConfig() { + this.ensureLLMSchema(); + const stmt = this.db.prepare('SELECT * FROM llm_configs WHERE is_default = 1 LIMIT 1'); + return stmt.get(); + } + + getLLMConfigById(id) { + this.ensureLLMSchema(); + const stmt = this.db.prepare('SELECT * FROM llm_configs WHERE id = ?'); + return stmt.get(id); + } + + deleteLLMConfig(id) { + this.ensureLLMSchema(); + this.ensureLLMFeatureSchema(); + // 先解绑功能映射,避免留下悬空引用 + this.db.prepare('UPDATE llm_feature_configs SET llm_config_id = NULL WHERE llm_config_id = ?').run(id); + const stmt = this.db.prepare('DELETE FROM llm_configs WHERE id = ?'); + return stmt.run(id); + } + + setDefaultLLMConfig(id) { + this.ensureLLMSchema(); + const clearDefaultStmt = this.db.prepare('UPDATE llm_configs SET is_default = 0 WHERE is_default = 1'); + clearDefaultStmt.run(); + + const setDefaultStmt = this.db.prepare('UPDATE llm_configs SET is_default = 1, updated_at = ? 
WHERE id = ?'); + setDefaultStmt.run(Date.now(), id); + + return this.getLLMConfigById(id); + } + + setLLMFeatureConfig(feature, llmConfigId) { + this.ensureLLMFeatureSchema(); + const featureKey = this.normalizeFeatureKey(feature); + if (!featureKey) { + throw new Error('feature 不能为空'); + } + + const now = Date.now(); + let validatedId = null; + if (llmConfigId) { + const exists = this.getLLMConfigById(llmConfigId); + if (!exists) { + throw new Error('指定的 LLM 配置不存在'); + } + validatedId = llmConfigId; + } + + const existing = this.db + .prepare('SELECT feature FROM llm_feature_configs WHERE feature = ?') + .get(featureKey); + + if (existing) { + this.db + .prepare( + 'UPDATE llm_feature_configs SET llm_config_id = @llm_config_id, updated_at = @updated_at WHERE feature = @feature' + ) + .run({ + feature: featureKey, + llm_config_id: validatedId, + updated_at: now + }); + } else { + this.db + .prepare( + 'INSERT INTO llm_feature_configs (feature, llm_config_id, created_at, updated_at) VALUES (@feature, @llm_config_id, @created_at, @updated_at)' + ) + .run({ + feature: featureKey, + llm_config_id: validatedId, + created_at: now, + updated_at: now + }); + } + + return this.getLLMFeatureConfig(featureKey); + } + + getLLMFeatureConfig(feature) { + this.ensureLLMFeatureSchema(); + const featureKey = this.normalizeFeatureKey(feature); + if (!featureKey) return null; + return ( + this.db + .prepare('SELECT feature, llm_config_id FROM llm_feature_configs WHERE feature = ?') + .get(featureKey) || null + ); + } + + getAllLLMFeatureConfigs() { + this.ensureLLMFeatureSchema(); + const rows = this.db + .prepare('SELECT feature, llm_config_id FROM llm_feature_configs') + .all(); + const map = {}; + for (const row of rows) { + map[row.feature] = row.llm_config_id || null; + } + return map; + } + + /** + * 获取某功能应使用的 LLM 配置 + * 若未绑定,则回落到默认配置 + */ + getLLMConfigForFeature(feature) { + this.ensureLLMFeatureSchema(); + const featureKey = this.normalizeFeatureKey(feature); + let config = 
null; + if (featureKey) { + const binding = this.db + .prepare('SELECT llm_config_id FROM llm_feature_configs WHERE feature = ?') + .get(featureKey); + if (binding?.llm_config_id) { + config = this.getLLMConfigById(binding.llm_config_id); + // 若配置已被删除,则清理绑定,继续回落默认 + if (!config) { + this.db + .prepare('UPDATE llm_feature_configs SET llm_config_id = NULL WHERE feature = ?') + .run(featureKey); + } + } + } + return config || this.getDefaultLLMConfig(); + } + + async testLLMConnection(configData) { + this.ensureLLMSchema(); + let requestParams; + let clientConfig; + let timeoutMs; + + try { + const { default: OpenAI } = await import('openai'); + clientConfig = { apiKey: configData.api_key }; + const normalizedBaseURL = this.normalizeLLMBaseURL(configData.base_url); + if (normalizedBaseURL) { + clientConfig.baseURL = normalizedBaseURL; + } + + const client = new OpenAI(clientConfig); + timeoutMs = this.normalizeTimeoutMs(configData.timeout_ms, null); + requestParams = { + model: configData.model_name || 'gpt-4o-mini', + messages: [{ role: 'user', content: 'test' }], + max_tokens: 1, + temperature: 0 + }; + + console.log('LLM Connection Test Debug Info:', { + configData: { + name: configData.name, + base_url: configData.base_url, + model_name: configData.model_name, + timeout_ms: timeoutMs + }, + requestParams, + clientConfig + }); + + const controller = timeoutMs ? new AbortController() : null; + const timer = controller + ? setTimeout(() => controller.abort(new Error('LLM连接超时,请稍后再试')), timeoutMs) + : null; + let testResponse; + try { + testResponse = await client.chat.completions.create( + requestParams, + controller ? 
{ signal: controller.signal } : undefined + ); + } finally { + if (timer) clearTimeout(timer); + } + + if (testResponse && testResponse.choices && testResponse.choices.length > 0) { + return { success: true, message: '连接成功', status: 200 }; + } + + return { success: false, message: 'API响应格式异常', status: testResponse?.status || null }; + } catch (error) { + console.error('LLM Connection Test Failed - Full Debug Info:', { + error: { + message: error.message, + status: error.status, + code: error.code, + type: error.type, + param: error.param, + headers: error.headers, + requestID: error.requestID + }, + configData: { + name: configData.name, + base_url: configData.base_url, + model_name: configData.model_name, + timeout_ms: timeoutMs + }, + requestParams: requestParams || null, + clientConfig: clientConfig || null + }); + + let errorMessage = '连接失败'; + if (error.status === 401) { + errorMessage = 'API密钥无效'; + } else if (error.status === 403) { + errorMessage = 'API密钥无权限'; + } else if (error.status === 404) { + errorMessage = 'API端点不存在或模型不可用'; + } else if (error.status === 429) { + errorMessage = '请求频率过高,请稍后再试'; + } else if (error.name === 'AbortError' || /timeout/i.test(error.message || '')) { + errorMessage = '请求超时,请稍后再试'; + } else if (error.message) { + errorMessage = error.message; + } + + return { success: false, message: errorMessage, status: error.status || null, error: error.message }; + } + } + + normalizeModelName(value, fallback = 'gpt-4o-mini') { + if (typeof value === 'string') { + const trimmed = value.trim(); + if (trimmed) { + return trimmed; + } + } + return fallback; + } + + normalizeLLMBaseURL(baseURL) { + if (!baseURL || typeof baseURL !== 'string') { + return undefined; + } + const trimmed = baseURL.trim(); + if (!trimmed) { + return undefined; + } + // OpenAI SDK 会自动拼接 /chat/completions,这里去掉用户可能输入的终点路径,避免 404 + if (trimmed.endsWith('/chat/completions')) { + return trimmed.replace(/\/chat\/completions\/?$/, ''); + } + // 统一去掉尾部斜杠 + return 
trimmed.replace(/\/+$/, ''); + } + + normalizeTimeoutMs(value, fallback = null) { + if (value === null || value === undefined || value === '') { + return fallback; + } + const parsed = Number(value); + if (!Number.isFinite(parsed) || parsed <= 0) { + return fallback; + } + return Math.round(parsed); + } + + ensureLLMSchema() { + if (this._llmSchemaEnsured) { + this.ensureLLMFeatureSchema(); + return; + } + + const columns = this.db.prepare('PRAGMA table_info(llm_configs)').all(); + if (!columns.length) { + // Table not created yet; schema initialization will handle it. + this.ensureLLMFeatureSchema(); + this._llmSchemaEnsured = true; + return; + } + const columnNames = columns.map((column) => column.name); + const hasProvider = columnNames.includes('provider'); + const hasModel = columnNames.includes('model_name'); + const hasTimeout = columnNames.includes('timeout_ms'); + + if (hasProvider) { + this.rebuildLLMConfigTable(columns); + } else if (!hasModel) { + this.db.prepare("ALTER TABLE llm_configs ADD COLUMN model_name TEXT NOT NULL DEFAULT 'gpt-4o-mini'").run(); + this.db + .prepare( + "UPDATE llm_configs SET model_name = 'gpt-4o-mini' WHERE model_name IS NULL OR TRIM(model_name) = ''" + ) + .run(); + } + if (!hasTimeout) { + this.db.prepare('ALTER TABLE llm_configs ADD COLUMN timeout_ms INTEGER').run(); + } + + this.ensureLLMFeatureSchema(); + this._llmSchemaEnsured = true; + } + + ensureLLMFeatureSchema() { + if (this._llmFeatureSchemaEnsured) return; + this.db.exec(` + CREATE TABLE IF NOT EXISTS llm_feature_configs ( + feature TEXT PRIMARY KEY, + llm_config_id TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + FOREIGN KEY (llm_config_id) REFERENCES llm_configs(id) ON DELETE SET NULL + ) + `); + this.db + .prepare('CREATE INDEX IF NOT EXISTS idx_llm_feature_config_id ON llm_feature_configs(llm_config_id)') + .run(); + this._llmFeatureSchemaEnsured = true; + } + + rebuildLLMConfigTable(existingColumns) { + const transaction = 
this.db.transaction(() => { + this.db.prepare('ALTER TABLE llm_configs RENAME TO llm_configs_backup').run(); + this.db.exec(` + CREATE TABLE IF NOT EXISTS llm_configs ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL, + api_key TEXT NOT NULL, + base_url TEXT, + model_name TEXT NOT NULL DEFAULT 'gpt-4o-mini', + timeout_ms INTEGER, + is_default INTEGER DEFAULT 0, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL + ) + `); + + const hasModel = existingColumns.some((column) => column.name === 'model_name'); + const modelExpression = hasModel ? "COALESCE(model_name, 'gpt-4o-mini')" : "'gpt-4o-mini'"; + const hasTimeout = existingColumns.some((column) => column.name === 'timeout_ms'); + const timeoutExpression = hasTimeout ? 'timeout_ms' : 'NULL'; + + this.db.prepare(` + INSERT INTO llm_configs (id, name, api_key, base_url, model_name, timeout_ms, is_default, created_at, updated_at) + SELECT + id, + name, + api_key, + base_url, + ${modelExpression}, + ${timeoutExpression}, + is_default, + created_at, + updated_at + FROM llm_configs_backup + `).run(); + + this.db.prepare('CREATE INDEX IF NOT EXISTS idx_llm_configs_is_default ON llm_configs(is_default)').run(); + this.db.prepare('DROP TABLE llm_configs_backup').run(); + }); + + transaction(); + } + }; +} diff --git a/desktop/src/db/modules/message.js b/desktop/src/db/modules/message.js new file mode 100644 index 0000000..df0dbb9 --- /dev/null +++ b/desktop/src/db/modules/message.js @@ -0,0 +1,86 @@ +export default function MessageManager(BaseClass) { + return class extends BaseClass { + // 创建消息 + createMessage(messageData) { + const stmt = this.db.prepare(` + INSERT INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) + VALUES (@id, @conversation_id, @sender, @content, @timestamp, @is_ai_generated) + `); + + const messageId = messageData.id || this.generateId(); + + stmt.run({ + id: messageId, + conversation_id: messageData.conversation_id, + sender: messageData.sender, // 'user' or 
'character' + content: messageData.content, + timestamp: messageData.timestamp || Date.now(), + is_ai_generated: messageData.is_ai_generated ? 1 : 0 + }); + + return this.getMessageById(messageId); + } + + // 获取对话的所有消息 + getMessagesByConversation(conversationId) { + const stmt = this.db.prepare(` + SELECT * FROM messages + WHERE conversation_id = ? + ORDER BY timestamp ASC + `); + + return stmt.all(conversationId); + } + + // 获取对话的最近消息(用于上下文构建) + getRecentMessagesByConversation(conversationId, limit = 10) { + const stmt = this.db.prepare(` + SELECT * + FROM messages + WHERE conversation_id = ? + ORDER BY timestamp DESC + LIMIT ? + `); + + const rows = stmt.all(conversationId, limit || 10); + return rows.reverse(); + } + + // 获取单个消息 + getMessageById(id) { + const stmt = this.db.prepare('SELECT * FROM messages WHERE id = ?'); + return stmt.get(id); + } + + // 更新消息 + updateMessage(id, updates) { + const allowedFields = ['content']; + const updateFields = Object.keys(updates).filter(key => allowedFields.includes(key)); + + if (updateFields.length === 0) { + return this.getMessageById(id); + } + + const setClause = updateFields.map(field => `${field} = @${field}`).join(', '); + const stmt = this.db.prepare(` + UPDATE messages + SET ${setClause} + WHERE id = @id + `); + + const params = { id }; + updateFields.forEach(field => { + params[field] = updates[field]; + }); + + stmt.run(params); + return this.getMessageById(id); + } + + // 获取消息总数 + getMessageCount() { + const stmt = this.db.prepare('SELECT COUNT(*) as count FROM messages'); + return stmt.get().count; + } + }; +} \ No newline at end of file diff --git a/desktop/src/db/modules/suggestion-config.js b/desktop/src/db/modules/suggestion-config.js new file mode 100644 index 0000000..8980bec --- /dev/null +++ b/desktop/src/db/modules/suggestion-config.js @@ -0,0 +1,147 @@ +export default function SuggestionConfigManager(BaseClass) { + return class extends BaseClass { + ensureSuggestionSchema() { + if 
(this._suggestionSchemaEnsured) return; + + const columns = this.db.prepare('PRAGMA table_info(suggestion_configs)').all(); + const names = columns.map((c) => c.name); + + const ensureColumn = (name, sql) => { + if (!names.includes(name)) { + this.db.prepare(sql).run(); + } + }; + + ensureColumn('topic_detection_enabled', 'ALTER TABLE suggestion_configs ADD COLUMN topic_detection_enabled INTEGER DEFAULT 0'); + ensureColumn('model_name', "ALTER TABLE suggestion_configs ADD COLUMN model_name TEXT DEFAULT 'gpt-4o-mini'"); + ensureColumn('situation_llm_enabled', 'ALTER TABLE suggestion_configs ADD COLUMN situation_llm_enabled INTEGER DEFAULT 0'); + ensureColumn('situation_model_name', "ALTER TABLE suggestion_configs ADD COLUMN situation_model_name TEXT DEFAULT 'gpt-4o-mini'"); + ensureColumn('thinking_enabled', 'ALTER TABLE suggestion_configs ADD COLUMN thinking_enabled INTEGER DEFAULT 0'); + + // 补齐已有行的默认值 + this.db + .prepare( + "UPDATE suggestion_configs SET topic_detection_enabled = COALESCE(topic_detection_enabled, 0), model_name = COALESCE(NULLIF(TRIM(model_name), ''), 'gpt-4o-mini'), situation_llm_enabled = COALESCE(situation_llm_enabled, 0), situation_model_name = COALESCE(NULLIF(TRIM(situation_model_name), ''), 'gpt-4o-mini'), thinking_enabled = COALESCE(thinking_enabled, 0)" + ) + .run(); + + this._suggestionSchemaEnsured = true; + } + + // 初始化默认建议配置 + seedDefaultSuggestionConfig() { + this.ensureSuggestionSchema(); + const existing = this.db.prepare('SELECT COUNT(*) as count FROM suggestion_configs').get().count; + if (existing > 0) { + return; + } + + const now = Date.now(); + const stmt = this.db.prepare(` + INSERT INTO suggestion_configs ( + id, + enable_passive_suggestion, + suggestion_count, + silence_threshold_seconds, + message_threshold_count, + cooldown_seconds, + context_message_limit, + topic_detection_enabled, + situation_llm_enabled, + model_name, + situation_model_name, + thinking_enabled, + created_at, + updated_at + ) VALUES ( + @id, + 
@enable_passive_suggestion, + @suggestion_count, + @silence_threshold_seconds, + @message_threshold_count, + @cooldown_seconds, + @context_message_limit, + @topic_detection_enabled, + @situation_llm_enabled, + @model_name, + @situation_model_name, + @thinking_enabled, + @created_at, + @updated_at + ) + `); + + stmt.run({ + id: 'default', + enable_passive_suggestion: 1, + suggestion_count: 3, + silence_threshold_seconds: 3, + message_threshold_count: 3, + cooldown_seconds: 15, + context_message_limit: 20, + topic_detection_enabled: 0, + situation_llm_enabled: 0, + model_name: 'gpt-4o-mini', + situation_model_name: 'gpt-4o-mini', + thinking_enabled: 0, + created_at: now, + updated_at: now + }); + } + + // 获取建议配置 + getSuggestionConfig() { + this.ensureSuggestionSchema(); + let config = this.db.prepare('SELECT * FROM suggestion_configs ORDER BY updated_at DESC LIMIT 1').get(); + if (!config) { + this.seedDefaultSuggestionConfig(); + config = this.db.prepare('SELECT * FROM suggestion_configs ORDER BY updated_at DESC LIMIT 1').get(); + } + return config; + } + + // 更新建议配置 + updateSuggestionConfig(updates) { + this.ensureSuggestionSchema(); + const current = this.getSuggestionConfig(); + if (!current) { + throw new Error('Suggestion config not found'); + } + + const fields = []; + const params = { id: current.id, updated_at: Date.now() }; + + const updatableFields = [ + 'enable_passive_suggestion', + 'suggestion_count', + 'silence_threshold_seconds', + 'message_threshold_count', + 'cooldown_seconds', + 'context_message_limit', + 'topic_detection_enabled', + 'model_name', + 'thinking_enabled' + ]; + + updatableFields.forEach((field) => { + if (updates[field] !== undefined && updates[field] !== null) { + fields.push(`${field} = @${field}`); + params[field] = updates[field]; + } + }); + + if (fields.length === 0) { + return current; + } + + const stmt = this.db.prepare(` + UPDATE suggestion_configs + SET ${fields.join(', ')}, updated_at = @updated_at + WHERE id = @id + `); + 
+ stmt.run(params); + return this.getSuggestionConfig(); + } + }; +} diff --git a/desktop/src/db/modules/tag.js b/desktop/src/db/modules/tag.js new file mode 100644 index 0000000..4ef5561 --- /dev/null +++ b/desktop/src/db/modules/tag.js @@ -0,0 +1,43 @@ +export default function TagManager(BaseClass) { + return class extends BaseClass { + // 创建标签 + createTag(tagData) { + const stmt = this.db.prepare(` + INSERT OR IGNORE INTO tags (id, name, color) + VALUES (@id, @name, @color) + `); + + const id = tagData.id || this.generateId(); + + stmt.run({ + id, + name: tagData.name, + color: tagData.color || 'primary' + }); + + return this.getTagById(id); + } + + // 获取所有标签 + getAllTags() { + const stmt = this.db.prepare('SELECT * FROM tags ORDER BY name'); + return stmt.all(); + } + + // 获取单个标签 + getTagById(id) { + const stmt = this.db.prepare('SELECT * FROM tags WHERE id = ?'); + return stmt.get(id); + } + + // 为角色添加标签 + addTagToCharacter(characterId, tagId) { + const stmt = this.db.prepare(` + INSERT OR IGNORE INTO character_tags (character_id, tag_id) + VALUES (?, ?) 
+ `); + + stmt.run(characterId, tagId); + } + }; +} diff --git a/desktop/src/db/modules/utils.js b/desktop/src/db/modules/utils.js new file mode 100644 index 0000000..ebfe153 --- /dev/null +++ b/desktop/src/db/modules/utils.js @@ -0,0 +1,350 @@ +import path from 'path'; +import fs from 'fs'; +import { fileURLToPath } from 'url'; + +// ESM 下补充 __dirname +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +export default function Utils(BaseClass) { + return class extends BaseClass { + // 生成ID + generateId() { + return Date.now().toString(36) + Math.random().toString(36).substr(2); + } + + // 获取统计数据 + getStatistics() { + const characterCount = this.db.prepare('SELECT COUNT(*) as count FROM characters').get().count; + const conversationCount = this.db.prepare('SELECT COUNT(*) as count FROM conversations').get().count; + const messageCount = this.db.prepare('SELECT COUNT(*) as count FROM messages').get().count; + + // 计算平均好感度 + const avgAffinity = this.db.prepare('SELECT AVG(affinity) as avg FROM characters').get().avg || 0; + + return { + characterCount, + conversationCount, + messageCount, + avgAffinity: Math.round(avgAffinity) + }; + } + + // 获取角色页面的统计数据 + getCharacterPageStatistics() { + // 总计攻略对象 + const characterCount = this.db.prepare('SELECT COUNT(*) as count FROM characters').get().count; + + // 活跃对话:两天内创建的新对话 + const twoDaysAgo = Date.now() - (2 * 24 * 60 * 60 * 1000); + const activeConversationCount = this.db.prepare(` + SELECT COUNT(*) as count + FROM conversations + WHERE created_at >= ? 
+ `).get(twoDaysAgo).count; + + // 计算平均好感度 + const avgAffinity = this.db.prepare('SELECT AVG(affinity) as avg FROM characters').get().avg || 0; + + return { + characterCount, + activeConversationCount, + avgAffinity: Math.round(avgAffinity) + }; + } + + // 批量插入示例数据(从SQL文件加载) + seedSampleData() { + console.log('Seeding sample data...'); + + // 检查对话数据是否存在 + const conversationCount = this.db.prepare('SELECT COUNT(*) as count FROM conversations').get().count; + const characterCount = this.db.prepare('SELECT COUNT(*) as count FROM characters').get().count; + const aiAnalysisCount = this.db.prepare('SELECT COUNT(*) as count FROM ai_analysis').get().count; + + console.log(`Current database state: ${characterCount} characters, ${conversationCount} conversations, ${aiAnalysisCount} AI analyses`); + + // 如果对话数据已存在,检查是否需要插入AI分析数据 + if (conversationCount > 0) { + // 如果AI分析数据不存在,只插入AI分析相关的数据 + if (aiAnalysisCount === 0) { + console.log('Conversation data exists but AI analysis data missing, inserting AI analysis data only...'); + this.seedAIDataOnly(); + } else { + console.log(`Conversation data already exists (${aiAnalysisCount} AI analyses found), skipping seed...`); + // 即使有数据,也检查一下是否有分析报告数据 + const reportCount = this.db.prepare('SELECT COUNT(*) as count FROM ai_analysis WHERE insight_type = ?').get('analysis_report').count; + console.log(`Found ${reportCount} analysis reports in database`); + } + return; + } + + // 如果没有角色数据,需要先插入角色 + if (characterCount === 0) { + console.log('No characters found, will insert all data including characters'); + } else { + console.log('Characters exist, will only insert conversations and messages'); + } + + // 如果角色数据不存在,需要先插入角色数据 + const needCharacters = characterCount === 0; + + try { + // 读取并执行SQL种子文件 + const seedPath = path.join(__dirname, '../seed.sql'); + if (fs.existsSync(seedPath)) { + const seedSQL = fs.readFileSync(seedPath, 'utf-8'); + + // 改进SQL语句分割:先移除注释行,然后按分号分割 + const lines = seedSQL.split('\n'); + let cleanedLines = []; + let 
inMultiLineStatement = false; + let currentStatement = ''; + + for (let i = 0; i < lines.length; i++) { + let line = lines[i].trim(); + + // 跳过空行和纯注释行 + if (!line || line.startsWith('--')) { + continue; + } + + // 移除行内注释(-- 后面的内容) + const commentIndex = line.indexOf('--'); + if (commentIndex >= 0) { + line = line.substring(0, commentIndex).trim(); + if (!line) continue; + } + + // 累积到当前语句 + currentStatement += (currentStatement ? ' ' : '') + line; + + // 如果行以分号结尾,说明语句完整 + if (line.endsWith(';')) { + const statement = currentStatement.slice(0, -1).trim(); // 移除末尾的分号 + if (statement) { + cleanedLines.push(statement); + } + currentStatement = ''; + } + } + + // 处理最后可能没有分号的语句 + if (currentStatement.trim()) { + cleanedLines.push(currentStatement.trim()); + } + + console.log(`Found ${cleanedLines.length} SQL statements to execute`); + + const transaction = this.db.transaction(() => { + for (let i = 0; i < cleanedLines.length; i++) { + const statement = cleanedLines[i]; + + // 如果角色数据已存在,跳过角色相关的INSERT语句 + if (!needCharacters && statement.toUpperCase().includes('INSERT') && + (statement.includes('INSERT INTO characters') || + statement.includes('INSERT INTO tags') || + statement.includes('INSERT INTO character_tags'))) { + console.log(`Skipping statement ${i + 1}: character data (already exists)`); + continue; + } + + try { + // 执行SQL语句(添加分号) + this.db.exec(statement + ';'); + if (statement.includes('INSERT INTO conversations')) { + console.log(`✓ Executed conversation INSERT statement ${i + 1}`); + } + } catch (err) { + // 忽略重复插入的错误(INSERT OR IGNORE 会处理) + if (err.message.includes('UNIQUE constraint') || err.message.includes('already exists')) { + console.log(`Statement ${i + 1}: skipped (duplicate)`); + } else { + console.error(`Error executing statement ${i + 1}:`, err.message); + console.error('Statement preview:', statement.substring(0, 150) + '...'); + // 继续执行其他语句,不中断 + } + } + } + }); + + transaction(); + console.log('Sample data seeded successfully from SQL file'); 
+ + // 验证数据插入 + const finalConvCount = this.db.prepare('SELECT COUNT(*) as count FROM conversations').get().count; + const finalMsgCount = this.db.prepare('SELECT COUNT(*) as count FROM messages').get().count; + const finalCharCount = this.db.prepare('SELECT COUNT(*) as count FROM characters').get().count; + console.log(`Data verification: ${finalCharCount} characters, ${finalConvCount} conversations, ${finalMsgCount} messages`); + + if (finalConvCount === 0) { + console.warn('⚠️ Warning: No conversations were inserted!'); + console.warn('This might indicate a SQL parsing or execution issue.'); + } else { + console.log('✅ Data seeding completed successfully'); + } + } else { + console.warn('Seed SQL file not found, skipping data seeding'); + } + } catch (error) { + console.error('Error seeding sample data:', error); + console.error(error.stack); + // 不抛出错误,允许应用继续运行 + } + } + + // 只插入AI分析数据(当对话数据已存在但AI分析数据缺失时) + seedAIDataOnly() { + console.log('Seeding AI analysis data only...'); + + try { + const seedPath = path.join(__dirname, '../seed.sql'); + if (!fs.existsSync(seedPath)) { + console.warn('Seed SQL file not found, skipping AI data seeding'); + return; + } + + const seedSQL = fs.readFileSync(seedPath, 'utf-8'); + const lines = seedSQL.split('\n'); + let cleanedLines = []; + let currentStatement = ''; + + for (let i = 0; i < lines.length; i++) { + let line = lines[i]; + const originalLine = line; + + // 跳过空行 + if (!line.trim()) { + continue; + } + + // 跳过纯注释行(整行都是注释) + // 但如果currentStatement已经有内容,说明这是多行语句中的注释,应该跳过但不清空currentStatement + if (line.trim().startsWith('--')) { + continue; // 跳过注释行,但保留currentStatement + } + + // 移除行内注释(但保留SQL代码) + const commentIndex = line.indexOf('--'); + if (commentIndex >= 0) { + // 检查--是否在字符串内(简单检查) + const beforeComment = line.substring(0, commentIndex); + const singleQuotes = (beforeComment.match(/'/g) || []).length; + // 如果单引号数量是偶数,说明--不在字符串内,可以移除注释 + if (singleQuotes % 2 === 0) { + line = line.substring(0, commentIndex).trim(); 
+ if (!line) continue; + } + } + + line = line.trim(); + if (!line) continue; + + // 累积到当前语句 + if (currentStatement) { + currentStatement += ' ' + line; + } else { + currentStatement = line; + } + + // 如果行以分号结尾,说明语句完整 + if (line.endsWith(';')) { + const statement = currentStatement.slice(0, -1).trim(); // 移除末尾的分号 + if (statement) { + // 只处理AI分析相关的INSERT语句 + const upperStatement = statement.toUpperCase(); + const isAIAnalysis = upperStatement.includes('INSERT') && upperStatement.includes('AI_ANALYSIS'); + const isAISuggestions = upperStatement.includes('INSERT') && upperStatement.includes('AI_SUGGESTIONS'); + + if (isAIAnalysis || isAISuggestions) { + cleanedLines.push(statement); + console.log(`[SQL Parser] Found AI statement (line ${i + 1}): ${statement.substring(0, 150)}...`); + } + } + currentStatement = ''; + } + } + + if (currentStatement.trim()) { + const statement = currentStatement.trim(); + const upperStatement = statement.toUpperCase(); + if (upperStatement.includes('INSERT') && + (upperStatement.includes('INSERT INTO AI_ANALYSIS') || + upperStatement.includes('INSERT INTO AI_SUGGESTIONS'))) { + cleanedLines.push(statement); + console.log(`[SQL Parser] Found AI statement (final): ${statement.substring(0, 100)}...`); + } + } + + console.log(`Found ${cleanedLines.length} AI-related SQL statements to execute`); + + // 如果没找到,打印一些调试信息 + if (cleanedLines.length === 0) { + console.log('[SQL Parser] Debug: Checking seed.sql content...'); + const seedSQL = fs.readFileSync(seedPath, 'utf-8'); + const hasAIAnalysis = seedSQL.includes('INSERT') && seedSQL.includes('ai_analysis'); + const hasAISuggestions = seedSQL.includes('INSERT') && seedSQL.includes('ai_suggestions'); + console.log(`[SQL Parser] seed.sql contains ai_analysis: ${hasAIAnalysis}, ai_suggestions: ${hasAISuggestions}`); + + // 尝试直接查找包含ai_analysis的行 + const lines = seedSQL.split('\n'); + let aiAnalysisLines = 0; + let aiSuggestionLines = 0; + for (let i = 0; i < lines.length; i++) { + if 
(lines[i].includes('ai_analysis')) aiAnalysisLines++; + if (lines[i].includes('ai_suggestions')) aiSuggestionLines++; + } + console.log(`[SQL Parser] Lines containing ai_analysis: ${aiAnalysisLines}, ai_suggestions: ${aiSuggestionLines}`); + } + + if (cleanedLines.length === 0) { + console.log('No AI analysis data found in seed file'); + return; + } + + // 打印前几个语句用于调试 + if (cleanedLines.length > 0) { + console.log('First statement preview:', cleanedLines[0].substring(0, 200) + '...'); + } + + const transaction = this.db.transaction(() => { + let successCount = 0; + let errorCount = 0; + for (let i = 0; i < cleanedLines.length; i++) { + const statement = cleanedLines[i]; + try { + this.db.exec(statement + ';'); + successCount++; + if (statement.includes('INSERT INTO ai_analysis')) { + console.log(`✓ Executed AI analysis INSERT statement ${i + 1}/${cleanedLines.length}`); + } else if (statement.includes('INSERT INTO ai_suggestions')) { + console.log(`✓ Executed AI suggestion INSERT statement ${i + 1}/${cleanedLines.length}`); + } + } catch (err) { + errorCount++; + if (err.message.includes('UNIQUE constraint') || err.message.includes('already exists')) { + console.log(`Statement ${i + 1}: skipped (duplicate)`); + } else { + console.error(`Error executing AI statement ${i + 1}:`, err.message); + console.error('Statement preview:', statement.substring(0, 200) + '...'); + } + } + } + console.log(`AI data insertion summary: ${successCount} succeeded, ${errorCount} errors`); + }); + + transaction(); + + // 验证数据插入 + const finalAICount = this.db.prepare('SELECT COUNT(*) as count FROM ai_analysis').get().count; + const finalSuggestionCount = this.db.prepare('SELECT COUNT(*) as count FROM ai_suggestions').get().count; + console.log(`AI data verification: ${finalAICount} AI analyses, ${finalSuggestionCount} AI suggestions`); + console.log('✅ AI analysis data seeding completed successfully'); + + } catch (error) { + console.error('Error seeding AI analysis data:', error); + 
console.error(error.stack); + } + } + }; +} \ No newline at end of file diff --git a/desktop/src/db/schema-base.sql b/desktop/src/db/schema-base.sql new file mode 100644 index 0000000..18a3d1c --- /dev/null +++ b/desktop/src/db/schema-base.sql @@ -0,0 +1,214 @@ +-- LiveGalGame Database Base Schema +-- 基础版本,不包含新的 decision_points 和 suggestion_batches 表 +-- 用于向后兼容旧数据库 + +-- 攻略对象表 +CREATE TABLE IF NOT EXISTS characters ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL, + nickname TEXT, + relationship_label TEXT, -- 关系标签(青梅竹马、学生会长等) + avatar_color TEXT, -- 头像颜色 + affinity INTEGER DEFAULT 50, -- 好感度(0-100) + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + notes TEXT -- 备注 +); + +-- 关键词标签表(用于角色特点) +CREATE TABLE IF NOT EXISTS tags ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL UNIQUE, + color TEXT DEFAULT 'primary' -- 标签颜色 +); + +-- 角色-标签关联表 +CREATE TABLE IF NOT EXISTS character_tags ( + character_id TEXT NOT NULL, + tag_id TEXT NOT NULL, + PRIMARY KEY (character_id, tag_id), + FOREIGN KEY (character_id) REFERENCES characters(id) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES tags(id) ON DELETE CASCADE +); + +-- 对话记录表 +CREATE TABLE IF NOT EXISTS conversations ( + id TEXT PRIMARY KEY, + character_id TEXT NOT NULL, + title TEXT, -- 对话标题 + date INTEGER NOT NULL, -- 对话日期(时间戳) + affinity_change INTEGER DEFAULT 0, -- 好感度变化 + summary TEXT, -- 对话摘要 + tags TEXT, -- 标签(逗号分隔) + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + FOREIGN KEY (character_id) REFERENCES characters(id) ON DELETE CASCADE +); + +-- 消息记录表 +CREATE TABLE IF NOT EXISTS messages ( + id TEXT PRIMARY KEY, + conversation_id TEXT NOT NULL, + sender TEXT NOT NULL, -- 'user' or 'character' + content TEXT NOT NULL, + timestamp INTEGER NOT NULL, + is_ai_generated INTEGER DEFAULT 0, -- 是否AI生成 + FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE +); + +-- AI分析记录表 +CREATE TABLE IF NOT EXISTS ai_analysis ( + id TEXT PRIMARY KEY, + conversation_id TEXT NOT NULL, + message_id 
TEXT, -- 关联的具体消息(可选) + insight_type TEXT, -- 洞察类型(情感分析、建议等) + content TEXT NOT NULL, -- 分析内容 + created_at INTEGER NOT NULL, + FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE, + FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE +); + +-- AI建议记录表(基础版本,兼容旧数据库) +CREATE TABLE IF NOT EXISTS ai_suggestions ( + id TEXT PRIMARY KEY, + conversation_id TEXT NOT NULL, + message_id TEXT, -- 关联的具体消息(可选) + title TEXT NOT NULL, + content TEXT NOT NULL, + affinity_prediction INTEGER, -- 好感度变化预测 + tags TEXT, -- 标签(逗号分隔) + is_selected INTEGER DEFAULT 0, -- 用户显式选择(用于“我采用了哪个建议”) + selected_at INTEGER, -- 选择时间戳(毫秒) + created_at INTEGER NOT NULL, + FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE, + FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE +); + +-- 角色详细信息表(从会话中总结) +CREATE TABLE IF NOT EXISTS character_details ( + character_id TEXT PRIMARY KEY, + profile TEXT, -- JSON格式:角色档案(基本信息、背景等) + personality_traits TEXT, -- JSON格式:性格特点(从对话中总结) + likes_dislikes TEXT, -- JSON格式:喜好厌恶(从对话中提取) + important_events TEXT, -- JSON格式:重要事件(从对话中提取) + conversation_summary TEXT, -- 对话总结 + custom_fields TEXT, -- JSON格式:自定义字段(可扩展) + updated_at INTEGER NOT NULL, + FOREIGN KEY (character_id) REFERENCES characters(id) ON DELETE CASCADE +); + +-- LLM配置表 +CREATE TABLE IF NOT EXISTS llm_configs ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL, -- 配置名称 + api_key TEXT NOT NULL, -- API密钥 + base_url TEXT, -- API基础URL(可选,默认使用提供商的标准URL) + model_name TEXT NOT NULL DEFAULT 'gpt-4o-mini', -- 模型名称 + timeout_ms INTEGER, -- 请求超时(毫秒,可选) + is_default INTEGER DEFAULT 0, -- 是否为默认配置 + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL +); + +-- LLM 功能绑定表(为特定功能选择指定的 LLM 配置;未配置则回落到默认 LLM) +CREATE TABLE IF NOT EXISTS llm_feature_configs ( + feature TEXT PRIMARY KEY, -- 功能标识,如 suggestion / situation / review + llm_config_id TEXT, -- 绑定的 llm_configs.id + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + FOREIGN KEY 
(llm_config_id) REFERENCES llm_configs(id) ON DELETE SET NULL +); + +-- 对话建议配置表 +CREATE TABLE IF NOT EXISTS suggestion_configs ( + id TEXT PRIMARY KEY, + enable_passive_suggestion INTEGER DEFAULT 1, + suggestion_count INTEGER DEFAULT 3, + silence_threshold_seconds INTEGER DEFAULT 3, + message_threshold_count INTEGER DEFAULT 3, + cooldown_seconds INTEGER DEFAULT 30, + context_message_limit INTEGER DEFAULT 10, + topic_detection_enabled INTEGER DEFAULT 0, + situation_llm_enabled INTEGER DEFAULT 0, + model_name TEXT DEFAULT 'gpt-4o-mini', + situation_model_name TEXT DEFAULT 'gpt-4o-mini', + thinking_enabled INTEGER DEFAULT 0, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL +); + +-- ==================== ASR(语音识别)相关表 ==================== + +-- 音频源配置表(两个音源:speaker1, speaker2) +CREATE TABLE IF NOT EXISTS audio_sources ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL, -- 音源名称(Speaker 1 / Speaker 2) + is_active INTEGER DEFAULT 0, -- 是否启用 + device_id TEXT, -- 设备ID(系统音频设备标识) + device_name TEXT, -- 设备名称(显示用) + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL +); + +-- 语音识别记录表 +CREATE TABLE IF NOT EXISTS speech_recognition_records ( + id TEXT PRIMARY KEY, + conversation_id TEXT NOT NULL, -- 关联的对话 + source_id TEXT NOT NULL, -- 音频源ID + message_id TEXT, -- 关联的消息(识别完成后关联) + audio_data BLOB, -- 音频数据(可选,用于回放,如果 retain_audio_files=0 则为 NULL) + audio_file_path TEXT, -- 音频文件路径(如果 retain_audio_files=1) + audio_duration REAL, -- 音频时长(秒) + recognized_text TEXT, -- 识别结果 + confidence REAL, -- 置信度 + start_time INTEGER NOT NULL, -- 识别开始时间 + end_time INTEGER, -- 识别结束时间 + status TEXT NOT NULL, -- 状态:'recording', 'recognizing', 'completed', 'failed' + error_message TEXT, -- 错误信息 + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE, + FOREIGN KEY (source_id) REFERENCES audio_sources(id) ON DELETE CASCADE, + FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE +); + +-- ASR 配置表 
+CREATE TABLE IF NOT EXISTS asr_configs ( + id TEXT PRIMARY KEY, + model_name TEXT NOT NULL, -- 模型名称(whisper-tiny/whisper-base 等) + language TEXT NOT NULL DEFAULT 'zh', -- 识别语言 + enable_vad INTEGER DEFAULT 1, -- 是否启用 VAD + sentence_pause_threshold REAL DEFAULT 1.0, -- 分句停顿阈值(秒) + retain_audio_files INTEGER DEFAULT 0, -- 是否保留录音文件(0: 不保留,1: 保留) + audio_retention_days INTEGER DEFAULT 30, -- 录音文件保留天数 + audio_storage_path TEXT, -- 录音文件存储路径(为空则使用默认路径) + is_default INTEGER DEFAULT 0, -- 是否为默认配置 + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL +); + +-- 索引优化 +CREATE INDEX IF NOT EXISTS idx_conversations_character_id ON conversations(character_id); +CREATE INDEX IF NOT EXISTS idx_conversations_date ON conversations(date); +CREATE INDEX IF NOT EXISTS idx_messages_conversation_id ON messages(conversation_id); +CREATE INDEX IF NOT EXISTS idx_messages_timestamp ON messages(timestamp); +CREATE INDEX IF NOT EXISTS idx_character_tags_character_id ON character_tags(character_id); +CREATE INDEX IF NOT EXISTS idx_ai_analysis_conversation_id ON ai_analysis(conversation_id); +CREATE INDEX IF NOT EXISTS idx_ai_suggestions_conversation_id ON ai_suggestions(conversation_id); + +-- 索引优化 +CREATE INDEX IF NOT EXISTS idx_speech_records_conversation_id ON speech_recognition_records(conversation_id); +CREATE INDEX IF NOT EXISTS idx_speech_records_source_id ON speech_recognition_records(source_id); +CREATE INDEX IF NOT EXISTS idx_speech_records_status ON speech_recognition_records(status); +CREATE INDEX IF NOT EXISTS idx_speech_records_created_at ON speech_recognition_records(created_at); +CREATE INDEX IF NOT EXISTS idx_asr_configs_is_default ON asr_configs(is_default); + +-- 剧情复盘表 +CREATE TABLE IF NOT EXISTS conversation_reviews ( + id TEXT PRIMARY KEY, + conversation_id TEXT NOT NULL, + review_data TEXT NOT NULL, -- TOON 格式解析后存为 JSON + created_at INTEGER NOT NULL, + model_used TEXT, + FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE +); diff --git 
a/desktop/src/db/schema.sql b/desktop/src/db/schema.sql index 653b94e..ae19b00 100644 --- a/desktop/src/db/schema.sql +++ b/desktop/src/db/schema.sql @@ -71,16 +71,40 @@ CREATE TABLE IF NOT EXISTS ai_suggestions ( id TEXT PRIMARY KEY, conversation_id TEXT NOT NULL, message_id TEXT, -- 关联的具体消息(可选) + decision_point_id TEXT, -- 关联的决策点(同一决策点可有多批建议) + batch_id TEXT, -- 关联的建议批次(一次生成/换一批) + suggestion_index INTEGER, -- 批次内序号(0..n-1) title TEXT NOT NULL, content TEXT NOT NULL, affinity_prediction INTEGER, -- 好感度变化预测 tags TEXT, -- 标签(逗号分隔) - is_used INTEGER DEFAULT 0, -- 是否被采用 + is_selected INTEGER DEFAULT 0, -- 用户显式选择(用于“我采用了哪个建议”) + selected_at INTEGER, -- 选择时间戳(毫秒) created_at INTEGER NOT NULL, FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE, FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE ); +-- 决策点表:代表“用户需要决定下一步怎么回复”的关键时刻 +CREATE TABLE IF NOT EXISTS decision_points ( + id TEXT PRIMARY KEY, + conversation_id TEXT NOT NULL, + anchor_message_id TEXT, -- 锚点消息(触发时上下文的最后一条消息) + created_at INTEGER NOT NULL, + FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE, + FOREIGN KEY (anchor_message_id) REFERENCES messages(id) ON DELETE SET NULL +); + +-- 建议批次表:一次生成/换一批 = 一个批次 +CREATE TABLE IF NOT EXISTS suggestion_batches ( + id TEXT PRIMARY KEY, + decision_point_id TEXT NOT NULL, + trigger TEXT, -- manual/passive 等 + reason TEXT, -- manual/refresh/silence/topic_change 等 + created_at INTEGER NOT NULL, + FOREIGN KEY (decision_point_id) REFERENCES decision_points(id) ON DELETE CASCADE +); + -- 角色详细信息表(从会话中总结) CREATE TABLE IF NOT EXISTS character_details ( character_id TEXT PRIMARY KEY, @@ -98,14 +122,41 @@ CREATE TABLE IF NOT EXISTS character_details ( CREATE TABLE IF NOT EXISTS llm_configs ( id TEXT PRIMARY KEY, name TEXT NOT NULL, -- 配置名称 - provider TEXT NOT NULL DEFAULT 'openai', -- 提供商(openai等) api_key TEXT NOT NULL, -- API密钥 base_url TEXT, -- API基础URL(可选,默认使用提供商的标准URL) + model_name TEXT NOT NULL DEFAULT 
'gpt-4o-mini', -- 模型名称 + timeout_ms INTEGER, -- 请求超时(毫秒,可选) is_default INTEGER DEFAULT 0, -- 是否为默认配置 created_at INTEGER NOT NULL, updated_at INTEGER NOT NULL ); +-- LLM 功能绑定表(为特定功能选择指定的 LLM 配置;未配置则回落到默认 LLM) +CREATE TABLE IF NOT EXISTS llm_feature_configs ( + feature TEXT PRIMARY KEY, -- 功能标识,如 suggestion / situation / review + llm_config_id TEXT, -- 绑定的 llm_configs.id + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + FOREIGN KEY (llm_config_id) REFERENCES llm_configs(id) ON DELETE SET NULL +); + +-- 对话建议配置表 +CREATE TABLE IF NOT EXISTS suggestion_configs ( + id TEXT PRIMARY KEY, + enable_passive_suggestion INTEGER DEFAULT 1, + suggestion_count INTEGER DEFAULT 3, + silence_threshold_seconds INTEGER DEFAULT 3, + message_threshold_count INTEGER DEFAULT 3, + cooldown_seconds INTEGER DEFAULT 30, + context_message_limit INTEGER DEFAULT 10, + topic_detection_enabled INTEGER DEFAULT 0, + situation_llm_enabled INTEGER DEFAULT 0, + model_name TEXT DEFAULT 'gpt-4o-mini', + situation_model_name TEXT DEFAULT 'gpt-4o-mini', + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL +); + -- 索引优化 CREATE INDEX IF NOT EXISTS idx_conversations_character_id ON conversations(character_id); CREATE INDEX IF NOT EXISTS idx_conversations_date ON conversations(date); @@ -114,6 +165,10 @@ CREATE INDEX IF NOT EXISTS idx_messages_timestamp ON messages(timestamp); CREATE INDEX IF NOT EXISTS idx_character_tags_character_id ON character_tags(character_id); CREATE INDEX IF NOT EXISTS idx_ai_analysis_conversation_id ON ai_analysis(conversation_id); CREATE INDEX IF NOT EXISTS idx_ai_suggestions_conversation_id ON ai_suggestions(conversation_id); +CREATE INDEX IF NOT EXISTS idx_ai_suggestions_decision_point_id ON ai_suggestions(decision_point_id); +CREATE INDEX IF NOT EXISTS idx_ai_suggestions_batch_id ON ai_suggestions(batch_id); +CREATE INDEX IF NOT EXISTS idx_decision_points_conversation_id ON decision_points(conversation_id); +CREATE INDEX IF NOT EXISTS 
idx_suggestion_batches_decision_point_id ON suggestion_batches(decision_point_id); CREATE INDEX IF NOT EXISTS idx_character_details_character_id ON character_details(character_id); CREATE INDEX IF NOT EXISTS idx_llm_configs_is_default ON llm_configs(is_default); @@ -173,3 +228,13 @@ CREATE INDEX IF NOT EXISTS idx_speech_records_source_id ON speech_recognition_re CREATE INDEX IF NOT EXISTS idx_speech_records_status ON speech_recognition_records(status); CREATE INDEX IF NOT EXISTS idx_speech_records_created_at ON speech_recognition_records(created_at); CREATE INDEX IF NOT EXISTS idx_asr_configs_is_default ON asr_configs(is_default); + +-- 剧情复盘表 +CREATE TABLE IF NOT EXISTS conversation_reviews ( + id TEXT PRIMARY KEY, + conversation_id TEXT NOT NULL, + review_data TEXT NOT NULL, -- TOON 格式解析后存为 JSON + created_at INTEGER NOT NULL, + model_used TEXT, + FOREIGN KEY (conversation_id) REFERENCES conversations(id) ON DELETE CASCADE +); diff --git a/desktop/src/db/seed.sql b/desktop/src/db/seed.sql index 026dd13..8dd6b11 100644 --- a/desktop/src/db/seed.sql +++ b/desktop/src/db/seed.sql @@ -81,11 +81,11 @@ INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, ('msg_hana_3_3', 'conv_hana_3', 'character', '那我们以后可以经常一起来。', 1735689804000, 0); -- 插入AI建议数据 -INSERT OR IGNORE INTO ai_suggestions (id, conversation_id, message_id, title, content, affinity_prediction, tags, is_used, created_at) VALUES -('sugg_miyu_1', 'conv_miyu_1', 'msg_miyu_1_2', '提议具体地点', '我知道附近有个很棒的公园,樱花特别美,要不要去那里?', 15, '主动,体贴', 1, 1735689603000), -('sugg_miyu_2', 'conv_miyu_1', 'msg_miyu_1_2', '表达期待', '太好了!我一直想和你一起去散步呢。', 10, '情感,真诚', 0, 1735689602000), -('sugg_akira_1', 'conv_akira_2', 'msg_akira_2_2', '主动配合', '好的,我们一起看看哪些地方可以改进。', 8, '合作,积极', 1, 1735689702000), -('sugg_hana_1', 'conv_hana_2', 'msg_hana_2_2', '表达兴趣', '太好了!我也正好想找新书看。', 12, '兴趣,共鸣', 1, 1735689703000); +INSERT OR IGNORE INTO ai_suggestions (id, conversation_id, message_id, decision_point_id, batch_id, suggestion_index, 
title, content, affinity_prediction, tags, created_at) VALUES +('sugg_miyu_1', 'conv_miyu_1', 'msg_miyu_1_2', NULL, NULL, NULL, '提议具体地点', '我知道附近有个很棒的公园,樱花特别美,要不要去那里?', 15, '主动,体贴', 1735689603000), +('sugg_miyu_2', 'conv_miyu_1', 'msg_miyu_1_2', NULL, NULL, NULL, '表达期待', '太好了!我一直想和你一起去散步呢。', 10, '情感,真诚', 1735689602000), +('sugg_akira_1', 'conv_akira_2', 'msg_akira_2_2', NULL, NULL, NULL, '主动配合', '好的,我们一起看看哪些地方可以改进。', 8, '合作,积极', 1735689702000), +('sugg_hana_1', 'conv_hana_2', 'msg_hana_2_2', NULL, NULL, NULL, '表达兴趣', '太好了!我也正好想找新书看。', 12, '兴趣,共鸣', 1735689703000); -- 插入AI分析报告数据 INSERT OR IGNORE INTO ai_analysis (id, conversation_id, message_id, insight_type, content, created_at) VALUES @@ -143,14 +143,630 @@ INSERT OR IGNORE INTO ai_analysis (id, conversation_id, message_id, insight_type ('attitude_hana_3', 'conv_hana_3', NULL, 'attitude_analysis', '{"description":"对方在安静的环境中表现出放松和舒适,愿意一起度过安静的时光。虽然对话简短,但态度友好,愿意继续接触。","affinityChange":7,"trend":"上升"}', 1735689804000); -- 插入行动建议数据(可以尝试的话题和避开的话题) -INSERT OR IGNORE INTO ai_suggestions (id, conversation_id, message_id, title, content, affinity_prediction, tags, is_used, created_at) VALUES +INSERT OR IGNORE INTO ai_suggestions (id, conversation_id, message_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, tags, created_at) VALUES -- Miyu 的行动建议 -('action_miyu_1', 'conv_miyu_1', NULL, '可以尝试的话题', '最近读过的书、喜欢的音乐风格、户外活动、美食推荐', NULL, '可以尝试,话题', 0, 1735689604000), -('action_miyu_2', 'conv_miyu_1', NULL, '避开的话题', '过于功利的现实问题、工作压力、负面情绪', NULL, '避开,话题', 0, 1735689604000), +('action_miyu_1', 'conv_miyu_1', NULL, NULL, NULL, NULL, '可以尝试的话题', '最近读过的书、喜欢的音乐风格、户外活动、美食推荐', NULL, '可以尝试,话题', 1735689604000), +('action_miyu_2', 'conv_miyu_1', NULL, NULL, NULL, NULL, '避开的话题', '过于功利的现实问题、工作压力、负面情绪', NULL, '避开,话题', 1735689604000), -- Akira 的行动建议 -('action_akira_1', 'conv_akira_1', NULL, '可以尝试的话题', '学习计划、未来规划、兴趣爱好、共同目标', NULL, '可以尝试,话题', 0, 1735689604000), -('action_akira_2', 'conv_akira_1', NULL, '避开的话题', 
'过于私人的问题、负面评价、抱怨', NULL, '避开,话题', 0, 1735689604000), +('action_akira_1', 'conv_akira_1', NULL, NULL, NULL, NULL, '可以尝试的话题', '学习计划、未来规划、兴趣爱好、共同目标', NULL, '可以尝试,话题', 1735689604000), +('action_akira_2', 'conv_akira_1', NULL, NULL, NULL, NULL, '避开的话题', '过于私人的问题、负面评价、抱怨', NULL, '避开,话题', 1735689604000), -- Hana 的行动建议 -('action_hana_1', 'conv_hana_1', NULL, '可以尝试的话题', '最近读过的书、喜欢的音乐风格、文学作品、安静的活动', NULL, '可以尝试,话题', 0, 1735689604000), -('action_hana_2', 'conv_hana_1', NULL, '避开的话题', '过于功利的现实问题、嘈杂的环境、过于活跃的话题', NULL, '避开,话题', 0, 1735689604000); +('action_hana_1', 'conv_hana_1', NULL, NULL, NULL, NULL, '可以尝试的话题', '最近读过的书、喜欢的音乐风格、文学作品、安静的活动', NULL, '可以尝试,话题', 1735689604000), +('action_hana_2', 'conv_hana_1', NULL, NULL, NULL, NULL, '避开的话题', '过于功利的现实问题、嘈杂的环境、过于活跃的话题', NULL, '避开,话题', 1735689604000); + +-- 插入线上场景的复杂对话数据(面向宅男用户) + +-- === 游戏开黑场景 === + +-- 1. Miyu - MOBA游戏开黑(好感度60,熟悉阶段)- 长对话35回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_miyu_game_1', 'miyu', '周末的LOL开黑时光', 1735776000000, 15, '一起打LOL排位赛,配合默契,聊了很多游戏话题', '游戏,开黑,MOBA,配合,愉快', 1735776000000, 1735776000000); + +-- 插入35条消息数据 +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_miyu_g1_1', 'conv_miyu_game_1', 'character', '今晚有空吗?要不要一起打排位?', 1735776000000, 0), +('msg_miyu_g1_2', 'conv_miyu_game_1', 'user', '好啊!我刚好也想打LOL了', 1735776000100, 0), +('msg_miyu_g1_3', 'conv_miyu_game_1', 'character', '太好了!我最近在练新英雄,阿卡丽超帅的!', 1735776000200, 0), +('msg_miyu_g1_4', 'conv_miyu_game_1', 'user', '阿卡丽确实很强,你玩得怎么样?', 1735776000300, 0), +('msg_miyu_g1_5', 'conv_miyu_game_1', 'character', '还行吧,就是有时候切入时机把握不好,你来打野吧,我们配合', 1735776000400, 0), +('msg_miyu_g1_6', 'conv_miyu_game_1', 'user', '没问题,我玩蔚,前期帮你抓中', 1735776000500, 0), +('msg_miyu_g1_7', 'conv_miyu_game_1', 'character', '好耶!那我稳住发育,等你三级来', 1735776000600, 0), +('msg_miyu_g1_8', 'conv_miyu_game_1', 'character', '对了,你最近看什么新番了吗?', 1735776000700, 
0), +('msg_miyu_g1_9', 'conv_miyu_game_1', 'user', '看了《葬送的芙莉莲》,剧情很棒', 1735776000800, 0), +('msg_miyu_g1_10', 'conv_miyu_game_1', 'character', '啊啊啊我也在看!芙莉莲太可爱了,特别是她不懂感情的样子', 1735776000900, 0), +('msg_miyu_g1_11', 'conv_miyu_game_1', 'user', '是啊,那种反差萌真的很戳我', 1735776001000, 0), +('msg_miyu_g1_12', 'conv_miyu_game_1', 'character', '哈哈,我懂!不过先不说了,游戏开始了', 1735776001100, 0), +('msg_miyu_g1_13', 'conv_miyu_game_1', 'character', '哇,对方是亚索,我要被秀了', 1735776001200, 0), +('msg_miyu_g1_14', 'conv_miyu_game_1', 'user', '没事,我刷完红buff就来帮你', 1735776001300, 0), +('msg_miyu_g1_15', 'conv_miyu_game_1', 'character', 'okk,我尽量控线', 1735776001400, 0), +('msg_miyu_g1_16', 'conv_miyu_game_1', 'character', '救命!对面打野来抓我了!', 1735776001500, 0), +('msg_miyu_g1_17', 'conv_miyu_game_1', 'user', '来了来了!反打!', 1735776001600, 0), +('msg_miyu_g1_18', 'conv_miyu_game_1', 'character', 'nice!双杀!你操作好秀!', 1735776001700, 0), +('msg_miyu_g1_19', 'conv_miyu_game_1', 'user', '哈哈,是你控的好', 1735776001800, 0), +('msg_miyu_g1_20', 'conv_miyu_game_1', 'character', '这波配合满分!', 1735776001900, 0), +('msg_miyu_g1_21', 'conv_miyu_game_1', 'character', '我们去下路游走一波吧', 1735776002000, 0), +('msg_miyu_g1_22', 'conv_miyu_game_1', 'user', '好,我先去控小龙', 1735776002100, 0), +('msg_miyu_g1_23', 'conv_miyu_game_1', 'character', 'ok,我去下路蹲着', 1735776002200, 0), +('msg_miyu_g1_24', 'conv_miyu_game_1', 'character', '下路打起来了!快来!', 1735776002300, 0), +('msg_miyu_g1_25', 'conv_miyu_game_1', 'user', '马上到!', 1735776002400, 0), +('msg_miyu_g1_26', 'conv_miyu_game_1', 'character', '这波团赢了!', 1735776002500, 0), +('msg_miyu_g1_27', 'conv_miyu_game_1', 'character', '我们去推中塔吧', 1735776002600, 0), +('msg_miyu_g1_28', 'conv_miyu_game_1', 'user', 'okk,我喊上单一起来', 1735776002700, 0), +('msg_miyu_g1_29', 'conv_miyu_game_1', 'character', 'nice,中塔推了', 1735776002800, 0), +('msg_miyu_g1_30', 'conv_miyu_game_1', 'character', '我们去大龙做视野', 1735776002900, 0), +('msg_miyu_g1_31', 'conv_miyu_game_1', 'user', '好,我扫描排眼', 1735776003000, 0), +('msg_miyu_g1_32', 'conv_miyu_game_1', 
'character', '对面来了!准备开团!', 1735776003100, 0), +('msg_miyu_g1_33', 'conv_miyu_game_1', 'user', '我打先手!', 1735776003200, 0), +('msg_miyu_g1_34', 'conv_miyu_game_1', 'character', '赢了赢了!一波了!', 1735776003300, 0), +('msg_miyu_g1_35', 'conv_miyu_game_1', 'character', '哈哈,今晚配合超棒!下次继续!', 1735776003400, 0); + +-- 2. Akira - FPS游戏开黑(好感度45,破冰到熟悉阶段)- 中对话20回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_akira_game_1', 'akira', '战术分析:Valorant排位赛', 1735862400000, 12, '一起玩Valorant,讨论了战术配合和游戏策略', '游戏,FPS,战术,配合,认真', 1735862400000, 1735862400000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_akira_g1_1', 'conv_akira_game_1', 'character', '要不要试试Valorant?我最近在练这个英雄', 1735862400000, 0), +('msg_akira_g1_2', 'conv_akira_game_1', 'user', '好啊,不过我玩得不太好', 1735862400100, 0), +('msg_akira_g1_3', 'conv_akira_game_1', 'character', '没关系,我们可以慢慢配合。我研究了一些战术', 1735862400200, 0), +('msg_akira_g1_4', 'conv_akira_game_1', 'user', '什么战术?', 1735862400300, 0), +('msg_akira_g1_5', 'conv_akira_game_1', 'character', '比如A点的烟雾弹投掷点位,我画了个图', 1735862400400, 0), +('msg_akira_g1_6', 'conv_akira_game_1', 'user', '看起来很专业啊', 1735862400500, 0), +('msg_akira_g1_7', 'conv_akira_game_1', 'character', '只是做了些准备。游戏中注意听我报点', 1735862400600, 0), +('msg_akira_g1_8', 'conv_akira_game_1', 'character', '对方在中路,小心点', 1735862400700, 0), +('msg_akira_g1_9', 'conv_akira_game_1', 'user', '收到,我去防守B点', 1735862400800, 0), +('msg_akira_g1_10', 'conv_akira_game_1', 'character', '好,我架住中路', 1735862400900, 0), +('msg_akira_g1_11', 'conv_akira_game_1', 'character', '他们rush B了!请求支援!', 1735862401000, 0), +('msg_akira_g1_12', 'conv_akira_game_1', 'user', '我马上回防!', 1735862401100, 0), +('msg_akira_g1_13', 'conv_akira_game_1', 'character', 'nice,守住这波了', 1735862401200, 0), +('msg_akira_g1_14', 'conv_akira_game_1', 'user', '刚才那波配合得不错', 1735862401300, 0), +('msg_akira_g1_15', 'conv_akira_game_1', 
'character', '是的,你的反应很快', 1735862401400, 0), +('msg_akira_g1_16', 'conv_akira_game_1', 'character', '下半场我们换个战术如何?', 1735862401500, 0), +('msg_akira_g1_17', 'conv_akira_game_1', 'user', '好啊,你有什么想法?', 1735862401600, 0), +('msg_akira_g1_18', 'conv_akira_game_1', 'character', '我们可以试试快攻战术,打他们个措手不及', 1735862401700, 0), +('msg_akira_g1_19', 'conv_akira_game_1', 'user', '听起来不错,我配合你', 1735862401800, 0), +('msg_akira_g1_20', 'conv_akira_game_1', 'character', '很好,那就这么定了', 1735862401900, 0); + +-- 3. Hana - 线上桌游(好感度80,亲密阶段)- 中对话18回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_hana_game_1', 'hana', '线上狼人杀游戏', 1735948800000, 8, '一起玩线上桌游,展现了Hana敏锐的观察力', '游戏,桌游,策略,安静,观察', 1735948800000, 1735948800000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_hana_g1_1', 'conv_hana_game_1', 'character', '要玩线上狼人杀吗?我建了个房间', 1735948800000, 0), +('msg_hana_g1_2', 'conv_hana_game_1', 'user', '好啊,人多吗?', 1735948800100, 0), +('msg_hikla_g1_3', 'conv_hana_game_1', 'character', '七八个人,够玩了', 1735948800200, 0), +('msg_hana_g1_4', 'conv_hana_game_1', 'character', '我是预言家,昨晚查了3号,是狼人', 1735948800300, 0), +('msg_hana_g1_5', 'conv_hana_game_1', 'user', '真的吗?那这轮投3号', 1735948800400, 0), +('msg_hana_g1_6', 'conv_hana_game_1', 'character', '等等,我觉得2号也很可疑', 1735948800500, 0), +('msg_hana_g1_7', 'conv_hana_game_1', 'character', '他刚才发言的时候有些犹豫', 1735948800600, 0), +('msg_hana_g1_8', 'conv_hana_game_1', 'user', '你这么一说,确实有点怪', 1735948800700, 0), +('msg_hana_g1_9', 'conv_hana_game_1', 'character', '我们分票吧,投2号和3号', 1735948800800, 0), +('msg_hana_g1_10', 'conv_hana_game_1', 'character', '果然,2号是狼人,我们赢了', 1735948800900, 0), +('msg_hana_g1_11', 'conv_hana_game_1', 'user', '厉害啊,观察力好敏锐', 1735948801000, 0), +('msg_hana_g1_12', 'conv_hana_game_1', 'character', '只是比较注意细节而已', 1735948801100, 0), +('msg_hana_g1_13', 'conv_hana_game_1', 'character', '下一局你来当预言家吗?', 
1735948801200, 0), +('msg_hana_g1_14', 'conv_hana_game_1', 'user', '好啊,我试试', 1735948801300, 0), +('msg_hana_g1_15', 'conv_hana_game_1', 'character', '别紧张,按照你的直觉来就好', 1735948801400, 0), +('msg_hana_g1_16', 'conv_hana_game_1', 'user', '嗯,我会加油的', 1735948801500, 0), +('msg_hana_g1_17', 'conv_hana_game_1', 'character', '我相信你', 1735948801600, 0), +('msg_hana_g1_18', 'conv_hana_game_1', 'character', '游戏结束后,要不要单独聊会儿?', 1735948801700, 0); + +-- === 连麦看电影场景 === + +-- 4. Miyu - 一起看番剧(好感度85,亲密阶段)- 长对话32回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_miyu_movie_1', 'miyu', '深夜番剧连麦:《电锯人》', 1736035200000, 18, '深夜连麦看番剧,讨论剧情和角色,关系更加亲密', '番剧,电锯人,深夜,讨论,亲密', 1736035200000, 1736035200000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_miyu_m1_1', 'conv_miyu_movie_1', 'character', '今晚有安排吗?要不要一起连麦看电锯人新番?', 1736035200000, 0), +('msg_miyu_m1_2', 'conv_miyu_movie_1', 'user', '好啊!我正想看呢', 1736035200100, 0), +('msg_miyu_m1_3', 'conv_miyu_movie_1', 'character', '太好了!我已经准备好零食了', 1736035200200, 0), +('msg_miyu_m1_4', 'conv_miyu_movie_1', 'user', '哈哈,我也是,买了可乐和薯片', 1736035200300, 0), +('msg_miyu_m1_5', 'conv_miyu_movie_1', 'character', '那我们就开始吧!第一集好紧张', 1736035200400, 0), +('msg_miyu_m1_6', 'conv_miyu_movie_1', 'character', '哇,玛奇玛好漂亮!但是感觉不简单', 1736035200500, 0), +('msg_miyu_m1_7', 'conv_miyu_movie_1', 'user', '是啊,她肯定有什么秘密', 1736035200600, 0), +('msg_miyu_m1_8', 'conv_miyu_movie_1', 'character', '电次好惨啊,之前的生活太苦了', 1736035200700, 0), +('msg_miyu_m1_9', 'conv_miyu_movie_1', 'user', '但是他变成电锯人好帅!', 1736035200800, 0), +('msg_miyu_m1_10', 'conv_miyu_movie_1', 'character', '对对对!变身那段超燃!', 1736035200900, 0), +('msg_miyu_m1_11', 'conv_miyu_movie_1', 'character', '帕瓦好可爱啊,虽然脾气不好', 1736035201000, 0), +('msg_miyu_m1_12', 'conv_miyu_movie_1', 'user', '她那种反差萌很戳我', 1736035201100, 0), +('msg_miyu_m1_13', 'conv_miyu_movie_1', 'character', '我懂!傲娇属性赛高!', 
1736035201200, 0), +('msg_miyu_m1_14', 'conv_miyu_movie_1', 'character', '哇,这个战斗场面好血腥', 1736035201300, 0), +('msg_miyu_m1_15', 'conv_miyu_movie_1', 'user', '藤本树的作品就是这样,很疯狂', 1736035201400, 0), +('msg_miyu_m1_16', 'conv_miyu_movie_1', 'character', '但是很有魅力不是吗?', 1736035201500, 0), +('msg_miyu_m1_17', 'conv_miyu_movie_1', 'user', '是的,剧情展开很出人意料', 1736035201600, 0), +('msg_miyu_m1_18', 'conv_miyu_movie_1', 'character', '玛奇玛对电次的态度好暧昧啊', 1736035201700, 0), +('msg_miyu_m1_19', 'conv_miyu_movie_1', 'user', '她肯定在利用电次吧', 1736035201800, 0), +('msg_miyu_m1_20', 'conv_miyu_movie_1', 'character', '有可能,但是她给电次的感觉太温暖了', 1736035201900, 0), +('msg_miyu_m1_21', 'conv_miyu_movie_1', 'character', '电次从小到大都没有被温柔对待过', 1736035202000, 0), +('msg_miyu_m1_22', 'conv_miyu_movie_1', 'user', '所以玛奇玛的出现对他很重要', 1736035202100, 0), +('msg_miyu_m1_23', 'conv_miyu_movie_1', 'character', '嗯嗯,这种心理描写很细腻', 1736035202200, 0), +('msg_miyu_m1_24', 'conv_miyu_movie_1', 'character', '要不要喝点水?我嗓子有点干', 1736035202300, 0), +('msg_miyu_m1_25', 'conv_miyu_movie_1', 'user', '哈哈,我也是,说了好多话', 1736035202400, 0), +('msg_miyu_m1_26', 'conv_miyu_movie_1', 'character', '下一集好像更刺激', 1736035202500, 0), +('msg_miyu_m1_27', 'conv_miyu_movie_1', 'user', '那我们继续看?', 1736035202600, 0), +('msg_miyu_m1_28', 'conv_miyu_movie_1', 'character', '当然!我今晚不睡了!', 1736035202700, 0), +('msg_miyu_m1_29', 'conv_miyu_movie_1', 'character', '有你陪我看番,好开心', 1736035202800, 0), +('msg_miyu_m1_30', 'conv_miyu_movie_1', 'user', '我也是,感觉距离更近了', 1736035202900, 0), +('msg_miyu_m1_31', 'conv_miyu_game_1', 'character', '嗯嗯,以后经常一起看番吧!', 1736035203000, 0), +('msg_miyu_m1_32', 'conv_miyu_movie_1', 'user', '好!说定了!', 1736035203100, 0); + +-- 5. 
Hana - 深夜电影讨论(好感度50,熟悉阶段)- 中对话22回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_hana_movie_1', 'hana', '深夜观影:《你的名字》', 1736121600000, 10, '深夜连麦看《你的名字》,Hana分享了很多细腻的情感体会', '电影,新海诚,深夜,情感,细腻', 1736121600000, 1736121600000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_hana_m1_1', 'conv_hana_movie_1', 'character', '今晚...要不要一起看电影?', 1736121600000, 0), +('msg_hana_m1_2', 'conv_hana_movie_1', 'user', '好啊,看什么?', 1736121600100, 0), +('msg_hana_m1_3', 'conv_hana_movie_1', 'character', '《你的名字》可以吗?我想重温一下', 1736121600200, 0), +('msg_hana_m1_4', 'conv_hana_movie_1', 'user', '当然可以,经典作品', 1736121600300, 0), +('msg_hana_m1_5', 'conv_hana_movie_1', 'character', '嗯,新海诚的作品总是很触动我', 1736121600400, 0), +('msg_hana_m1_6', 'conv_hana_movie_1', 'character', '三叶和泷的相遇...真的很美好', 1736121600500, 0), +('msg_hana_m1_7', 'conv_hana_movie_1', 'user', '是啊,那种跨越时空的联系', 1736121600600, 0), +('msg_hana_m1_8', 'conv_hana_movie_1', 'character', '你相信...这种命运般的相遇吗?', 1736121600700, 0), +('msg_hana_m1_9', 'conv_hana_movie_1', 'user', '我相信,虽然很少见', 1736121600800, 0), +('msg_hana_m1_10', 'conv_hana_movie_1', 'character', '嗯...我也觉得', 1736121600900, 0), +('msg_hana_m1_11', 'conv_hana_movie_1', 'character', '电影中那种似曾相识的感觉...很奇妙', 1736121601000, 0), +('msg_hana_m1_12', 'conv_hana_movie_1', 'user', '我也有过类似的感觉', 1736121601100, 0), +('msg_hana_m1_13', 'conv_hana_movie_1', 'character', '真的吗?', 1736121601200, 0), +('msg_hana_m1_14', 'conv_hana_movie_1', 'user', '嗯,有时候会觉得某个场景好像经历过', 1736121601300, 0), +('msg_hana_m1_15', 'conv_hana_movie_1', 'character', '那种感觉...既陌生又熟悉', 1736121601400, 0), +('msg_hana_m1_16', 'conv_hana_movie_1', 'character', '也许我们也像三叶和泷一样...', 1736121601500, 0), +('msg_hana_m1_17', 'conv_hana_movie_1', 'user', '在寻找着某个人?', 1736121601600, 0), +('msg_hana_m1_18', 'conv_hana_movie_1', 'character', '嗯...', 1736121601700, 0), +('msg_hana_m1_19', 
'conv_hana_movie_1', 'character', '电影结束了...但是感觉很温暖', 1736121601800, 0), +('msg_hana_m1_20', 'conv_hana_movie_1', 'user', '是啊,结局很治愈', 1736121601900, 0), +('msg_hana_m1_21', 'conv_hana_movie_1', 'character', '谢谢你陪我看电影', 1736121602000, 0), +('msg_hana_m1_22', 'conv_hana_movie_1', 'user', '我也很开心', 1736121602100, 0); + +-- === 深夜语音聊天场景 === + +-- 6. Miyu - 深夜情感电台(好感度90,亲密阶段)- 超长对话40回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_miyu_late_1', 'miyu', '凌晨3点的真心话', 1736284800000, 20, '深夜语音聊天,聊了很多真心话和童年回忆,关系更进一步', '深夜,真心话,回忆,亲密,信任', 1736284800000, 1736284800000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_miyu_l1_1', 'conv_miyu_late_1', 'character', '睡了吗?', 1736284800000, 0), +('msg_miyu_l1_2', 'conv_miyu_late_1', 'user', '还没,怎么了?', 1736284800100, 0), +('msg_miyu_l1_3', 'conv_miyu_late_1', 'character', '有点睡不着...想找人聊聊天', 1736284800200, 0), +('msg_miyu_l1_4', 'conv_miyu_late_1', 'user', '好啊,我陪你', 1736284800300, 0), +('msg_miyu_l1_5', 'conv_miyu_late_1', 'character', '谢谢你...总是陪着我', 1736284800400, 0), +('msg_miyu_l1_6', 'conv_miyu_late_1', 'user', '我们是朋友嘛', 1736284800500, 0), +('msg_miyu_l1_7', 'conv_miyu_late_1', 'character', '只是朋友吗...', 1736284800600, 0), +('msg_miyu_l1_8', 'conv_miyu_late_1', 'user', '嗯?', 1736284800700, 0), +('msg_miyu_l1_9', 'conv_miyu_late_1', 'character', '没什么...', 1736284800800, 0), +('msg_miyu_l1_10', 'conv_miyu_late_1', 'character', '我小时候...其实很孤单', 1736284800900, 0), +('msg_miyu_l1_11', 'conv_miyu_late_1', 'user', '怎么了?', 1736284801000, 0), +('msg_miyu_l1_12', 'conv_miyu_late_1', 'character', '父母总是忙工作,经常一个人在家', 1736284801100, 0), +('msg_miyu_l1_13', 'conv_miyu_late_1', 'user', '那一定很寂寞吧', 1736284801200, 0), +('msg_miyu_l1_14', 'conv_miyu_late_1', 'character', '嗯...所以我很喜欢玩游戏', 1736284801300, 0), +('msg_miyu_l1_15', 'conv_miyu_late_1', 'character', '因为在游戏里,可以认识很多人', 1736284801400, 0), 
+('msg_miyu_l1_16', 'conv_miyu_late_1', 'user', '我也是,网上认识的朋友也很重要', 1736284801500, 0), +('msg_miyu_l1_17', 'conv_miyu_late_1', 'character', '嗯!特别是你...', 1736284801600, 0), +('msg_miyu_l1_18', 'conv_miyu_late_1', 'user', '我?', 1736284801700, 0), +('msg_miyu_l1_19', 'conv_miyu_late_1', 'character', '你总是陪着我...我很开心', 1736284801800, 0), +('msg_miyu_l1_20', 'conv_miyu_late_1', 'user', '我也很开心能认识你', 1736284801900, 0), +('msg_miyu_l1_21', 'conv_miyu_late_1', 'character', '真的吗?', 1736284802000, 0), +('msg_miyu_l1_22', 'conv_miyu_late_1', 'user', '真的', 1736284802100, 0), +('msg_miyu_l1_23', 'conv_miyu_late_1', 'character', '那...我有话想对你说', 1736284802200, 0), +('msg_miyu_l1_24', 'conv_miyu_late_1', 'user', '什么话?', 1736284802300, 0), +('msg_miyu_l1_25', 'conv_miyu_late_1', 'character', '我...我可能喜欢你', 1736284802400, 0), +('msg_miyu_l1_26', 'conv_miyu_late_1', 'user', '啊...', 1736284802500, 0), +('msg_miyu_l1_27', 'conv_miyu_late_1', 'character', '对不起,是不是吓到你了?', 1736284802600, 0), +('msg_miyu_l1_28', 'conv_miyu_late_1', 'user', '没有...我只是没想到', 1736284802700, 0), +('msg_miyu_l1_29', 'conv_miyu_late_1', 'character', '你不用现在回答我...', 1736284802800, 0), +('msg_miyu_l1_30', 'conv_miyu_late_1', 'character', '我只是想让你知道我的心意', 1736284802900, 0), +('msg_miyu_l1_31', 'conv_miyu_late_1', 'user', '嗯,我知道了', 1736284803000, 0), +('msg_miyu_l1_32', 'conv_miyu_late_1', 'character', '那...我们还做朋友吗?', 1736284803100, 0), +('msg_miyu_l1_33', 'conv_miyu_late_1', 'user', '当然,我们一直是朋友', 1736284803200, 0), +('msg_miyu_l1_34', 'conv_miyu_late_1', 'character', '太好了...', 1736284803300, 0), +('msg_miyu_l1_35', 'conv_miyu_late_1', 'character', '其实,说出来之后,心里轻松多了', 1736284803400, 0), +('msg_miyu_l1_36', 'conv_miyu_late_1', 'user', '我明白这种感觉', 1736284803500, 0), +('msg_miyu_l1_37', 'conv_miyu_late_1', 'character', '谢谢你听我说这些', 1736284803600, 0), +('msg_miyu_l1_38', 'conv_miyu_late_1', 'user', '不客气,我也想说', 1736284803700, 0), +('msg_miyu_l1_39', 'conv_miyu_late_1', 'character', '什么?', 1736284803800, 0), +('msg_miyu_l1_40', 
'conv_miyu_late_1', 'user', '我也很喜欢你', 1736284803900, 0); + +-- 7. Akira - 深夜学习监督(好感度70,熟悉到亲密阶段)- 中对话25回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_akira_late_1', 'akira', '深夜学习监督会议', 1736371200000, 14, '深夜连麦学习,Akira展现认真的一面,互相监督鼓励', '学习,深夜,监督,认真,鼓励', 1736371200000, 1736371200000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_akira_l1_1', 'conv_akira_late_1', 'character', '在吗?我看到你游戏在线', 1736371200000, 0), +('msg_akira_l1_2', 'conv_akira_late_1', 'user', '啊...放松一下', 1736371200100, 0), +('msg_akira_l1_3', 'conv_akira_late_1', 'character', '作业写完了吗?', 1736371200200, 0), +('msg_akira_l1_4', 'conv_akira_late_1', 'user', '还没...', 1736371200300, 0), +('msg_akira_l1_5', 'conv_akira_late_1', 'character', '那我们连麦学习吧,互相监督', 1736371200400, 0), +('msg_akira_l1_6', 'conv_akira_late_1', 'user', '好...好吧', 1736371200500, 0), +('msg_akira_l1_7', 'conv_akira_late_1', 'character', '先把数学作业做完,然后休息10分钟', 1736371200600, 0), +('msg_akira_l1_8', 'conv_akira_late_1', 'user', '收到', 1736371200700, 0), +('msg_akira_l1_9', 'conv_akira_late_1', 'character', '我这边也还有英语阅读要做', 1736371200800, 0), +('msg_akira_l1_10', 'conv_akira_late_1', 'character', '遇到不会的题可以问我', 1736371200900, 0), +('msg_akira_l1_11', 'conv_akira_late_1', 'user', '这题函数题好难', 1736371201000, 0), +('msg_akira_l1_12', 'conv_akira_late_1', 'character', '我看看...这个要用导数', 1736371201100, 0), +('msg_akira_l1_13', 'conv_akira_late_1', 'character', '先求导,然后找极值点', 1736371201200, 0), +('msg_akira_l1_14', 'conv_akira_late_1', 'user', '原来如此,我明白了', 1736371201300, 0), +('msg_akira_l1_15', 'conv_akira_late_1', 'character', '不错,继续加油', 1736371201400, 0), +('msg_akira_l1_16', 'conv_akira_late_1', 'character', '作业写完了吗?', 1736371201500, 0), +('msg_akira_l1_17', 'conv_akira_late_1', 'user', '写完了!', 1736371201600, 0), +('msg_akira_l1_18', 'conv_akira_late_1', 'character', '很好,那我们休息一下吧', 
1736371201700, 0), +('msg_akira_l1_19', 'conv_akira_late_1', 'user', '谢谢你陪我学习', 1736371201800, 0), +('msg_akira_l1_20', 'conv_akira_late_1', 'character', '不客气,我也需要人监督', 1736371201900, 0), +('msg_akira_l1_21', 'conv_akira_late_1', 'character', '其实...和你学习很开心', 1736371202000, 0), +('msg_akira_l1_22', 'conv_akira_late_1', 'user', '我也是', 1736371202100, 0), +('msg_akira_l1_23', 'conv_akira_late_1', 'character', '那...以后也经常一起学习吧', 1736371202200, 0), +('msg_akira_l1_24', 'conv_akira_late_1', 'user', '好!', 1736371202300, 0), +('msg_akira_l1_25', 'conv_akira_late_1', 'character', '那就这么说定了', 1736371202400, 0); + +-- 8. Hana - 深夜读书分享(好感度55,熟悉阶段)- 中对话20回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_hana_late_1', 'hana', '深夜读书分享会', 1736457600000, 11, '深夜分享读书心得,聊了很多关于文学和人生的思考', '读书,深夜,分享,思考,安静', 1736457600000, 1736457600000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_hana_l1_1', 'conv_hana_late_1', 'character', '晚上好...这么晚还没睡吗?', 1736457600000, 0), +('msg_hana_l1_2', 'conv_hana_late_1', 'user', '嗯,在看小说', 1736457600100, 0), +('msg_hana_l1_3', 'conv_hana_late_1', 'character', '什么小说?', 1736457600200, 0), +('msg_hana_l1_4', 'conv_hana_late_1', 'user', '《挪威的森林》', 1736457600300, 0), +('msg_hana_l1_5', 'conv_hana_late_1', 'character', '啊...我也很喜欢这本书', 1736457600400, 0), +('msg_hana_l1_6', 'conv_hana_late_1', 'character', '渡边和直子的感情...让人难过', 1736457600500, 0), +('msg_hana_l1_7', 'conv_hana_late_1', 'user', '是啊,那种无法挽回的感觉', 1736457600600, 0), +('msg_hana_l1_8', 'conv_hana_late_1', 'character', '但是绿子的出现...带来了希望', 1736457600700, 0), +('msg_hana_l1_9', 'conv_hana_late_1', 'user', '嗯,生命中总会有新的可能', 1736457600800, 0), +('msg_hana_l1_10', 'conv_hana_late_1', 'character', '你相信吗?', 1736457600900, 0), +('msg_hana_l1_11', 'conv_hana_late_1', 'user', '相信什么?', 1736457601000, 0), +('msg_hana_l1_12', 'conv_hana_late_1', 'character', 
'新的可能...新的人', 1736457601100, 0), +('msg_hana_l1_13', 'conv_hana_late_1', 'user', '我相信', 1736457601200, 0), +('msg_hana_l1_14', 'conv_hana_late_1', 'character', '那就好...', 1736457601300, 0), +('msg_hana_l1_15', 'conv_hana_late_1', 'character', '和你聊天...感觉很舒服', 1736457601400, 0), +('msg_hana_l1_16', 'conv_hana_late_1', 'user', '我也是', 1736457601500, 0), +('msg_hana_l1_17', 'conv_hana_late_1', 'character', '我们可以...经常这样聊天吗?', 1736457601600, 0), +('msg_hana_l1_18', 'conv_hana_late_1', 'user', '当然可以', 1736457601700, 0), +('msg_hana_l1_19', 'conv_hana_late_1', 'character', '谢谢你', 1736457601800, 0), +('msg_hana_l1_20', 'conv_hana_late_1', 'user', '该说谢谢的是我', 1736457601900, 0); + +-- === 破冰阶段对话(好感度低,短对话) === + +-- 9. Miyu - 破冰:第一次线上相遇(好感度20,陌生阶段)- 短对话8回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_miyu_ice_1', 'miyu', '游戏公会的初次相遇', 1736544000000, 8, '在游戏公会中初次交流,互相介绍', '破冰,初遇,游戏,公会,陌生', 1736544000000, 1736544000000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_miyu_i1_1', 'conv_miyu_ice_1', 'character', 'hi,我是刚加入公会的', 1736544000000, 0), +('msg_miyu_i1_2', 'conv_miyu_ice_1', 'user', '欢迎欢迎!', 1736544000100, 0), +('msg_miyu_i1_3', 'conv_miyu_ice_1', 'character', '谢谢!我看到你们在打副本', 1736544000200, 0), +('msg_miyu_i1_4', 'conv_miyu_ice_1', 'user', '是啊,要不要一起来?', 1736544000300, 0), +('msg_miyu_i1_5', 'conv_miyu_ice_1', 'character', '好啊!我玩输出位', 1736544000400, 0), +('msg_miyu_i1_6', 'conv_miyu_ice_1', 'user', 'okk,我们正好缺输出', 1736544000500, 0), +('msg_miyu_i1_7', 'conv_miyu_ice_1', 'character', '嘿嘿,那我不客气啦', 1736544000600, 0), +('msg_miyu_i1_8', 'conv_miyu_ice_1', 'user', '加油!', 1736544000700, 0); + +-- 10. 
Akira - 破冰:学习小组交流(好感度25,陌生阶段)- 短对话7回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_akira_ice_1', 'akira', '线上学习小组的讨论', 1736630400000, 6, '在线上学习小组中初次交流,讨论学习问题', '破冰,学习,小组,讨论,陌生', 1736630400000, 1736630400000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_akira_i1_1', 'conv_akira_ice_1', 'character', '这道题你会做吗?', 1736630400000, 0), +('msg_akira_i1_2', 'conv_akira_ice_1', 'user', '我看看...', 1736630400100, 0), +('msg_akira_i1_3', 'conv_akira_ice_1', 'character', '是关于微积分应用的', 1736630400200, 0), +('msg_akira_i1_4', 'conv_akira_ice_1', 'user', '这个要用导数求极值', 1736630400300, 0), +('msg_akira_i1_5', 'conv_akira_ice_1', 'character', '原来如此,我明白了', 1736630400400, 0), +('msg_akira_i1_6', 'conv_akira_ice_1', 'user', '不客气', 1736630400500, 0), +('msg_akira_i1_7', 'conv_akira_ice_1', 'character', '谢谢你,你讲解得很清楚', 1736630400600, 0); + +-- 11. Hana - 破冰:读书群交流(好感度15,陌生阶段)- 短对话6回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_hana_ice_1', 'hana', '读书群的初次对话', 1736716800000, 7, '在读书群中初次交流,讨论书籍', '破冰,读书,群聊,初次,陌生', 1736716800000, 1736716800000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_hana_i1_1', 'conv_hana_ice_1', 'character', '你好,我看到你在读《挪威的森林》', 1736716800000, 0), +('msg_hana_i1_2', 'conv_hana_ice_1', 'user', '是啊,你也喜欢村上春树?', 1736716800100, 0), +('msg_hana_i1_3', 'conv_hana_ice_1', 'character', '嗯,他的文字很细腻', 1736716800200, 0), +('msg_hana_i1_4', 'conv_hana_ice_1', 'user', '是啊,特别是描写孤独的感觉', 1736716800300, 0), +('msg_hana_i1_5', 'conv_hana_ice_1', 'character', '嗯,很有共鸣', 1736716800400, 0), +('msg_hana_i1_6', 'conv_hana_ice_1', 'user', '以后可以多交流', 1736716800500, 0); + +-- === 不同好感度阶段对话 === + +-- 12. 
Miyu - 陌生阶段(好感度30)- 短对话10回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_miyu_stranger_1', 'miyu', '游戏好友的日常问候', 1736803200000, 5, '刚加好友不久,简单的日常问候', '陌生,日常,游戏,问候', 1736803200000, 1736803200000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_miyu_s1_1', 'conv_miyu_stranger_1', 'character', 'hi,今天上线挺早啊', 1736803200000, 0), +('msg_miyu_s1_2', 'conv_miyu_stranger_1', 'user', '嗯,今天有空', 1736803200100, 0), +('msg_miyu_s1_3', 'conv_miyu_stranger_1', 'character', '要不要一起打匹配?', 1736803200200, 0), +('msg_miyu_s1_4', 'conv_miyu_stranger_1', 'user', '好啊', 1736803200300, 0), +('msg_miyu_s1_5', 'conv_miyu_stranger_1', 'character', '你玩什么位置?', 1736803200400, 0), +('msg_miyu_s1_6', 'conv_miyu_stranger_1', 'user', '我玩ADC', 1736803200500, 0), +('msg_miyu_s1_7', 'conv_miyu_stranger_1', 'character', '那我辅助你', 1736803200600, 0), +('msg_miyu_s1_8', 'conv_miyu_stranger_1', 'user', 'okk', 1736803200700, 0), +('msg_miyu_s1_9', 'conv_miyu_stranger_1', 'character', '加油!', 1736803200800, 0), +('msg_miyu_s1_10', 'conv_miyu_stranger_1', 'user', '嗯', 1736803200900, 0); + +-- 13. 
Akira - 熟悉阶段(好感度65)- 中对话20回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_akira_familiar_1', 'akira', '学习计划的讨论', 1736889600000, 13, '已经比较熟悉,讨论学习计划和未来目标', '熟悉,学习,计划,目标,认真', 1736889600000, 1736889600000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_akira_f1_1', 'conv_akira_familiar_1', 'character', '在吗?我想和你讨论一下学习计划', 1736889600000, 0), +('msg_akira_f1_2', 'conv_akira_familiar_1', 'user', '好啊,你说', 1736889600100, 0), +('msg_akira_f1_3', 'conv_akira_familiar_1', 'character', '下个月的考试,我觉得我们应该制定一个详细的复习计划', 1736889600200, 0), +('msg_akira_f1_4', 'conv_akira_familiar_1', 'user', '嗯,你说得对', 1736889600300, 0), +('msg_akira_f1_5', 'conv_akira_familiar_1', 'character', '我已经做了一份时间表,你看看', 1736889600400, 0), +('msg_akira_f1_6', 'conv_akira_familiar_1', 'user', '好详细啊', 1736889600500, 0), +('msg_akira_f1_7', 'conv_akira_familiar_1', 'character', '我们可以按照这个时间学习,每天互相检查进度', 1736889600600, 0), +('msg_akira_f1_8', 'conv_akira_familiar_1', 'user', '好,我们一起努力', 1736889600700, 0), +('msg_akira_f1_9', 'conv_akira_familiar_1', 'character', '我相信我们能考好', 1736889600800, 0), +('msg_akira_f1_10', 'conv_akira_familiar_1', 'user', '嗯,有你在我就有信心', 1736889600900, 0), +('msg_akira_f1_11', 'conv_akira_familiar_1', 'character', '真的吗?', 1736889601000, 0), +('msg_akira_f1_12', 'conv_akira_familiar_1', 'user', '真的,你一直很靠谱', 1736889601100, 0), +('msg_akira_f1_13', 'conv_akira_familiar_1', 'character', '谢谢你这么信任我', 1736889601200, 0), +('msg_akira_f1_14', 'conv_akira_familiar_1', 'character', '其实...我很重视我们的关系', 1736889601300, 0), +('msg_akira_f1_15', 'conv_akira_familiar_1', 'user', '我也是', 1736889601400, 0), +('msg_akira_f1_16', 'conv_akira_familiar_1', 'character', '那我们要一起进步', 1736889601500, 0), +('msg_akira_f1_17', 'conv_akira_familiar_1', 'user', '好!', 1736889601600, 0), +('msg_akira_f1_18', 'conv_akira_familiar_1', 'character', '学习完后,要不要一起放松一下?', 
1736889601700, 0), +('msg_akira_f1_19', 'conv_akira_familiar_1', 'user', '好啊,你想做什么?', 1736889601800, 0), +('msg_akira_f1_20', 'conv_akira_familiar_1', 'character', '一起听歌吧,我给你推荐一些', 1736889601900, 0); + +-- 14. Hana - 亲密阶段(好感度95)- 中对话24回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_hana_intimate_1', 'hana', '深夜读书与音乐分享', 1736976000000, 16, '关系已经非常亲密,分享读书心得和音乐,聊了很多内心想法', '亲密,读书,音乐,分享,深夜,信任', 1736976000000, 1736976000000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_hana_i1_1', 'conv_hana_intimate_1', 'character', '晚上好,今天过得怎么样?', 1736976000000, 0), +('msg_hana_i1_2', 'conv_hana_intimate_1', 'user', '还不错,你呢?', 1736976000100, 0), +('msg_hana_i1_3', 'conv_hana_intimate_1', 'character', '我很好,特别是能和你聊天', 1736976000200, 0), +('msg_hana_i1_4', 'conv_hana_intimate_1', 'user', '我也是', 1736976000300, 0), +('msg_hana_i1_5', 'conv_hana_intimate_1', 'character', '我最近在读一本新书,想分享给你', 1736976000400, 0), +('msg_hana_i1_6', 'conv_hana_intimate_1', 'user', '什么书?', 1736976000500, 0), +('msg_hana_i1_7', 'conv_hana_intimate_1', 'character', '《月亮与六便士》,讲一个画家追求理想的故事', 1736976000600, 0), +('msg_hana_i1_8', 'conv_hana_intimate_1', 'user', '我知道这本书,毛姆的作品', 1736976000700, 0), +('msg_hana_i1_9', 'conv_hana_intimate_1', 'character', '嗯,我觉得主人公很像你', 1736976000800, 0), +('msg_hana_i1_10', 'conv_hana_intimate_1', 'user', '像我?', 1736976000900, 0), +('msg_hana_i1_11', 'conv_hana_intimate_1', 'character', '嗯,都很有自己的想法', 1736976001000, 0), +('msg_hana_i1_12', 'conv_hana_intimate_1', 'user', '谢谢你这么看我', 1736976001100, 0), +('msg_hana_i1_13', 'conv_hana_intimate_1', 'character', '我说的是真心话', 1736976001200, 0), +('msg_hana_i1_14', 'conv_hana_intimate_1', 'user', '我知道', 1736976001300, 0), +('msg_hana_i1_15', 'conv_hana_intimate_1', 'character', '还有...我想给你听一首歌', 1736976001400, 0), +('msg_hana_i1_16', 'conv_hana_intimate_1', 'user', '什么歌?', 1736976001500, 
0), +('msg_hana_i1_17', 'conv_hana_intimate_1', 'character', '《First Love》,歌词很打动我', 1736976001600, 0), +('msg_hana_i1_18', 'conv_hana_intimate_1', 'user', '宇多田光的那首?', 1736976001700, 0), +('msg_hana_i1_19', 'conv_hana_intimate_1', 'character', '嗯,我想...我们的相遇也是命中注定', 1736976001800, 0), +('msg_hana_i1_20', 'conv_hana_intimate_1', 'user', '也许吧', 1736976001900, 0), +('msg_hana_i1_21', 'conv_hana_intimate_1', 'character', '我很感激能遇见你', 1736976002000, 0), +('msg_hana_i1_22', 'conv_hana_intimate_1', 'user', '我也是', 1736976002100, 0), +('msg_hana_i1_23', 'conv_hana_intimate_1', 'character', '那...我们就这样一直聊下去吧', 1736976002200, 0), +('msg_hana_i1_24', 'conv_hana_intimate_1', 'user', '好', 1736976002300, 0); + +-- === 其他线上场景 === + +-- 15. Miyu - 线上KTV(好感度75,熟悉到亲密阶段)- 中对话18回合 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_miyu_ktv_1', 'miyu', '线上KTV欢唱时光', 1737062400000, 15, '在Discord语音频道一起唱歌,玩得很开心', 'KTV,唱歌,Discord,欢乐,亲密', 1737062400000, 1737062400000); + +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_miyu_ktv1_1', 'conv_miyu_ktv_1', 'character', '来唱歌吧!我开了个KTV频道', 1737062400000, 0), +('msg_miyu_ktv1_2', 'conv_miyu_ktv_1', 'user', '好啊!', 1737062400100, 0), +('msg_miyu_ktv1_3', 'conv_miyu_ktv_1', 'character', '我先来!《Secret Base》', 1737062400200, 0), +('msg_miyu_ktv1_4', 'conv_miyu_ktv_1', 'character', '(唱歌中)', 1737062400300, 0), +('msg_miyu_ktv1_5', 'conv_miyu_ktv_1', 'user', '唱得真好听!', 1737062400400, 0), +('msg_miyu_ktv1_6', 'conv_miyu_ktv_1', 'character', '哈哈,谢谢!到你了', 1737062400500, 0), +('msg_miyu_ktv1_7', 'conv_miyu_ktv_1', 'user', '我唱《打上花火》吧', 1737062400600, 0), +('msg_miyu_ktv1_8', 'conv_miyu_ktv_1', 'character', '期待!', 1737062400700, 0), +('msg_miyu_ktv1_9', 'conv_miyu_ktv_1', 'user', '(唱歌中)', 1737062400800, 0), +('msg_miyu_ktv1_10', 'conv_miyu_ktv_1', 'character', '哇,你唱得好好听!', 1737062400900, 0), +('msg_miyu_ktv1_11', 
'conv_miyu_ktv_1', 'user', '哈哈,没有啦', 1737062401000, 0), +('msg_miyu_ktv1_12', 'conv_miyu_ktv_1', 'character', '真的!声音很温柔', 1737062401100, 0), +('msg_miyu_ktv1_13', 'conv_miyu_ktv_1', 'user', '被夸得不好意思了', 1737062401200, 0), +('msg_miyu_ktv1_14', 'conv_miyu_ktv_1', 'character', '那我们合唱一首吧!', 1737062401300, 0), +('msg_miyu_ktv1_15', 'conv_miyu_ktv_1', 'user', '好啊,唱什么?', 1737062401400, 0), +('msg_miyu_ktv1_16', 'conv_miyu_ktv_1', 'character', '《前前前世》怎么样?', 1737062401500, 0), +('msg_miyu_ktv1_17', 'conv_miyu_ktv_1', 'user', '好!', 1737062401600, 0), +('msg_miyu_ktv1_18', 'conv_miyu_ktv_1', 'character', '(合唱中)', 1737062401700, 0); + +-- 16. 复盘压力测试会话(120条消息,多批建议)- 用于剧情复盘校验 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_review_stress_1', 'miyu', '高负载复盘测试-连麦日常', 1737600000000, 9, '一次超长的日常连麦对话,用于复盘压力测试', '测试,复盘,长对话,连麦,日常', 1737600000000, 1737600000000); + +-- 前40条消息 +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_review120_001', 'conv_review_stress_1', 'character', '在吗?今晚继续连麦吗?', 1737600000000, 0), +('msg_review120_002', 'conv_review_stress_1', 'user', '在的,我刚下班', 1737600000100, 0), +('msg_review120_003', 'conv_review_stress_1', 'character', '辛苦啦,要不要先喝口水', 1737600000200, 0), +('msg_review120_004', 'conv_review_stress_1', 'user', '好,我去倒杯水', 1737600000300, 0), +('msg_review120_005', 'conv_review_stress_1', 'character', '今天想聊什么?', 1737600000400, 0), +('msg_review120_006', 'conv_review_stress_1', 'user', '随便聊,放松一下', 1737600000500, 0), +('msg_review120_007', 'conv_review_stress_1', 'character', '我这边刚看完一部动漫', 1737600000600, 0), +('msg_review120_008', 'conv_review_stress_1', 'user', '什么动漫?', 1737600000700, 0), +('msg_review120_009', 'conv_review_stress_1', 'character', '《电锯人》第二季,节奏很快', 1737600000800, 0), +('msg_review120_010', 'conv_review_stress_1', 'user', '我也喜欢那部,玛奇玛太神秘了', 1737600000900, 0), +('msg_review120_011', 
'conv_review_stress_1', 'character', '对啊,她的眼神好有压迫感', 1737600001000, 0), +('msg_review120_012', 'conv_review_stress_1', 'user', '你最喜欢哪个角色?', 1737600001100, 0), +('msg_review120_013', 'conv_review_stress_1', 'character', '帕瓦吧,傲娇又可爱', 1737600001200, 0), +('msg_review120_014', 'conv_review_stress_1', 'user', '哈哈,我也喜欢她的反差萌', 1737600001300, 0), +('msg_review120_015', 'conv_review_stress_1', 'character', '下次一起看新番?', 1737600001400, 0), +('msg_review120_016', 'conv_review_stress_1', 'user', '可以啊,周末有空', 1737600001500, 0), +('msg_review120_017', 'conv_review_stress_1', 'character', '那就定周六晚上', 1737600001600, 0), +('msg_review120_018', 'conv_review_stress_1', 'user', '好,我记下了', 1737600001700, 0), +('msg_review120_019', 'conv_review_stress_1', 'character', '你晚饭吃了没?', 1737600001800, 0), +('msg_review120_020', 'conv_review_stress_1', 'user', '吃了,简单炒了个菜', 1737600001900, 0), +('msg_review120_021', 'conv_review_stress_1', 'character', '我点了外卖,今晚偷个懒', 1737600002000, 0), +('msg_review120_022', 'conv_review_stress_1', 'user', '偶尔放松一下也好', 1737600002100, 0), +('msg_review120_023', 'conv_review_stress_1', 'character', '对了,我今天练歌了', 1737600002200, 0), +('msg_review120_024', 'conv_review_stress_1', 'user', '唱了什么?', 1737600002300, 0), +('msg_review120_025', 'conv_review_stress_1', 'character', '《secret base》,还不够熟', 1737600002400, 0), +('msg_review120_026', 'conv_review_stress_1', 'user', '这首很经典,多练就好了', 1737600002500, 0), +('msg_review120_027', 'conv_review_stress_1', 'character', '你会不会吉他版?', 1737600002600, 0), +('msg_review120_028', 'conv_review_stress_1', 'user', '会几个和弦,下次弹给你听', 1737600002700, 0), +('msg_review120_029', 'conv_review_stress_1', 'character', '期待!', 1737600002800, 0), +('msg_review120_030', 'conv_review_stress_1', 'user', '今天工作顺利吗?', 1737600002900, 0), +('msg_review120_031', 'conv_review_stress_1', 'character', '还行,就是会议有点多', 1737600003000, 0), +('msg_review120_032', 'conv_review_stress_1', 'user', '会议太多确实累', 1737600003100, 0), +('msg_review120_033', 'conv_review_stress_1', 
'character', '嗯,想躺平', 1737600003200, 0), +('msg_review120_034', 'conv_review_stress_1', 'user', '周末好好休息', 1737600003300, 0), +('msg_review120_035', 'conv_review_stress_1', 'character', '想出去走走,你有推荐吗?', 1737600003400, 0), +('msg_review120_036', 'conv_review_stress_1', 'user', '可以去公园或书店', 1737600003500, 0), +('msg_review120_037', 'conv_review_stress_1', 'character', '书店好,我想买本新书', 1737600003600, 0), +('msg_review120_038', 'conv_review_stress_1', 'user', '最近我在看推理小说', 1737600003700, 0), +('msg_review120_039', 'conv_review_stress_1', 'character', '推荐我一本?', 1737600003800, 0), +('msg_review120_040', 'conv_review_stress_1', 'user', '东野圭吾的《解忧杂货店》不错', 1737600003900, 0); + +-- 41-80条消息 +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_review120_041', 'conv_review_stress_1', 'character', '这本我一直想看', 1737600004000, 0), +('msg_review120_042', 'conv_review_stress_1', 'user', '故事很温暖', 1737600004100, 0), +('msg_review120_043', 'conv_review_stress_1', 'character', '你最喜欢哪段?', 1737600004200, 0), +('msg_review120_044', 'conv_review_stress_1', 'user', '写信那段,很治愈', 1737600004300, 0), +('msg_review120_045', 'conv_review_stress_1', 'character', '我也喜欢那种互相治愈的感觉', 1737600004400, 0), +('msg_review120_046', 'conv_review_stress_1', 'user', '对了,今天你心情好吗', 1737600004500, 0), +('msg_review120_047', 'conv_review_stress_1', 'character', '比昨天好,和你聊天放松', 1737600004600, 0), +('msg_review120_048', 'conv_review_stress_1', 'user', '那就好', 1737600004700, 0), +('msg_review120_049', 'conv_review_stress_1', 'character', '你今天学了什么?', 1737600004800, 0), +('msg_review120_050', 'conv_review_stress_1', 'user', '复习了一些英语', 1737600004900, 0), +('msg_review120_051', 'conv_review_stress_1', 'character', '口语练了吗?', 1737600005000, 0), +('msg_review120_052', 'conv_review_stress_1', 'user', '练了点,想找你对话练习', 1737600005100, 0), +('msg_review120_053', 'conv_review_stress_1', 'character', '没问题,随时来', 1737600005200, 0), +('msg_review120_054', 'conv_review_stress_1', 'user', 
'谢谢你', 1737600005300, 0), +('msg_review120_055', 'conv_review_stress_1', 'character', '你今天有运动吗?', 1737600005400, 0), +('msg_review120_056', 'conv_review_stress_1', 'user', '跑了两公里', 1737600005500, 0), +('msg_review120_057', 'conv_review_stress_1', 'character', '厉害,我只做了拉伸', 1737600005600, 0), +('msg_review120_058', 'conv_review_stress_1', 'user', '拉伸也不错', 1737600005700, 0), +('msg_review120_059', 'conv_review_stress_1', 'character', '最近想练核心', 1737600005800, 0), +('msg_review120_060', 'conv_review_stress_1', 'user', '可以试试平板支撑', 1737600005900, 0), +('msg_review120_061', 'conv_review_stress_1', 'character', '你能坚持多久?', 1737600006000, 0), +('msg_review120_062', 'conv_review_stress_1', 'user', '两分钟左右', 1737600006100, 0), +('msg_review120_063', 'conv_review_stress_1', 'character', '我得努力追上', 1737600006200, 0), +('msg_review120_064', 'conv_review_stress_1', 'user', '我们互相监督', 1737600006300, 0), +('msg_review120_065', 'conv_review_stress_1', 'character', '好!', 1737600006400, 0), +('msg_review120_066', 'conv_review_stress_1', 'user', '周末要不要录首歌', 1737600006500, 0), +('msg_review120_067', 'conv_review_stress_1', 'character', '可以,我想试试二重唱', 1737600006600, 0), +('msg_review120_068', 'conv_review_stress_1', 'user', '选什么歌?', 1737600006700, 0), +('msg_review120_069', 'conv_review_stress_1', 'character', '《群青》?', 1737600006800, 0), +('msg_review120_070', 'conv_review_stress_1', 'user', '行,我先练歌词', 1737600006900, 0), +('msg_review120_071', 'conv_review_stress_1', 'character', '我练和声部分', 1737600007000, 0), +('msg_review120_072', 'conv_review_stress_1', 'user', '期待录出来', 1737600007100, 0), +('msg_review120_073', 'conv_review_stress_1', 'character', '录完可以发给朋友听吗', 1737600007200, 0), +('msg_review120_074', 'conv_review_stress_1', 'user', '可以,但先给我听', 1737600007300, 0), +('msg_review120_075', 'conv_review_stress_1', 'character', '哈哈,好', 1737600007400, 0), +('msg_review120_076', 'conv_review_stress_1', 'user', '你今天有没有被老板催?', 1737600007500, 0), +('msg_review120_077', 'conv_review_stress_1', 
'character', '有一点,不过还能接受', 1737600007600, 0), +('msg_review120_078', 'conv_review_stress_1', 'user', '保持节奏就好', 1737600007700, 0), +('msg_review120_079', 'conv_review_stress_1', 'character', '谢谢关心', 1737600007800, 0), +('msg_review120_080', 'conv_review_stress_1', 'user', '随时聊', 1737600007900, 0); + +-- 81-120条消息 +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +('msg_review120_081', 'conv_review_stress_1', 'character', '晚上有看直播吗?', 1737600008000, 0), +('msg_review120_082', 'conv_review_stress_1', 'user', '看了点游戏直播', 1737600008100, 0), +('msg_review120_083', 'conv_review_stress_1', 'character', '哪款游戏?', 1737600008200, 0), +('msg_review120_084', 'conv_review_stress_1', 'user', 'APEX,主播很能刚', 1737600008300, 0), +('msg_review120_085', 'conv_review_stress_1', 'character', '我最近想练枪法', 1737600008400, 0), +('msg_review120_086', 'conv_review_stress_1', 'user', '一起开训练场', 1737600008500, 0), +('msg_review120_087', 'conv_review_stress_1', 'character', '好,明天晚上?', 1737600008600, 0), +('msg_review120_088', 'conv_review_stress_1', 'user', '明晚八点', 1737600008700, 0), +('msg_review120_089', 'conv_review_stress_1', 'character', '记得提醒我', 1737600008800, 0), +('msg_review120_090', 'conv_review_stress_1', 'user', '一定', 1737600008900, 0), +('msg_review120_091', 'conv_review_stress_1', 'character', '你今天听歌了吗?', 1737600009000, 0), +('msg_review120_092', 'conv_review_stress_1', 'user', '循环了日系歌单', 1737600009100, 0), +('msg_review120_093', 'conv_review_stress_1', 'character', '发给我!', 1737600009200, 0), +('msg_review120_094', 'conv_review_stress_1', 'user', '稍后分享', 1737600009300, 0), +('msg_review120_095', 'conv_review_stress_1', 'character', '想睡前听', 1737600009400, 0), +('msg_review120_096', 'conv_review_stress_1', 'user', '好的', 1737600009500, 0), +('msg_review120_097', 'conv_review_stress_1', 'character', '今天有发生好玩的事吗?', 1737600009600, 0), +('msg_review120_098', 'conv_review_stress_1', 'user', '同事带了自制甜品', 1737600009700, 0), 
+('msg_review120_099', 'conv_review_stress_1', 'character', '羡慕!', 1737600009800, 0), +('msg_review120_100', 'conv_review_stress_1', 'user', '味道不错', 1737600009900, 0), +('msg_review120_101', 'conv_review_stress_1', 'character', '下次带给我尝尝', 1737600010000, 0), +('msg_review120_102', 'conv_review_stress_1', 'user', '好呀', 1737600010100, 0), +('msg_review120_103', 'conv_review_stress_1', 'character', '明天的安排呢?', 1737600010200, 0), +('msg_review120_104', 'conv_review_stress_1', 'user', '上午开会,下午写文档', 1737600010300, 0), +('msg_review120_105', 'conv_review_stress_1', 'character', '别忘了休息', 1737600010400, 0), +('msg_review120_106', 'conv_review_stress_1', 'user', '知道啦', 1737600010500, 0), +('msg_review120_107', 'conv_review_stress_1', 'character', '有想看的电影吗?', 1737600010600, 0), +('msg_review120_108', 'conv_review_stress_1', 'user', '想看《沙丘2》', 1737600010700, 0), +('msg_review120_109', 'conv_review_stress_1', 'character', '我也想看!', 1737600010800, 0), +('msg_review120_110', 'conv_review_stress_1', 'user', '找个时间一起', 1737600010900, 0), +('msg_review120_111', 'conv_review_stress_1', 'character', '周日如何?', 1737600011000, 0), +('msg_review120_112', 'conv_review_stress_1', 'user', '可以', 1737600011100, 0), +('msg_review120_113', 'conv_review_stress_1', 'character', '那就约好啦', 1737600011200, 0), +('msg_review120_114', 'conv_review_stress_1', 'user', '好期待', 1737600011300, 0), +('msg_review120_115', 'conv_review_stress_1', 'character', '今天感觉状态不错', 1737600011400, 0), +('msg_review120_116', 'conv_review_stress_1', 'user', '我也是', 1737600011500, 0), +('msg_review120_117', 'conv_review_stress_1', 'character', '要不要早点休息?', 1737600011600, 0), +('msg_review120_118', 'conv_review_stress_1', 'user', '再聊几句', 1737600011700, 0), +('msg_review120_119', 'conv_review_stress_1', 'character', '好,我陪你', 1737600011800, 0), +('msg_review120_120', 'conv_review_stress_1', 'user', '谢谢,晚安', 1737600011900, 0); + +-- 多批次选项生成记录(用于节点分组) +INSERT OR IGNORE INTO ai_suggestions (id, conversation_id, message_id, 
decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, tags, created_at) VALUES +('sugg_review1_a', 'conv_review_stress_1', 'msg_review120_040', NULL, NULL, NULL, '主动约时间', '提出周末一起逛书店', 2, '约会,书店', 1737600004050), +('sugg_review1_b', 'conv_review_stress_1', 'msg_review120_040', NULL, NULL, NULL, '保持关心', '询问她最近的压力点', 1, '关心,情绪', 1737600004050), +('sugg_review1_c', 'conv_review_stress_1', 'msg_review120_040', NULL, NULL, NULL, '轻松话题', '继续聊动漫和歌曲', 1, '兴趣,话题', 1737600004050), +('sugg_review2_a', 'conv_review_stress_1', 'msg_review120_080', NULL, NULL, NULL, '共创计划', '提出一起录歌并分享', 3, '音乐,合作', 1737600008050), +('sugg_review2_b', 'conv_review_stress_1', 'msg_review120_080', NULL, NULL, NULL, '健康建议', '建议一起保持运动打卡', 2, '健康,运动', 1737600008050), +('sugg_review2_c', 'conv_review_stress_1', 'msg_review120_080', NULL, NULL, NULL, '情感确认', '表达陪伴的感谢与肯定', 2, '情感,陪伴', 1737600008050), +('sugg_review3_a', 'conv_review_stress_1', 'msg_review120_110', NULL, NULL, NULL, '明确邀约', '确定周日看电影并买票', 3, '邀约,电影', 1737600011050), +('sugg_review3_b', 'conv_review_stress_1', 'msg_review120_110', NULL, NULL, NULL, '贴心提醒', '提前发送歌单帮助放松', 1, '贴心,音乐', 1737600011050), +('sugg_review3_c', 'conv_review_stress_1', 'msg_review120_110', NULL, NULL, NULL, '节奏控制', '建议早点休息保持状态', 1, '关心,休息', 1737600011050); + +-- === 复盘功能测试对话 === +-- 用于测试决策树展示效果的完整对话,包含多个决策点 + +-- 测试对话:与 Miyu 的深度交流 +INSERT OR IGNORE INTO conversations (id, character_id, title, date, affinity_change, summary, tags, created_at, updated_at) VALUES +('conv_review_test_1', 'miyu', '复盘测试:深夜游戏与情感交流', 1738000000000, 18, '一次包含多个关键决策点的对话,用于测试复盘功能和决策树可视化', '测试,复盘,游戏,情感,决策', 1738000000000, 1738000000000); + +-- 插入消息(15条消息,形成多个决策点) +INSERT OR IGNORE INTO messages (id, conversation_id, sender, content, timestamp, is_ai_generated) VALUES +-- 第一段对话:游戏邀请 +('msg_review_test_001', 'conv_review_test_1', 'character', '今晚有空吗?要不要一起打游戏?', 1738000000000, 0), +('msg_review_test_002', 'conv_review_test_1', 'user', '好啊!我刚好也想放松一下', 1738000001000, 0), 
+('msg_review_test_003', 'conv_review_test_1', 'character', '太好了!我最近在练新英雄,想找人配合', 1738000002000, 0), +-- 第二段对话:选择游戏 +('msg_review_test_004', 'conv_review_test_1', 'character', '你想玩什么?LOL还是原神?', 1738000003000, 0), +('msg_review_test_005', 'conv_review_test_1', 'user', 'LOL吧,好久没玩了', 1738000004000, 0), +('msg_review_test_006', 'conv_review_test_1', 'character', '好!那我选辅助,你打ADC', 1738000005000, 0), +-- 第三段对话:游戏中的交流 +('msg_review_test_007', 'conv_review_test_1', 'character', '这波配合不错!', 1738000006000, 0), +('msg_review_test_008', 'conv_review_test_1', 'user', '哈哈,你控得真好', 1738000007000, 0), +('msg_review_test_009', 'conv_review_test_1', 'character', '我们赢了!', 1738000008000, 0), +-- 第四段对话:游戏后的情感交流 +('msg_review_test_010', 'conv_review_test_1', 'character', '今天玩得真开心,谢谢你陪我', 1738000009000, 0), +('msg_review_test_011', 'conv_review_test_1', 'user', '我也很开心,和你一起玩总是很放松', 1738000010000, 0), +('msg_review_test_012', 'conv_review_test_1', 'character', '真的吗?那以后经常一起玩吧', 1738000011000, 0), +-- 第五段对话:深入话题 +('msg_review_test_013', 'conv_review_test_1', 'character', '其实...我最近心情不太好', 1738000012000, 0), +('msg_review_test_014', 'conv_review_test_1', 'user', '怎么了?愿意和我说说吗', 1738000013000, 0), +('msg_review_test_015', 'conv_review_test_1', 'character', '谢谢你愿意听我说...', 1738000014000, 0); + +-- 插入多个批次的 AI 建议(形成决策点) +-- 决策点1:回应游戏邀请(msg_review_test_002 之后) +INSERT OR IGNORE INTO ai_suggestions (id, conversation_id, message_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, tags, created_at) VALUES +('sugg_test_1_a', 'conv_review_test_1', 'msg_review_test_002', NULL, 'batch_test_1', 0, '积极回应', '好啊!我刚好也想放松一下', 5, '积极,游戏', 1738000001500), +('sugg_test_1_b', 'conv_review_test_1', 'msg_review_test_002', NULL, 'batch_test_1', 1, '询问细节', '好啊,你想玩什么游戏?', 3, '询问,关心', 1738000001500), +('sugg_test_1_c', 'conv_review_test_1', 'msg_review_test_002', NULL, 'batch_test_1', 2, '表达期待', '太好了!我正想找人一起玩', 4, '期待,热情', 1738000001500); + +-- 决策点2:选择游戏类型(msg_review_test_004 之后) +INSERT OR 
IGNORE INTO ai_suggestions (id, conversation_id, message_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, tags, created_at) VALUES +('sugg_test_2_a', 'conv_review_test_1', 'msg_review_test_004', NULL, 'batch_test_2', 0, '选择LOL', 'LOL吧,好久没玩了', 6, '选择,游戏', 1738000003500), +('sugg_test_2_b', 'conv_review_test_1', 'msg_review_test_004', NULL, 'batch_test_2', 1, '选择原神', '原神吧,我想刷副本', 5, '选择,游戏', 1738000003500), +('sugg_test_2_c', 'conv_review_test_1', 'msg_review_test_004', NULL, 'batch_test_2', 2, '让对方决定', '你决定吧,我都可以', 4, '体贴,随和', 1738000003500); + +-- 决策点3:游戏中的互动(msg_review_test_007 之后) +INSERT OR IGNORE INTO ai_suggestions (id, conversation_id, message_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, tags, created_at) VALUES +('sugg_test_3_a', 'conv_review_test_1', 'msg_review_test_007', NULL, 'batch_test_3', 0, '赞美配合', '哈哈,你控得真好', 7, '赞美,互动', 1738000006500), +('sugg_test_3_b', 'conv_review_test_1', 'msg_review_test_007', NULL, 'batch_test_3', 1, '谦虚回应', '没有啦,是你打得好', 5, '谦虚,互动', 1738000006500), +('sugg_test_3_c', 'conv_review_test_1', 'msg_review_test_007', NULL, 'batch_test_3', 2, '继续配合', '我们继续配合,争取赢下这局', 6, '积极,配合', 1738000006500); + +-- 决策点4:游戏后的情感回应(msg_review_test_010 之后) +INSERT OR IGNORE INTO ai_suggestions (id, conversation_id, message_id, decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, tags, created_at) VALUES +('sugg_test_4_a', 'conv_review_test_1', 'msg_review_test_010', NULL, 'batch_test_4', 0, '表达共鸣', '我也很开心,和你一起玩总是很放松', 8, '情感,共鸣', 1738000009500), +('sugg_test_4_b', 'conv_review_test_1', 'msg_review_test_010', NULL, 'batch_test_4', 1, '简单回应', '不客气,我也玩得很开心', 5, '简单,礼貌', 1738000009500), +('sugg_test_4_c', 'conv_review_test_1', 'msg_review_test_010', NULL, 'batch_test_4', 2, '提出下次', '那我们下次再一起玩吧', 6, '主动,邀约', 1738000009500); + +-- 决策点5:深入情感话题(msg_review_test_013 之后) +INSERT OR IGNORE INTO ai_suggestions (id, conversation_id, message_id, 
decision_point_id, batch_id, suggestion_index, title, content, affinity_prediction, tags, created_at) VALUES +('sugg_test_5_a', 'conv_review_test_1', 'msg_review_test_013', NULL, 'batch_test_5', 0, '主动关心', '怎么了?愿意和我说说吗', 9, '关心,倾听', 1738000012500), +('sugg_test_5_b', 'conv_review_test_1', 'msg_review_test_013', NULL, 'batch_test_5', 1, '转移话题', '别想太多,我们继续玩游戏吧', 3, '回避,转移', 1738000012500), +('sugg_test_5_c', 'conv_review_test_1', 'msg_review_test_013', NULL, 'batch_test_5', 2, '表达支持', '我会陪着你的,有什么都可以和我说', 8, '支持,陪伴', 1738000012500); diff --git a/desktop/src/main.js b/desktop/src/main.js index 366a75b..f7ac8ff 100644 --- a/desktop/src/main.js +++ b/desktop/src/main.js @@ -3,6 +3,10 @@ import { initMain as initAudioLoopback } from 'electron-audio-loopback'; import path from 'path'; import fs from 'fs'; import { fileURLToPath } from 'url'; +import { performance } from 'perf_hooks'; +import os from 'os'; +import { getAsrCacheBaseSetting } from './core/app-settings.js'; +import { applyAsrCacheEnv } from './asr/asr-cache-env.js'; // 初始化 electron-audio-loopback(必须在 app.whenReady 之前调用) initAudioLoopback(); @@ -25,20 +29,120 @@ let shortcutManager; let asrPreloader; let permissionManager; +/** + * 初始化落盘日志(主进程 + 渲染进程 console 转发) + * - 写入位置: app.getPath('userData')/logs + * - 文件名: livegalgame-desktop_YYYYMMDD_HHMMSS.log + */ +function initFileLogging() { + try { + const userData = app.getPath('userData'); + const logsDir = path.join(userData, 'logs'); + fs.mkdirSync(logsDir, { recursive: true }); + + const pad2 = (n) => String(n).padStart(2, '0'); + const now = new Date(); + const ts = `${now.getFullYear()}${pad2(now.getMonth() + 1)}${pad2(now.getDate())}_${pad2(now.getHours())}${pad2(now.getMinutes())}${pad2(now.getSeconds())}`; + const logFile = path.join(logsDir, `livegalgame-desktop_${ts}.log`); + const stream = fs.createWriteStream(logFile, { flags: 'a', encoding: 'utf8' }); + + const writeLine = (level, args) => { + const time = new Date().toISOString(); + const msg = args 
+ .map((a) => { + if (a instanceof Error) return a.stack || a.message; + if (typeof a === 'string') return a; + try { + return JSON.stringify(a); + } catch { + return String(a); + } + }) + .join(' '); + stream.write(`[${time}] [${level}] ${msg}${os.EOL}`); + }; + + const wrap = (level, original) => (...args) => { + try { + writeLine(level, args); + } catch { + // ignore file logging errors + } + try { + original(...args); + } catch { + // ignore console errors (e.g. EPIPE) + } + }; + + console.log = wrap('INFO', console.log); + console.info = wrap('INFO', console.info); + console.warn = wrap('WARN', console.warn); + console.error = wrap('ERROR', console.error); + console.debug = wrap('DEBUG', console.debug); + + process.on('uncaughtException', (err) => { + console.error('[Process] uncaughtException', err); + }); + process.on('unhandledRejection', (reason) => { + console.error('[Process] unhandledRejection', reason); + }); + + app.on('render-process-gone', (_event, webContents, details) => { + console.error('[Renderer] render-process-gone', { id: webContents?.id, ...details }); + }); + app.on('child-process-gone', (_event, details) => { + console.error('[Process] child-process-gone', details); + }); + + console.log('[Log] File logging enabled:', logFile); + } catch (error) { + try { + console.error('[Log] Failed to init file logging:', error); + } catch { + // ignore + } + } +} + +/** + * 启动阶段耗时记录工具 + * @param {string} label 标签 + * @returns {() => void} 结束计时打印日志 + */ +function startTimer(label) { + const start = performance.now(); + return () => { + const cost = (performance.now() - start).toFixed(1); + console.log(`[Perf] ${label}: ${cost}ms`); + }; +} + +/** + * 监听主窗口加载事件以输出耗时 + * @param {BrowserWindow} mainWindow + */ +function attachMainWindowPerf(mainWindow) { + if (!mainWindow) return; + + const endReadyToShow = startTimer('mainWindow ready-to-show'); + mainWindow.once('ready-to-show', () => endReadyToShow()); + + const endDomReady = startTimer('mainWindow 
dom-ready'); + mainWindow.webContents.once('dom-ready', () => endDomReady()); + + const endDidFinishLoad = startTimer('mainWindow did-finish-load'); + mainWindow.webContents.once('did-finish-load', () => endDidFinishLoad()); +} + /** * 确保 ASR 缓存环境变量 */ function ensureAsrCacheEnv() { try { const userData = app.getPath('userData'); - if (!process.env.HF_HOME) { - process.env.HF_HOME = path.join(userData, 'hf-home'); - } - fs.mkdirSync(process.env.HF_HOME, { recursive: true }); - if (!process.env.ASR_CACHE_DIR) { - process.env.ASR_CACHE_DIR = path.join(process.env.HF_HOME, 'hub'); - } - fs.mkdirSync(process.env.ASR_CACHE_DIR, { recursive: true }); + const persistedBase = process.env.ASR_CACHE_BASE ? null : getAsrCacheBaseSetting(); + applyAsrCacheEnv({ userDataDir: userData, asrCacheBase: process.env.ASR_CACHE_BASE || persistedBase }); } catch (error) { console.error('[ASR] Failed to ensure cache directories:', error); } @@ -157,35 +261,58 @@ function cleanup() { // ========== 主应用入口 ========== app.whenReady().then(async () => { + // 需要在尽可能早的阶段初始化 + initFileLogging(); console.log('Starting LiveGalGame Desktop...'); + const endAppReadyPipeline = startTimer('app.whenReady pipeline'); + // 确保 ASR 缓存环境 + const endEnsureCache = startTimer('ensureAsrCacheEnv'); ensureAsrCacheEnv(); + endEnsureCache(); // 初始化所有管理器 + const endInitManagers = startTimer('initializeManagers'); initializeManagers(); + endInitManagers(); // 注册 IPC 处理器 + console.log('[Main] Registering IPC handlers...'); + const endRegisterIPC = startTimer('ipcManager.registerHandlers'); ipcManager.registerHandlers(); + endRegisterIPC(); + console.log('[Main] IPC handlers registered successfully'); // 注册桌面捕获器 registerDesktopCapturer(); // 创建主窗口 + const endCreateWindow = startTimer('windowManager.createMainWindow'); windowManager.createMainWindow(() => ipcManager.checkASRReady()); + attachMainWindowPerf(windowManager.getMainWindow()); + endCreateWindow(); // 注册全局快捷键 + const endRegisterShortcut = 
startTimer('shortcutManager.registerAll'); shortcutManager.registerAll(); + endRegisterShortcut(); // 请求权限(macOS) + const endRequestPermissions = startTimer('permissionManager.requestStartupPermissions'); await permissionManager.requestStartupPermissions(); + endRequestPermissions(); // 预加载ASR模型(后台进行,不阻塞UI) - asrPreloader.preload(() => ipcManager.checkASRReady()).catch(err => { - console.error('[ASR] 预加载失败,将在使用时加载:', err); - }); + const endPreloadASR = startTimer('asrPreloader.preload (async)'); + asrPreloader.preload(() => ipcManager.checkASRReady()) + .then(() => endPreloadASR()) + .catch(err => { + console.error('[ASR] 预加载失败,将在使用时加载:', err); + }); setupAppEventListeners(); + endAppReadyPipeline(); console.log('LiveGalGame Desktop 启动成功!'); -}); \ No newline at end of file +}); diff --git a/desktop/src/preload.js b/desktop/src/preload.js index 599791d..73bb3fa 100644 --- a/desktop/src/preload.js +++ b/desktop/src/preload.js @@ -11,6 +11,85 @@ const logger = { debug: console.debug.bind(console) }; +// 由于我们在 `on/once` 中会包一层 listener 来去掉 event 参数, +// 这里维护一个映射,确保 `removeListener` 可以正确移除(避免监听器泄漏/重复触发) +const listenerRegistry = new Map(); // channel -> Map(originalCallback -> wrappedCallback) + +function getChannelRegistry(channel) { + if (!listenerRegistry.has(channel)) { + listenerRegistry.set(channel, new Map()); + } + return listenerRegistry.get(channel); +} + +function registerWrappedListener(channel, callback, { once = false } = {}) { + if (typeof channel !== 'string' || typeof callback !== 'function') { + return () => { }; + } + + const channelMap = getChannelRegistry(channel); + const existing = channelMap.get(callback); + if (existing) { + try { + ipcRenderer.removeListener(channel, existing); + } catch { + // ignore + } + } + + const wrapped = (event, ...args) => { + try { + callback(...args); + } finally { + // once 的 listener 触发后会自动移除,但我们也要清理映射 + if (once) { + channelMap.delete(callback); + } + } + }; + + channelMap.set(callback, wrapped); + if (once) { + 
ipcRenderer.once(channel, wrapped); + } else { + ipcRenderer.on(channel, wrapped); + } + + return () => { + try { + const stored = channelMap.get(callback); + if (!stored) return; + ipcRenderer.removeListener(channel, stored); + channelMap.delete(callback); + } catch { + // ignore + } + }; +} + +function removeWrappedListener(channel, callback) { + if (typeof channel !== 'string' || typeof callback !== 'function') { + return; + } + const channelMap = listenerRegistry.get(channel); + const stored = channelMap?.get(callback); + if (stored) { + try { + ipcRenderer.removeListener(channel, stored); + } catch { + // ignore + } + channelMap.delete(callback); + return; + } + // 兼容:如果外部传入的就是原生 listener + try { + ipcRenderer.removeListener(channel, callback); + } catch { + // ignore + } +} + // 暴露安全的API给渲染进程 contextBridge.exposeInMainWorld('electronAPI', { // 平台信息 @@ -22,9 +101,9 @@ contextBridge.exposeInMainWorld('electronAPI', { // IPC通信 send: (channel, data) => ipcRenderer.send(channel, data), - on: (channel, callback) => ipcRenderer.on(channel, (event, ...args) => callback(...args)), - once: (channel, callback) => ipcRenderer.once(channel, (event, ...args) => callback(...args)), - removeListener: (channel, callback) => ipcRenderer.removeListener(channel, callback), + on: (channel, callback) => registerWrappedListener(channel, callback, { once: false }), + once: (channel, callback) => registerWrappedListener(channel, callback, { once: true }), + removeListener: (channel, callback) => removeWrappedListener(channel, callback), // 日志 log: (message) => ipcRenderer.send('log', message), @@ -63,12 +142,24 @@ contextBridge.exposeInMainWorld('electronAPI', { getAllConversations: () => ipcRenderer.invoke('db-get-all-conversations'), updateMessage: (messageId, updates) => ipcRenderer.invoke('db-update-message', messageId, updates), getConversationAIData: (conversationId) => ipcRenderer.invoke('db-get-conversation-ai-data', conversationId), + selectActionSuggestion: (payload) => 
ipcRenderer.invoke('db-select-action-suggestion', payload), getCharacterDetails: (characterId) => ipcRenderer.invoke('db-get-character-details', characterId), updateCharacterDetailsCustomFields: (characterId, customFields) => ipcRenderer.invoke('db-update-character-details-custom-fields', characterId, customFields), regenerateCharacterDetails: (characterId) => ipcRenderer.invoke('db-regenerate-character-details', characterId), deleteConversation: (conversationId) => ipcRenderer.invoke('db-delete-conversation', conversationId), deleteCharacter: (characterId) => ipcRenderer.invoke('db-delete-character', characterId), + // Review API + getConversationById: (conversationId) => ipcRenderer.invoke('db-get-conversation-by-id', conversationId), + getConversationReview: (conversationId) => ipcRenderer.invoke('review:get', conversationId), + generateConversationReview: (conversationId, options = {}) => + ipcRenderer.invoke('review:generate', { conversationId, ...options }), + onReviewProgress: (callback) => { + const listener = (event, data) => callback(data); + ipcRenderer.on('review:progress', listener); + return () => ipcRenderer.removeListener('review:progress', listener); + }, + // LLM配置API saveLLMConfig: (configData) => ipcRenderer.invoke('llm-save-config', configData), getAllLLMConfigs: () => ipcRenderer.invoke('llm-get-all-configs'), @@ -77,6 +168,53 @@ contextBridge.exposeInMainWorld('electronAPI', { deleteLLMConfig: (id) => ipcRenderer.invoke('llm-delete-config', id), testLLMConnection: (configData) => ipcRenderer.invoke('llm-test-connection', configData), setDefaultLLMConfig: (id) => ipcRenderer.invoke('llm-set-default-config', id), + getLLMFeatureConfigs: () => ipcRenderer.invoke('llm-get-feature-configs'), + setLLMFeatureConfig: (feature, llmConfigId) => + ipcRenderer.invoke('llm-set-feature-config', { feature, llm_config_id: llmConfigId }), + generateLLMSuggestions: (payload) => ipcRenderer.invoke('llm-generate-suggestions', payload), + detectTopicShift: 
(payload) => ipcRenderer.invoke('llm-detect-topic-shift', payload), + startSuggestionStream: (payload) => { + console.log('[Preload] Sending llm-start-suggestion-stream:', payload); + ipcRenderer.send('llm-start-suggestion-stream', payload); + console.log('[Preload] llm-start-suggestion-stream sent successfully'); + }, + // Memory Service (结构化画像/事件) + memoryQueryProfiles: (payload) => ipcRenderer.invoke('memory-query-profiles', payload), + memoryQueryEvents: (payload) => ipcRenderer.invoke('memory-query-events', payload), + onSuggestionStreamStart: (callback) => { + const listener = (event, data) => callback(data); + ipcRenderer.on('llm-suggestion-stream-start', listener); + return () => ipcRenderer.removeListener('llm-suggestion-stream-start', listener); + }, + onSuggestionStreamHeader: (callback) => { + const listener = (event, data) => callback(data); + ipcRenderer.on('llm-suggestion-stream-header', listener); + return () => ipcRenderer.removeListener('llm-suggestion-stream-header', listener); + }, + onSuggestionStreamChunk: (callback) => { + const listener = (event, data) => callback(data); + ipcRenderer.on('llm-suggestion-stream-chunk', listener); + return () => ipcRenderer.removeListener('llm-suggestion-stream-chunk', listener); + }, + onSuggestionStreamPartial: (callback) => { + const listener = (event, data) => callback(data); + ipcRenderer.on('llm-suggestion-stream-partial', listener); + return () => ipcRenderer.removeListener('llm-suggestion-stream-partial', listener); + }, + onSuggestionStreamEnd: (callback) => { + const listener = (event, data) => callback(data); + ipcRenderer.on('llm-suggestion-stream-end', listener); + return () => ipcRenderer.removeListener('llm-suggestion-stream-end', listener); + }, + onSuggestionStreamError: (callback) => { + const listener = (event, data) => callback(data); + ipcRenderer.on('llm-suggestion-stream-error', listener); + return () => ipcRenderer.removeListener('llm-suggestion-stream-error', listener); + }, + + // 
对话建议配置 + getSuggestionConfig: () => ipcRenderer.invoke('suggestion-get-config'), + updateSuggestionConfig: (updates) => ipcRenderer.invoke('suggestion-update-config', updates), // ASR(语音识别)API asrInitialize: (conversationId) => ipcRenderer.invoke('asr-initialize', conversationId), @@ -86,7 +224,8 @@ contextBridge.exposeInMainWorld('electronAPI', { asrGetModelPresets: () => ipcRenderer.invoke('asr-get-model-presets'), asrGetModelStatus: (modelId) => ipcRenderer.invoke('asr-get-model-status', modelId), asrGetAllModelStatuses: () => ipcRenderer.invoke('asr-get-all-model-statuses'), - asrDownloadModel: (modelId) => ipcRenderer.invoke('asr-download-model', modelId), + // 下载 ASR 模型,允许指定下载源(huggingface / modelscope) + asrDownloadModel: (modelId, source) => ipcRenderer.invoke('asr-download-model', modelId, source), asrCancelModelDownload: (modelId) => ipcRenderer.invoke('asr-cancel-model-download', modelId), asrGetConfigs: () => ipcRenderer.invoke('asr-get-configs'), asrCreateConfig: (configData) => ipcRenderer.invoke('asr-create-config', configData), @@ -98,12 +237,23 @@ contextBridge.exposeInMainWorld('electronAPI', { asrGetSpeechRecords: (conversationId) => ipcRenderer.invoke('asr-get-speech-records', conversationId), asrConvertToMessage: (recordId, conversationId) => ipcRenderer.invoke('asr-convert-to-message', recordId, conversationId), asrCleanupAudioFiles: (retentionDays) => ipcRenderer.invoke('asr-cleanup-audio-files', retentionDays), + asrGetAudioDataUrl: (filePath) => ipcRenderer.invoke('asr-get-audio-data-url', filePath), + asrDeleteAudioFile: (payload) => ipcRenderer.invoke('asr-delete-audio-file', payload), asrReloadModel: () => ipcRenderer.invoke('asr-reload-model'), + // 模型缓存目录(HF / ModelScope)配置 + appGetModelCachePaths: () => ipcRenderer.invoke('app-get-model-cache-paths'), + appSelectDirectory: (options) => ipcRenderer.invoke('app-select-directory', options), + appSetAsrCacheBase: (cacheBase) => ipcRenderer.invoke('app-set-asr-cache-base', cacheBase), 
onAsrModelDownloadStarted: (callback) => { const listener = (event, payload) => callback(payload); ipcRenderer.on('asr-model-download-started', listener); return () => ipcRenderer.removeListener('asr-model-download-started', listener); }, + onAsrModelDownloadLog: (callback) => { + const listener = (event, payload) => callback(payload); + ipcRenderer.on('asr-model-download-log', listener); + return () => ipcRenderer.removeListener('asr-model-download-log', listener); + }, onAsrModelDownloadProgress: (callback) => { const listener = (event, payload) => callback(payload); ipcRenderer.on('asr-model-download-progress', listener); @@ -139,4 +289,8 @@ ipcRenderer.on('window-focused', () => { logger.log('Window focused'); }); +console.log('[Preload] Preload script loaded successfully, exposing APIs:', { + hasStartSuggestionStream: !!window.electronAPI?.startSuggestionStream, + hasSuggestionStreamEvents: !!window.electronAPI?.onSuggestionStreamStart +}); logger.log('Preload script loaded successfully'); diff --git a/desktop/src/renderer/App.jsx b/desktop/src/renderer/App.jsx index 310470a..f9a441b 100644 --- a/desktop/src/renderer/App.jsx +++ b/desktop/src/renderer/App.jsx @@ -5,6 +5,7 @@ import Characters from './pages/Characters'; import ConversationEditor from './pages/ConversationEditor'; import Settings from './pages/Settings'; import ASRSettings from './pages/ASRSettings'; +import StoryTreePage from './pages/StoryTreePage'; function App() { console.log('App component rendering'); @@ -14,6 +15,7 @@ function App() { } /> } /> } /> + } /> } /> } /> } /> diff --git a/desktop/src/renderer/components/Audio/AudioDeviceSelector.jsx b/desktop/src/renderer/components/Audio/AudioDeviceSelector.jsx new file mode 100644 index 0000000..8cc9748 --- /dev/null +++ b/desktop/src/renderer/components/Audio/AudioDeviceSelector.jsx @@ -0,0 +1,68 @@ +/** + * 音频设备选择组件 + */ + +import React from 'react'; + +export const AudioDeviceSelector = ({ + audioDevices, + selectedAudioDevice, + 
onDeviceChange, + captureSystemAudio, + onSystemAudioToggle, + speaker1Source, + speaker2Source +}) => { + if (audioDevices.length === 0) { + return ( +
+

+ 未检测到音频输入设备 +

+
+ ); + } + + return ( +
+
+ + +

+ 选择要使用的麦克风设备(用于识别用户说话) +

+ {speaker1Source && ( +

+ check_circle + 已保存配置 +

+ )} +
+ +
+ onSystemAudioToggle(e.target.checked)} + className="rounded border-border-light dark:border-border-dark text-primary focus:ring-primary" + /> + +
+
+ ); +}; \ No newline at end of file diff --git a/desktop/src/renderer/components/Audio/AudioTester.jsx b/desktop/src/renderer/components/Audio/AudioTester.jsx new file mode 100644 index 0000000..c1332ff --- /dev/null +++ b/desktop/src/renderer/components/Audio/AudioTester.jsx @@ -0,0 +1,120 @@ +/** + * 音频测试组件 + */ + +import React from 'react'; + +export const AudioTester = ({ + isListening, + audioStatus, + desktopCapturerError, + micVolumeLevel, + systemVolumeLevel, + totalVolumeLevel, + onStart, + onStop, + captureSystemAudio +}) => { + return ( +
+

+ 测试麦克风监听 +

+
+
+ {!isListening ? ( + + ) : ( + + )} +
+ + {isListening && ( +
+
+ {audioStatus.includes('✅') || audioStatus.includes('成功') ? ( + check_circle + ) : audioStatus.includes('⚠️') || audioStatus.includes('❌') || audioStatus.includes('失败') || audioStatus.includes('错误') ? ( + error + ) : ( + mic + )} + {audioStatus.replace(/[✅⚠️❌]/g, '').trim()} +
+ + {desktopCapturerError && ( +
+

+ warning + 原生屏幕音频捕获失败 +

+

{desktopCapturerError}

+

+ 自动捕获系统音频失败。请检查系统权限或驱动。 +

+
+ )} + +
+
+ 麦克风 +
+
+
+ + {micVolumeLevel.toFixed(0)}% + +
+ + {captureSystemAudio && ( +
+ 系统音频 +
+
+
+ + {systemVolumeLevel.toFixed(0)}% + +
+ )} + +
+ 总音量 +
+
+
+ + {totalVolumeLevel.toFixed(0)}% + +
+
+
+ )} +
+
+ ); +}; \ No newline at end of file diff --git a/desktop/src/renderer/components/Chat/CompactHud.jsx b/desktop/src/renderer/components/Chat/CompactHud.jsx new file mode 100644 index 0000000..98f1d5d --- /dev/null +++ b/desktop/src/renderer/components/Chat/CompactHud.jsx @@ -0,0 +1,188 @@ +import React, { useMemo } from 'react'; + +const TAG_CLASSES = ['compact-tag-blue', 'compact-tag-red', 'compact-tag-purple']; + +const normalizeVolume = (value) => { + if (value === undefined || value === null || Number.isNaN(value)) return 0; + if (value > 1) return Math.min(100, value); + return Math.min(100, Math.max(0, Math.round(value * 100))); +}; + +export const CompactHud = ({ + isListening, + micVolumeLevel, + systemVolumeLevel, + suggestions, + suggestionMeta, + suggestionStatus, + suggestionError, + copiedSuggestionId, + onGenerate, + onCopy, + onToggleListening, + onSwitchSession, + onClose, + onToggleViewMode, + sessionInfo +}) => { + const micVolumePercent = normalizeVolume(micVolumeLevel); + const sysVolumePercent = normalizeVolume(systemVolumeLevel); + + const displaySuggestions = useMemo(() => { + if (!Array.isArray(suggestions) || suggestions.length === 0) return []; + return suggestions.slice(0, 3).map((s, index) => { + const normalizedTags = Array.isArray(s.tags) ? s.tags.filter(Boolean) : []; + return { + ...s, + tagClass: TAG_CLASSES[index % TAG_CLASSES.length], + displayTags: normalizedTags.slice(0, 3), + displayTitle: s.title || `建议 ${index + 1}`, + displayContent: s.content || s.title || '暂无内容' + }; + }); + }, [suggestions]); + + const listeningLabel = isListening ? 'LISTENING' : 'PAUSED'; + const hasSuggestions = Array.isArray(suggestions) && suggestions.length > 0; + const modelName = suggestionMeta?.model || '—'; + + return ( +
+
+
+
+
+
+
+
+
+
+ {listeningLabel} +
+ +
+
+ ME +
+
+
+ {micVolumePercent}% +
+
+ HER +
+
+
+ {sysVolumePercent}% +
+
+
+ +
+ + + +
+
+ +
+
+ {sessionInfo?.characterName || '心情助手'} + {sessionInfo?.conversationName ? ( + · {sessionInfo.conversationName} + ) : null} +
+
+ + +
+
+ +
+ {suggestionError && ( +
{suggestionError}
+ )} + {suggestionStatus === 'loading' && ( +
正在生成候选回复…
+ )} + {suggestionStatus === 'streaming' && suggestions.length === 0 && ( +
流式生成中…
+ )} + {suggestions.length === 0 && suggestionStatus !== 'loading' && suggestionStatus !== 'streaming' && ( +
暂无建议,点击下方“生成建议”
+ )} +
+ {displaySuggestions.map((item, index) => ( +
+
+ {item.displayTags?.length + ? item.displayTags.map((tag, tagIdx) => ( + + {tag} + + )) + : ( + + {item.displayTitle} + + )} +
+
{item.displayContent}
+
+ ))} +
+
+ +
+ Model: {modelName} + +
+
+
+ ); +}; + diff --git a/desktop/src/renderer/components/Chat/FullHud.jsx b/desktop/src/renderer/components/Chat/FullHud.jsx new file mode 100644 index 0000000..993af93 --- /dev/null +++ b/desktop/src/renderer/components/Chat/FullHud.jsx @@ -0,0 +1,116 @@ +import React from 'react'; +import { TranscriptView } from './TranscriptView.jsx'; +import { SuggestionsPanel } from './SuggestionsPanel.jsx'; + +export const FullHud = ({ + isListening, + toggleListening, + micVolumeLevel, + systemVolumeLevel, + systemAudioNotAuthorized, + chatSession, + messages, + suggestions, + onSwitchSession, + onToggleViewMode, + onClose +}) => { + const micVolumePercent = Math.min(100, Math.max(0, Math.round((micVolumeLevel || 0) * 100))); + const sysVolumePercent = Math.min(100, Math.max(0, Math.round((systemVolumeLevel || 0) * 100))); + + return ( +
+
+
+
+ +
+ {isListening ? 'LISTENING' : 'PAUSED'} +
+
+ ME +
+
+
+ {micVolumePercent}% +
+
+ HER +
+
+
+ {sysVolumePercent}% +
+
+
+ {chatSession.sessionInfo?.characterName || '心情助手'} +
+
+ + + + +
+
+ +
+
{chatSession.sessionInfo?.conversationName || '最近互动'}
+ +
+ + suggestions.updateSuggestionConfig({ enable_passive_suggestion: enabled ? 1 : 0 })} + sessionInfo={chatSession.sessionInfo} + /> +
+
+ ); +}; diff --git a/desktop/src/renderer/components/Chat/SuggestionsPanel.jsx b/desktop/src/renderer/components/Chat/SuggestionsPanel.jsx new file mode 100644 index 0000000..952b7ba --- /dev/null +++ b/desktop/src/renderer/components/Chat/SuggestionsPanel.jsx @@ -0,0 +1,187 @@ +/** + * 建议面板组件 + */ + +import React from 'react'; + +export const SuggestionsPanel = ({ + suggestions, + suggestionMeta, + suggestionStatus, + suggestionError, + PASSIVE_REASON_LABEL, + copiedSuggestionId, + onGenerate, + onCopy, + onSelectSuggestion, + suggestionConfig, + onTogglePassive, + sessionInfo +}) => { + const isStreaming = suggestionStatus === 'streaming'; + const expectedCount = suggestionMeta?.expectedCount || null; + const generatedCount = suggestions.length; + const passiveEnabled = Boolean(suggestionConfig?.enable_passive_suggestion); + + return ( +
+
+ AI 建议 +
+ {typeof onTogglePassive === 'function' && ( + + )} + {suggestionMeta?.reason && ( + + {PASSIVE_REASON_LABEL[suggestionMeta.reason] || '自动触发'} + + )} + {isStreaming && ( + + 实时生成 {generatedCount} + {expectedCount ? `/${expectedCount}` : ''} + + )} + +
+
+ {suggestionError && ( +
+

{suggestionError}

+
+ )} +
+ {suggestionStatus === 'loading' && ( +
+
+ )} + {isStreaming && generatedCount === 0 && ( +
+
+ )} + {!isStreaming && suggestionStatus !== 'loading' && suggestions.length === 0 && ( +
+

+ 暂无建议,点击上方按钮或等待系统自动推荐 +

+
+ )} + {suggestions.map((suggestion) => { + const showCombined = + !suggestion.content || suggestion.title === suggestion.content; + const mainText = suggestion.content || suggestion.title; + return ( +
onCopy?.(suggestion.id, mainText)} + onKeyDown={(e) => { + if (e.key === 'Enter' || e.key === ' ') { + e.preventDefault(); + onCopy?.(suggestion.id, mainText); + } + }} + > + {suggestion.is_selected ? ( +
+ ✓ +
+ ) : null} + {showCombined ? ( +

{mainText}

+ ) : ( + <> +
+ {suggestion.title} + {suggestion.tags?.length > 0 && ( +
+ {suggestion.tags.map((tag) => ( + + {tag} + + ))} +
+ )} +
+

{suggestion.content}

+ + )} + {showCombined && suggestion.tags?.length > 0 && ( +
+ {suggestion.tags.map((tag) => ( + + {tag} + + ))} +
+ )} + {!suggestion.is_selected ? ( +
+ +
+ ) : null} +
+ ); + })} + {isStreaming && generatedCount > 0 && (!expectedCount || generatedCount < expectedCount) && ( +
+
+ )} +
+ {suggestions.length > 0 && !isStreaming && suggestionStatus !== 'loading' && ( +
+ +
+ )} +
+ ); +}; diff --git a/desktop/src/renderer/components/Chat/TranscriptView.jsx b/desktop/src/renderer/components/Chat/TranscriptView.jsx new file mode 100644 index 0000000..b2dfac6 --- /dev/null +++ b/desktop/src/renderer/components/Chat/TranscriptView.jsx @@ -0,0 +1,68 @@ +/** + * 转录视图组件 + */ + +import React from 'react'; + +export const TranscriptView = ({ + messages, + loading, + error, + isListening, + isNew, + transcriptRef +}) => { + const renderContent = () => { + if (loading) { + return ( +
+
+ ); + } + + if (error) { + // 检查是否是系统音频捕获失败的错误(不应该阻止应用运行) + const isSystemAudioError = error.includes('系统音频捕获失败'); + + return ( +
+

+ {isSystemAudioError ? '⚠️ ' : '❌ '}{error} +

+
+ ); + } + + if (!messages.length) { + return ( +
+

+ {isListening ? (isNew ? '新对话,开始聊天吧!' : '该对话还没有消息') : '点击上方播放按钮开始监听'} +

+
+ ); + } + + return ( + <> + {messages.map((msg, index) => { + const isUser = msg.sender === 'user'; + const key = msg.id ?? `${msg.sender}-${msg.timestamp ?? index}`; + return ( +
+
{msg.content || msg.text || ''}
+
+ ); + })} + + ); + }; + + return ( +
+ {renderContent()} +
+ ); +}; \ No newline at end of file diff --git a/desktop/src/renderer/components/Chat/VolumeIndicators.jsx b/desktop/src/renderer/components/Chat/VolumeIndicators.jsx new file mode 100644 index 0000000..b2f1d0f --- /dev/null +++ b/desktop/src/renderer/components/Chat/VolumeIndicators.jsx @@ -0,0 +1,47 @@ +/** + * 音量指示器组件 + */ + +import React from 'react'; + +export const VolumeIndicators = ({ + micVolumeLevel, + systemVolumeLevel, + systemAudioNotAuthorized, + sessionInfo +}) => { + if (!sessionInfo) return null; + + return ( +
+
+ 用户 +
+
+
+ {micVolumeLevel.toFixed(0)}% +
+
+ 角色 +
+
+
+ {systemVolumeLevel.toFixed(0)}% + {systemAudioNotAuthorized && ( + ⚠️ + )} +
+ {systemAudioNotAuthorized && ( +
+ 💡 系统音频未授权,请检查设置 +
+ )} +
+ ); +}; \ No newline at end of file diff --git a/desktop/src/renderer/components/LLM/LLMConfigForm.jsx b/desktop/src/renderer/components/LLM/LLMConfigForm.jsx new file mode 100644 index 0000000..aaa6558 --- /dev/null +++ b/desktop/src/renderer/components/LLM/LLMConfigForm.jsx @@ -0,0 +1,147 @@ +/** + * LLM配置表单组件 + */ + +import React from 'react'; + +export const LLMConfigForm = ({ + newConfig, + onChange, + onSubmit, + onTest, + onCancel, + testingConfig, + testConfigMessage, + testConfigError, + isEditing = false +}) => { + return ( +
+

+ {isEditing ? '编辑配置' : '添加新配置'} +

+
+
+ + onChange({ ...newConfig, name: e.target.value })} + className="w-full px-3 py-2 border border-border-light dark:border-border-dark rounded-lg bg-surface-light dark:bg-surface-dark text-text-light dark:text-text-dark focus:outline-none focus:ring-2 focus:ring-primary/50" + placeholder="例如:OpenAI GPT-4" + /> +

+ 仅用于显示和区分不同配置,可随意填写 +

+
+ +
+ + onChange({ ...newConfig, apiKey: e.target.value })} + className="w-full px-3 py-2 border border-border-light dark:border-border-dark rounded-lg bg-surface-light dark:bg-surface-dark text-text-light dark:text-text-dark focus:outline-none focus:ring-2 focus:ring-primary/50" + placeholder="sk-..." + /> +
+ +
+ + onChange({ ...newConfig, modelName: e.target.value })} + className="w-full px-3 py-2 border border-border-light dark:border-border-dark rounded-lg bg-surface-light dark:bg-surface-dark text-text-light dark:text-text-dark focus:outline-none focus:ring-2 focus:ring-primary/50" + placeholder="例如:gpt-4o-mini, claude-3-5-sonnet-20241022" + /> +

+ 请填写 API 服务商提供的 Model ID(如 gpt-4o、claude-3-5-sonnet-20241022),留空将使用 gpt-4o-mini +

+
+ +
+ + onChange({ ...newConfig, baseUrl: e.target.value })} + className="w-full px-3 py-2 border border-border-light dark:border-border-dark rounded-lg bg-surface-light dark:bg-surface-dark text-text-light dark:text-text-dark focus:outline-none focus:ring-2 focus:ring-primary/50" + placeholder="https://api.openai.com/v1" + /> +
+ +
+ + onChange({ ...newConfig, timeoutMs: e.target.value })} + className="w-full px-3 py-2 border border-border-light dark:border-border-dark rounded-lg bg-surface-light dark:bg-surface-dark text-text-light dark:text-text-dark focus:outline-none focus:ring-2 focus:ring-primary/50" + placeholder="15000" + /> +

+ 留空使用默认超时,建议不低于 1000ms +

+
+ +
+ onChange({ ...newConfig, isDefault: e.target.checked })} + className="w-4 h-4 text-primary border-border-light dark:border-border-dark rounded focus:ring-primary" + /> + +
+ +
+ + + +
+ {(testConfigMessage || testConfigError) && ( +
+ {testConfigMessage && ( +

{testConfigMessage}

+ )} + {testConfigError && ( +

{testConfigError}

+ )} +
+ )} +
+
+ ); +}; diff --git a/desktop/src/renderer/components/LLM/LLMConfigList.jsx b/desktop/src/renderer/components/LLM/LLMConfigList.jsx new file mode 100644 index 0000000..ef9944a --- /dev/null +++ b/desktop/src/renderer/components/LLM/LLMConfigList.jsx @@ -0,0 +1,83 @@ +/** + * LLM配置列表组件 + */ + +import React from 'react'; + +export const LLMConfigList = ({ + configs, + defaultConfig, + onSetDefault, + onEdit, + onDelete, + loading +}) => { + if (loading) { + return ( +
+
+

加载中...

+
+ ); + } + + if (!configs || configs.length === 0) { + return ( +
+

暂无LLM配置

+
+ ); + } + + return ( +
+ {configs.map((config) => ( +
+
+
+

+ {config.name || '未命名配置'} + {defaultConfig?.id === config.id && ( + 默认 + )} +

+
+

模型:{config.model_name || '未配置'}

+ {config.base_url ?

Base URL:{config.base_url}

: null} + {config.timeout_ms ?

超时:{config.timeout_ms} ms

: null} +
+
+
+ {defaultConfig?.id !== config.id && ( + + )} + + +
+
+
+ ))} +
+ ); +}; diff --git a/desktop/src/renderer/components/Layout.jsx b/desktop/src/renderer/components/Layout.jsx index 242b119..2fa068c 100644 --- a/desktop/src/renderer/components/Layout.jsx +++ b/desktop/src/renderer/components/Layout.jsx @@ -1,9 +1,14 @@ import { Link, useLocation } from 'react-router-dom'; -import { useId } from 'react'; +import { useEffect, useId, useRef, useState } from 'react'; +import { calculateProgress, formatBytes, formatSpeed } from '../pages/asrSettingsUtils'; function Layout({ children }) { const location = useLocation(); const logoId = useId(); + const [downloadStatus, setDownloadStatus] = useState(null); + const hideTimerRef = useRef(null); + const [hudNotice, setHudNotice] = useState(null); + const hudNoticeTimerRef = useRef(null); const isActive = (path) => { if (path === '/') { @@ -12,6 +17,172 @@ function Layout({ children }) { return location.pathname.startsWith(path); }; + useEffect(() => { + const api = window.electronAPI; + if (!api) { + return undefined; + } + + const cleanups = []; + + const upsertStatus = (payload = {}) => { + setDownloadStatus((prev) => { + const base = prev || {}; + return { + ...base, + ...payload, + lastUpdated: Date.now() + }; + }); + }; + + const handleStart = (payload) => { + if (hideTimerRef.current) { + clearTimeout(hideTimerRef.current); + } + upsertStatus({ + ...payload, + activeDownload: true, + status: 'start', + source: payload?.source || 'preload' + }); + }; + + const handleProgress = (payload) => { + upsertStatus({ + ...payload, + activeDownload: true, + status: 'progress', + source: payload?.source || 'preload' + }); + }; + + const handleComplete = (payload) => { + const status = payload?.status || {}; + upsertStatus({ + ...status, + ...payload, + activeDownload: false, + status: 'complete', + isDownloaded: true, + source: payload?.source || status?.source || 'preload' + }); + hideTimerRef.current = setTimeout(() => { + setDownloadStatus(null); + }, 5000); + }; + + const handleError = 
(payload) => { + if (hideTimerRef.current) { + clearTimeout(hideTimerRef.current); + } + upsertStatus({ + ...payload, + activeDownload: false, + status: 'error', + source: payload?.source || 'preload' + }); + }; + + const handleCancelled = (payload) => { + if (hideTimerRef.current) { + clearTimeout(hideTimerRef.current); + } + upsertStatus({ + ...payload, + activeDownload: false, + status: 'cancelled', + source: payload?.source || 'preload' + }); + hideTimerRef.current = setTimeout(() => setDownloadStatus(null), 2000); + }; + + if (api.onAsrModelDownloadStarted) { + cleanups.push(api.onAsrModelDownloadStarted(handleStart)); + } + if (api.onAsrModelDownloadProgress) { + cleanups.push(api.onAsrModelDownloadProgress(handleProgress)); + } + if (api.onAsrModelDownloadComplete) { + cleanups.push(api.onAsrModelDownloadComplete(handleComplete)); + } + if (api.onAsrModelDownloadError) { + cleanups.push(api.onAsrModelDownloadError(handleError)); + } + if (api.onAsrModelDownloadCancelled) { + cleanups.push(api.onAsrModelDownloadCancelled(handleCancelled)); + } + + return () => { + cleanups.forEach((cleanup) => { + if (typeof cleanup === 'function') { + cleanup(); + } + }); + if (hideTimerRef.current) { + clearTimeout(hideTimerRef.current); + } + }; + }, []); + + useEffect(() => { + const api = window.electronAPI; + if (!api?.on) { + return undefined; + } + + const safeNumber = (value) => (typeof value === 'number' && !Number.isNaN(value) ? value : null); + + const handleHudLoading = (payload = {}) => { + if (hudNoticeTimerRef.current) { + clearTimeout(hudNoticeTimerRef.current); + } + const waitedSeconds = safeNumber(payload.waitedSeconds); + const message = payload.message + || (payload.downloading + ? '正在下载语音模型,首次下载可能较慢,请耐心等待...' + : 'ASR 模型正在加载,HUD 将在就绪后自动打开'); + setHudNotice({ + message, + waitedSeconds, + ready: false, + downloading: Boolean(payload.downloading) + }); + if (api.log) { + try { + const waitedLabel = waitedSeconds !== null ? 
`(已等待 ${waitedSeconds.toFixed(1)}s)` : ''; + api.log(`[HUD] ${message}${waitedLabel}`); + } catch (error) { + console.warn('[HUD] 记录日志失败:', error); + } + } + }; + + const handleHudReady = (payload = {}) => { + const waitedSeconds = safeNumber(payload.waitedSeconds); + setHudNotice((prev) => ({ + message: payload.message || 'ASR 模型已就绪,正在打开 HUD', + waitedSeconds: waitedSeconds ?? prev?.waitedSeconds ?? null, + ready: true + })); + if (hudNoticeTimerRef.current) { + clearTimeout(hudNoticeTimerRef.current); + } + hudNoticeTimerRef.current = setTimeout(() => setHudNotice(null), 3000); + }; + + api.on('hud-loading', handleHudLoading); + api.on('hud-ready', handleHudReady); + + return () => { + api.removeListener?.('hud-loading', handleHudLoading); + api.removeListener?.('hud-ready', handleHudReady); + if (hudNoticeTimerRef.current) { + clearTimeout(hudNoticeTimerRef.current); + } + }; + }, []); + const handleMinimize = () => { window.electronAPI?.minimizeWindow(); }; @@ -314,6 +485,83 @@ function Layout({ children }) { {/* 主内容区域 */}
+ {hudNotice && ( +
+
+
+
+
+ {hudNotice.ready ? 'ASR 模型已就绪,正在打开 HUD' : 'ASR 模型加载中,HUD 将在就绪后弹出'} +
+
+ {hudNotice.message} + {typeof hudNotice.waitedSeconds === 'number' + ? `(已等待 ${hudNotice.waitedSeconds.toFixed(1)}s)` + : ''} +
+
+ +
+
+
+ )} + {downloadStatus && ( +
+
+
+
+
+ ASR 模型下载中(首次运行可能较久) +
+
+ {downloadStatus.status === 'complete' + ? '下载完成,语音识别已就绪' + : downloadStatus.status === 'error' + ? `下载失败:${downloadStatus.message || '请检查网络后重试'}` + : '后台正在准备语音模型,请保持应用运行'} +
+
+ +
+
+
+
+
+
+ + {formatBytes(downloadStatus.downloadedBytes || 0)} / {formatBytes(downloadStatus.totalBytes || downloadStatus.sizeBytes || 0)} ({calculateProgress( + downloadStatus.downloadedBytes || 0, + downloadStatus.totalBytes || downloadStatus.sizeBytes || 0 + )}%) + + 速度:{formatSpeed(downloadStatus.bytesPerSecond)} +
+
+
+
+ )} {children}
diff --git a/desktop/src/renderer/components/Suggestions/SuggestionConfigForm.jsx b/desktop/src/renderer/components/Suggestions/SuggestionConfigForm.jsx new file mode 100644 index 0000000..d5f3eaf --- /dev/null +++ b/desktop/src/renderer/components/Suggestions/SuggestionConfigForm.jsx @@ -0,0 +1,155 @@ +/** + * 建议配置表单组件 + */ + +import React from 'react'; + +export const SuggestionConfigForm = ({ + form, + onUpdateField, + onNumberChange, + onSave, + loading, + saving, + message, + error +}) => { + return ( +
+ {error && ( +
+ {error} +
+ )} + +
+
+

被动推荐

+

+ 达到阈值时自动生成候选方向(静默、连续消息或话题转折)。 +

+
+ +
+ + {form.enable_passive_suggestion === false && ( +
+ 开启"被动推荐"后可配置详细触发策略和窗口。 +
+ )} + +
+
+ + +

+ 建议 2-5 个之间,避免信息过载。 +

+
+ +
+ + +

+ 仅截取最近 N 条历史,建议 20-50 条以获得更好的建议质量。 +

+
+ +
+ + +

+ 角色发言后等待多久仍未收到用户回复就自动给出建议,避免冷场。 +

+
+ +
+ + +

+ 连续收到角色多少条消息立刻触发,防止角色连讲用户无回应。 +

+
+ +
+ + +

+ 防止建议过于频繁的等待时间。达到触发条件后,需等待此时间才会再次生成建议。 +

+
+
+ +
+ {message && ( + {message} + )} + +
+
+ ); +}; diff --git a/desktop/src/renderer/components/review/ReviewSection.jsx b/desktop/src/renderer/components/review/ReviewSection.jsx new file mode 100644 index 0000000..811bf17 --- /dev/null +++ b/desktop/src/renderer/components/review/ReviewSection.jsx @@ -0,0 +1,181 @@ + +import React from 'react'; +import { useNavigate } from 'react-router-dom'; +import { useConversationReview } from '../../hooks/useConversationReview.js'; + +export default function ReviewSection({ conversationId, onReviewGenerated }) { + const { review, isLoading, progress, generate } = useConversationReview(conversationId); + const navigate = useNavigate(); + + if (isLoading) { + const hint = progress?.message || '正在生成复盘分析...'; + return ( +
+
+
+

{hint}

+
+
+ ); + } + + if (!review) { + return ( +
+
+ history_edu + 剧情复盘 +
+

+ 点击生成复盘,回顾关键决策点,探索"如果当时..."的可能性。 +

+ +
+ ); + } + + const { summary, has_nodes } = review; + const hasNodes = !!has_nodes; + const chatOverview = summary.chat_overview || summary.conversation_summary; + const selfEvaluation = summary.self_evaluation; + const performanceEval = summary.performance_evaluation || {}; + const expressionAbility = performanceEval.expression_ability || {}; + const topicSelection = performanceEval.topic_selection || {}; + const tags = summary.tags || []; + const attitudeAnalysis = summary.attitude_analysis || ""; + + return ( +
+
+
+ history_edu + 剧情复盘 +
+ + {hasNodes ? ( +
+ + {has_nodes && ( + + )} +
+ ) : ( + + )} +
+ +
+
+ 关键节点: {summary.node_count || 0} + 命中建议: {summary.matched_count || 0} +
+ {summary.total_affinity_change !== undefined && ( +
+ 好感变化: + 0 ? 'text-green-500' : summary.total_affinity_change < 0 ? 'text-red-500' : ''}> + {summary.total_affinity_change > 0 ? '+' : ''}{summary.total_affinity_change} + +
+ )} +
+ +
+ {/* 用户表现评价 - 评分卡片 */} + {(expressionAbility.score !== null || topicSelection.score !== null || selfEvaluation) && ( +
+ {(expressionAbility.score !== null || topicSelection.score !== null) && ( +
+ {expressionAbility.score !== null && ( +
+
+

表述能力

+ {expressionAbility.score}分 +
+ {expressionAbility.description && ( +

{expressionAbility.description}

+ )} +
+ )} + {topicSelection.score !== null && ( +
+
+

话题选择

+ {topicSelection.score}分 +
+ {topicSelection.description && ( +

{topicSelection.description}

+ )} +
+ )} +
+ )} + {selfEvaluation && ( +
+

整体表现评价

+

{selfEvaluation}

+
+ )} +
+ )} + {/* 聊天概要 */} + {chatOverview && ( +
+

聊天概要

+

{chatOverview}

+
+ )} + + {/* 标签 Tags */} + {tags.length > 0 && ( +
+ {tags.map((tag, i) => ( + + {tag} + + ))} +
+ )} + + {/* 对象态度分析 */} + {attitudeAnalysis && ( +
+

对象态度分析

+

{attitudeAnalysis}

+
+ )} +
+ +
+ ); +} diff --git a/desktop/src/renderer/hooks/useAudioCapture.js b/desktop/src/renderer/hooks/useAudioCapture.js new file mode 100644 index 0000000..2b5b597 --- /dev/null +++ b/desktop/src/renderer/hooks/useAudioCapture.js @@ -0,0 +1,414 @@ +import { useState, useRef, useCallback, useEffect } from 'react'; +import { + attachAudioContextDebugHandlers, + analyzeAudioVolume, + closeAudioContext +} from '../utils/audioUtils.js'; + +/** + * 音频捕获管理的自定义Hook + */ +export const useAudioCapture = () => { + // 状态 + const [isListening, setIsListening] = useState(false); + const [audioStatus, setAudioStatus] = useState(''); + const [desktopCapturerError, setDesktopCapturerError] = useState(null); + const [micVolumeLevel, setMicVolumeLevel] = useState(0); + const [systemVolumeLevel, setSystemVolumeLevel] = useState(0); + const [totalVolumeLevel, setTotalVolumeLevel] = useState(0); + + // Refs + const micAudioContextRef = useRef(null); + const systemAudioContextRef = useRef(null); + const audioContextRef = useRef(null); // 保留用于兼容性 + const micAnalyserRef = useRef(null); + const systemAnalyserRef = useRef(null); + const totalAnalyserRef = useRef(null); + const microphoneRef = useRef(null); + const systemAudioRef = useRef(null); + const systemAudioElementRef = useRef(null); + const micDataArrayRef = useRef(null); + const systemDataArrayRef = useRef(null); + const totalDataArrayRef = useRef(null); + const animationIdRef = useRef(null); + const audioContextStateLogRef = useRef({ mic: null, system: null }); + + /** + * 清理所有音频资源 + */ + const cleanup = useCallback(async () => { + if (animationIdRef.current) { + cancelAnimationFrame(animationIdRef.current); + animationIdRef.current = null; + } + + if (microphoneRef.current) { + microphoneRef.current.getTracks().forEach(track => track.stop()); + microphoneRef.current = null; + } + + if (systemAudioRef.current) { + systemAudioRef.current.getTracks().forEach(track => track.stop()); + systemAudioRef.current = null; + } + + if 
(systemAudioElementRef.current) { + systemAudioElementRef.current.pause(); + systemAudioElementRef.current.srcObject = null; + systemAudioElementRef.current = null; + } + + // 关闭所有AudioContext + await Promise.all([ + closeAudioContext(micAudioContextRef.current), + closeAudioContext(systemAudioContextRef.current), + closeAudioContext(audioContextRef.current) + ]); + + micAudioContextRef.current = null; + systemAudioContextRef.current = null; + audioContextRef.current = null; + + // 清理分析器引用 + micAnalyserRef.current = null; + systemAnalyserRef.current = null; + totalAnalyserRef.current = null; + audioContextStateLogRef.current = { mic: null, system: null }; + }, []); + + /** + * 分析音频音量并更新UI + */ + const analyzeAudio = useCallback(() => { + // 检查是否至少有一个 AudioContext 在运行 + const micContextActive = micAudioContextRef.current && micAudioContextRef.current.state !== 'closed'; + const systemContextActive = systemAudioContextRef.current && systemAudioContextRef.current.state !== 'closed'; + + if (!micContextActive && !systemContextActive) { + return; + } + + let hasMic = false; + let hasSystem = false; + let micVolume = 0; + let systemVolume = 0; + + // 分析麦克风音量 + if (micAnalyserRef.current && micDataArrayRef.current && micContextActive) { + micVolume = analyzeAudioVolume(micAnalyserRef.current, micDataArrayRef.current); + setMicVolumeLevel(micVolume); + hasMic = micVolume > 2; + } + + // 分析系统音频音量 + if (systemAnalyserRef.current && systemDataArrayRef.current && systemContextActive) { + systemVolume = analyzeAudioVolume(systemAnalyserRef.current, systemDataArrayRef.current); + setSystemVolumeLevel(systemVolume); + hasSystem = systemVolume > 2; + } + + // 计算总体音量(两个音源的最大值,而不是平均值,以便更好地显示活动) + const totalVolume = Math.max(micVolume, systemVolume); + setTotalVolumeLevel(totalVolume); + + // 更新状态文本 + let statusText = '正在监听'; + const activeSources = []; + if (hasMic) activeSources.push('麦克风'); + if (hasSystem) activeSources.push('系统音频'); + + if (activeSources.length > 0) { + 
statusText += ` - ${activeSources.join(' + ')} 有输入`; + } else { + statusText += ' - 等待音频输入...'; + } + + setAudioStatus(statusText); + + animationIdRef.current = requestAnimationFrame(analyzeAudio); + }, []); + + /** + * 停止监听 + */ + const stopListening = useCallback(async () => { + await cleanup(); + setIsListening(false); + setAudioStatus('监听已停止'); + setMicVolumeLevel(0); + setSystemVolumeLevel(0); + setTotalVolumeLevel(0); + }, [cleanup]); + + /** + * 开始监听 + * @param {Object} options - 选项 + * @param {string} options.selectedAudioDevice - 选中的音频设备ID + * @param {boolean} options.captureSystemAudio - 是否捕获系统音频 + */ + const startListening = useCallback(async ({ + selectedAudioDevice, + captureSystemAudio + }) => { + try { + // 停止之前的监听(如果有)并等待清理完成 + await stopListening(); + + // 额外等待一小段时间确保浏览器音频子系统完全释放 + await new Promise(resolve => setTimeout(resolve, 200)); + + setAudioStatus('正在检查权限...'); + setDesktopCapturerError(null); + + // macOS: 先检查并请求麦克风权限 + if (window.electronAPI?.checkMediaAccessStatus) { + const micStatus = await window.electronAPI.checkMediaAccessStatus('microphone'); + console.log('[Settings] 麦克风权限状态:', micStatus); + + if (micStatus.status !== 'granted') { + setAudioStatus('正在请求麦克风权限...'); + const result = await window.electronAPI.requestMediaAccess('microphone'); + console.log('[Settings] 麦克风权限请求结果:', result); + + if (!result.granted) { + throw new Error(result.message || '麦克风权限被拒绝,请在系统设置中允许'); + } + } + } + + setAudioStatus('正在初始化音频...'); + + let sourceCount = 0; + let micStreamObtained = false; + + // 1. 
捕获麦克风音频 - 使用独立的 AudioContext + setAudioStatus('正在获取麦克风...'); + try { + // 为麦克风创建独立的 AudioContext,强制使用 48kHz 采样率以减少冲突 + const audioContextOptions = { sampleRate: 48000, latencyHint: 'playback' }; + micAudioContextRef.current = new (window.AudioContext || window.webkitAudioContext)(audioContextOptions); + attachAudioContextDebugHandlers(micAudioContextRef.current, 'mic'); + + const micAnalyser = micAudioContextRef.current.createAnalyser(); + micAnalyser.fftSize = 256; + micAnalyser.smoothingTimeConstant = 0.8; + micAnalyserRef.current = micAnalyser; + micDataArrayRef.current = new Uint8Array(micAnalyser.frequencyBinCount); + + const micConstraints = { + audio: { + deviceId: selectedAudioDevice ? { exact: selectedAudioDevice } : undefined, + echoCancellation: true, + noiseSuppression: true + } + }; + + const micStream = await navigator.mediaDevices.getUserMedia(micConstraints); + microphoneRef.current = micStream; + + const micSource = micAudioContextRef.current.createMediaStreamSource(micStream); + micSource.connect(micAnalyser); + sourceCount++; + micStreamObtained = true; + console.log('[Settings] ✅ 麦克风捕获成功'); + } catch (micError) { + console.error('[Settings] ❌ 麦克风捕获失败:', micError); + // 麦克风捕获失败时,如果也要捕获系统音频,继续执行;否则抛出错误 + if (!captureSystemAudio) { + throw micError; + } + setAudioStatus(`⚠️ 麦克风捕获失败: ${micError.message},尝试捕获系统音频...`); + } + + // 2. 
如果启用了系统音频捕获,使用 electron-audio-loopback + if (captureSystemAudio) { + setAudioStatus('正在尝试捕获系统音频...'); + console.log('[Settings] 系统音频捕获: 使用 electron-audio-loopback...'); + + // 为系统音频创建独立的 AudioContext + await new Promise(resolve => setTimeout(resolve, 500)); + + const sysAudioContextOptions = { sampleRate: 48000, latencyHint: 'playback' }; + systemAudioContextRef.current = new (window.AudioContext || window.webkitAudioContext)(sysAudioContextOptions); + + // 检查 AudioContext 是否成功创建 + if (!systemAudioContextRef.current) { + throw new Error('无法创建系统音频 AudioContext'); + } + + attachAudioContextDebugHandlers(systemAudioContextRef.current, 'system'); + + const systemAnalyser = systemAudioContextRef.current.createAnalyser(); + systemAnalyser.fftSize = 256; + systemAnalyser.smoothingTimeConstant = 0.8; + systemAnalyserRef.current = systemAnalyser; + systemDataArrayRef.current = new Uint8Array(systemAnalyser.frequencyBinCount); + + try { + // 使用 electron-audio-loopback 方案 + // 1. 启用 loopback 音频 + if (window.electronAPI?.enableLoopbackAudio) { + await window.electronAPI.enableLoopbackAudio(); + console.log('[Settings] Loopback audio enabled'); + } + + // 2. 使用 getDisplayMedia 获取系统音频 + setAudioStatus('正在获取系统音频...'); + const displayStream = await navigator.mediaDevices.getDisplayMedia({ + audio: true, + video: true + }); + + // 3. 禁用 loopback 音频 + if (window.electronAPI?.disableLoopbackAudio) { + await window.electronAPI.disableLoopbackAudio(); + console.log('[Settings] Loopback audio disabled'); + } + + // 4. 停止视频轨道 + const videoTracks = displayStream.getVideoTracks(); + videoTracks.forEach(track => { + track.stop(); + displayStream.removeTrack(track); + console.log(`[Settings] Video track stopped: ${track.label}`); + }); + + // 5. 
检查音频轨道 + const audioTracks = displayStream.getAudioTracks(); + console.log(`[Settings] 系统音频流: ${audioTracks.length} 个音频轨道`); + + if (audioTracks.length > 0) { + // 检查 AudioContext 是否仍然有效 + if (!systemAudioContextRef.current || systemAudioContextRef.current.state === 'closed') { + console.warn('[Settings] AudioContext 无效,重新创建...'); + const sysAudioContextOptions = { sampleRate: 48000, latencyHint: 'playback' }; + systemAudioContextRef.current = new (window.AudioContext || window.webkitAudioContext)(sysAudioContextOptions); + + if (!systemAudioContextRef.current) { + throw new Error('无法重新创建系统音频 AudioContext'); + } + + attachAudioContextDebugHandlers(systemAudioContextRef.current, 'system'); + + // 重新创建 analyser + const systemAnalyser = systemAudioContextRef.current.createAnalyser(); + systemAnalyser.fftSize = 256; + systemAnalyser.smoothingTimeConstant = 0.8; + systemAnalyserRef.current = systemAnalyser; + systemDataArrayRef.current = new Uint8Array(systemAnalyser.frequencyBinCount); + } + + systemAudioRef.current = displayStream; + + // 再次确认 AudioContext 有效后再使用 + if (!systemAudioContextRef.current) { + throw new Error('系统音频 AudioContext 不可用'); + } + + const systemSource = systemAudioContextRef.current.createMediaStreamSource(displayStream); + systemSource.connect(systemAnalyserRef.current); + sourceCount++; + + if (systemAudioContextRef.current.state === 'suspended') { + await systemAudioContextRef.current.resume(); + } + + console.log(`[Settings] ✅ 系统音频捕获已启动 (electron-audio-loopback)`); + setAudioStatus('✅ 系统音频捕获成功'); + setDesktopCapturerError(null); + } else { + console.warn(`[Settings] ⚠️ 没有音频轨道`); + displayStream.getTracks().forEach(track => track.stop()); + setDesktopCapturerError('没有音频轨道'); + } + } catch (systemError) { + console.error('[Settings] ❌ 系统音频捕获失败:', systemError); + + // 确保禁用 loopback + if (window.electronAPI?.disableLoopbackAudio) { + await window.electronAPI.disableLoopbackAudio().catch(() => { }); + } + + const errorMsg = systemError.message || 
'未知错误'; + if (micStreamObtained) { + console.warn(`[Settings] 麦克风将继续工作,但无法捕获系统音频`); + } + setDesktopCapturerError(`捕获失败: ${errorMsg}`); + } + } + + // 检查是否至少有一个音频源成功捕获 + if (sourceCount === 0) { + throw new Error('没有成功捕获任何音频源。请检查设备连接和权限设置。'); + } + + // 3. 总计音量将在 analyzeAudio 中通过软件方式计算(两个独立 AudioContext 的最大值) + // 不再使用硬件合并,因为两个 AudioContext 无法直接连接 + totalDataArrayRef.current = new Uint8Array(128); // 用于存储计算后的总音量数据 + + // 构建状态信息 + const capturedSources = []; + if (micStreamObtained) capturedSources.push('麦克风'); + if (systemAudioRef.current) capturedSources.push('系统音频'); + + const statusMsg = capturedSources.length > 0 + ? `正在监听 (${capturedSources.join(' + ')})...` + : '监听中...'; + + setAudioStatus(statusMsg); + setIsListening(true); + + console.log(`[Settings] ✅ 音频监听已启动: ${capturedSources.join(', ') || '无'}`); + + analyzeAudio(); + + } catch (error) { + console.error('启动监听失败:', error); + console.error('错误名称:', error.name); + console.error('错误消息:', error.message); + console.error('错误堆栈:', error.stack); + + // 针对常见错误提供更友好的提示 + let errorMsg = error.message; + if (error.name === 'NotFoundError') { + errorMsg = '未找到音频设备。请检查麦克风是否正确连接,或尝试选择其他设备。'; + } else if (error.name === 'NotAllowedError' || error.name === 'PermissionDeniedError') { + errorMsg = '音频权限被拒绝。请在系统设置中允许此应用访问麦克风。'; + } else if (error.name === 'NotReadableError') { + errorMsg = '无法读取音频设备。设备可能被其他应用占用。'; + } + + setAudioStatus(`启动失败: ${errorMsg}`); + setIsListening(false); + + // 清理可能部分创建的资源 + await cleanup(); + } + }, [analyzeAudio, stopListening, cleanup]); + + // 设置全局错误处理器 + useEffect(() => { + const handleWindowError = (event) => { + if (event?.message?.includes('AudioContext')) { + console.error('[AudioDebug] 捕获到全局 AudioContext 错误:', event.message, event.error); + } + }; + + window.addEventListener('error', handleWindowError); + return () => window.removeEventListener('error', handleWindowError); + }, []); + + return { + // 状态 + isListening, + audioStatus, + desktopCapturerError, + micVolumeLevel, + 
systemVolumeLevel, + totalVolumeLevel, + + // 方法 + startListening, + stopListening + }; +}; \ No newline at end of file diff --git a/desktop/src/renderer/hooks/useAudioDevices.js b/desktop/src/renderer/hooks/useAudioDevices.js new file mode 100644 index 0000000..f9a76ea --- /dev/null +++ b/desktop/src/renderer/hooks/useAudioDevices.js @@ -0,0 +1,241 @@ +import { useState, useCallback, useRef } from 'react'; + +/** + * 音频设备管理的自定义Hook + */ +export const useAudioDevices = () => { + const [audioDevices, setAudioDevices] = useState([]); + const [selectedAudioDevice, setSelectedAudioDevice] = useState(''); + const [captureSystemAudio, setCaptureSystemAudio] = useState(false); + const [selectedSystemAudioDevice, setSelectedSystemAudioDevice] = useState(''); + const [audioSources, setAudioSources] = useState([]); + const [speaker1Source, setSpeaker1Source] = useState(null); // 用户(麦克风) + const [speaker2Source, setSpeaker2Source] = useState(null); // 角色(系统音频) + + // 用于跟踪是否已经自动保存过,避免重复保存 + const autoSavedRef = useRef(false); + + /** + * 加载音频设备列表 + */ + const loadAudioDevices = useCallback(async () => { + try { + if (!navigator.mediaDevices?.enumerateDevices) { + console.warn('浏览器不支持音频设备枚举'); + return; + } + + const devices = await navigator.mediaDevices.enumerateDevices(); + const audioInputs = devices.filter(device => device.kind === 'audioinput'); + setAudioDevices(audioInputs); + + // 如果没有已保存的配置,选择第一个设备作为默认值 + if (!speaker1Source && audioInputs.length > 0 && !selectedAudioDevice) { + setSelectedAudioDevice(audioInputs[0].deviceId); + } + } catch (error) { + console.error('加载音频设备失败:', error); + } + }, [speaker1Source, selectedAudioDevice]); + + /** + * 加载音频源配置 + */ + const loadAudioSources = useCallback(async () => { + try { + const api = window.electronAPI; + if (!api?.asrGetAudioSources) { + console.warn('ASR API 不可用'); + return; + } + + const sources = await api.asrGetAudioSources(); + setAudioSources(sources || []); + + // 查找 Speaker 1(用户/麦克风)和 Speaker 2(角色/系统音频) + // 
使用固定的ID查找(而不是名称匹配),确保与外键约束一致 + const speaker1 = (sources || []).find(s => s.id === 'speaker1'); + const speaker2 = (sources || []).find(s => s.id === 'speaker2'); + + setSpeaker1Source(speaker1 || null); + setSpeaker2Source(speaker2 || null); + + // 如果找到了配置,更新UI状态 + if (speaker1) { + setSelectedAudioDevice(speaker1.device_id || ''); + } + if (speaker2) { + // 根据 is_active 决定是否默认勾选系统音频捕获 + const isActive = speaker2.is_active === 1 || speaker2.is_active === true || speaker2.is_active === '1'; + setCaptureSystemAudio(isActive); + setSelectedSystemAudioDevice(speaker2.device_id || ''); + } + } catch (error) { + console.error('加载音频源配置失败:', error); + } + }, []); + + /** + * 保存音频源配置 + * @param {string} sourceName - 音频源名称 + * @param {string} deviceId - 设备ID + * @param {string} deviceName - 设备名称 + * @param {boolean} isActive - 是否激活 + */ + const saveAudioSource = useCallback(async (sourceName, deviceId, deviceName, isActive = true) => { + try { + const api = window.electronAPI; + if (!api?.asrCreateAudioSource || !api?.asrUpdateAudioSource) { + console.warn('ASR API 不可用'); + return; + } + + // 确定音频源的固定ID(关键:必须使用固定的ID才能与外键约束匹配) + const sourceId = sourceName === '用户(麦克风)' ? 'speaker1' : 'speaker2'; + + console.log('保存音频源配置:', { sourceId, sourceName, deviceId, deviceName, isActive }); + + // 重新获取最新的音频源列表,避免使用过期的 audioSources + const currentSources = await api.asrGetAudioSources(); + + // 使用固定的ID查找是否已存在该音频源(而不是名称匹配) + const existingSource = (currentSources || []).find(s => s.id === sourceId); + + const updateData = { + name: sourceName, + device_id: deviceId, + device_name: deviceName, + is_active: isActive ? 
1 : 0 + }; + + if (existingSource) { + // 更新现有配置 + console.log('更新现有音频源:', existingSource.id, updateData); + const result = await api.asrUpdateAudioSource(existingSource.id, updateData); + console.log('更新结果:', result); + } else { + // 创建新配置(必须指定固定的ID) + const createData = { + id: sourceId, // 关键:使用固定的ID + ...updateData + }; + console.log('创建新音频源:', createData); + const result = await api.asrCreateAudioSource(createData); + console.log('创建结果:', result); + } + + // 重新加载音频源配置 + await loadAudioSources(); + + // 验证保存结果(使用ID查找) + const updatedSources = await api.asrGetAudioSources(); + const savedSource = updatedSources.find(s => s.id === sourceId); + console.log('保存后的音频源:', savedSource); + + if (savedSource) { + console.log(`✓ 音频源配置已保存: ${sourceName} (ID: ${sourceId}), is_active=${savedSource.is_active}`); + } else { + console.warn(`⚠ 音频源配置保存后未找到: ${sourceName} (ID: ${sourceId})`); + } + } catch (error) { + console.error('保存音频源配置失败:', error); + alert('保存音频源配置失败:' + (error.message || '未知错误')); + } + }, [loadAudioSources]); + + /** + * 处理音频设备选择变化 + */ + const handleAudioDeviceChange = useCallback(async (deviceId) => { + setSelectedAudioDevice(deviceId); + const device = audioDevices.find(d => d.deviceId === deviceId); + if (device) { + await saveAudioSource('用户(麦克风)', deviceId, device.label || device.deviceId, true); + } + }, [audioDevices, saveAudioSource]); + + /** + * 处理系统音频捕获开关变化 + */ + const handleSystemAudioToggle = useCallback(async (checked) => { + setCaptureSystemAudio(checked); + + // 统一通过 saveAudioSource 确保存在 speaker2 配置: + // - 如果之前没有 speaker2Source,会自动创建 + // - 如果已有,则仅更新 is_active + try { + const deviceId = + (speaker2Source && speaker2Source.device_id) || + selectedSystemAudioDevice || + 'system-loopback'; + const deviceName = + (speaker2Source && speaker2Source.device_name) || + '系统音频(屏幕捕获)'; + + await saveAudioSource( + '角色(系统音频)', + deviceId, + deviceName, + checked + ); + } catch (err) { + console.error('更新系统音频源配置失败:', err); + } + }, [speaker2Source, 
selectedSystemAudioDevice, saveAudioSource]); + + /** + * 初始化音频设备 + */ + const initializeAudioDevices = useCallback(async () => { + // 先加载音频源配置,再加载设备列表(因为设备列表需要用到音频源配置) + await loadAudioSources(); + await loadAudioDevices(); + }, [loadAudioSources, loadAudioDevices]); + + /** + * 当音频源配置加载完成后,更新设备选择并自动保存 + */ + const handleAudioSourcesLoaded = useCallback(async () => { + // 如果speaker1Source存在但device_id为null,自动选择第一个可用设备并保存 + if (speaker1Source && !speaker1Source.device_id && audioDevices.length > 0 && !autoSavedRef.current) { + const firstDevice = audioDevices[0]; + console.log('自动选择并保存第一个麦克风设备:', firstDevice.deviceId); + setSelectedAudioDevice(firstDevice.deviceId); + // 标记为已保存,避免重复 + autoSavedRef.current = true; + // 直接保存到数据库 + await saveAudioSource('用户(麦克风)', firstDevice.deviceId, firstDevice.label || firstDevice.deviceId, true); + } else if (speaker1Source?.device_id && audioDevices.length > 0) { + const device = audioDevices.find(d => d.deviceId === speaker1Source.device_id); + if (device && selectedAudioDevice !== device.deviceId) { + setSelectedAudioDevice(device.deviceId); + } + } + if (speaker2Source?.device_id && audioDevices.length > 0 && captureSystemAudio) { + const device = audioDevices.find(d => d.deviceId === speaker2Source.device_id); + if (device && selectedSystemAudioDevice !== device.deviceId) { + setSelectedSystemAudioDevice(device.deviceId); + } + } + }, [speaker1Source, speaker2Source, audioDevices, captureSystemAudio, selectedAudioDevice, selectedSystemAudioDevice, saveAudioSource]); + + return { + // 状态 + audioDevices, + selectedAudioDevice, + captureSystemAudio, + selectedSystemAudioDevice, + audioSources, + speaker1Source, + speaker2Source, + + // 方法 + loadAudioDevices, + loadAudioSources, + saveAudioSource, + handleAudioDeviceChange, + handleSystemAudioToggle, + initializeAudioDevices, + handleAudioSourcesLoaded + }; +}; \ No newline at end of file diff --git a/desktop/src/renderer/hooks/useChatSession.js 
b/desktop/src/renderer/hooks/useChatSession.js new file mode 100644 index 0000000..ee5f33c --- /dev/null +++ b/desktop/src/renderer/hooks/useChatSession.js @@ -0,0 +1,67 @@ +import { useState, useCallback } from 'react'; + +/** + * 聊天会话管理的自定义Hook + */ +export const useChatSession = () => { + const [sessionInfo, setSessionInfo] = useState(null); + const [error, setError] = useState(''); + + /** + * 处理会话选择 + * @param {Object} info - 会话信息 + */ + const handleSessionSelected = useCallback(async (info) => { + setSessionInfo(info); + + // 确保有对话 ID(如果是新对话,需要先创建) + let conversationId = info.conversationId; + if (!conversationId && info.characterId) { + const api = window.electronAPI; + if (api && api.dbCreateConversation) { + try { + const newConv = await api.dbCreateConversation({ + character_id: info.characterId, + title: info.conversationName || '新对话' + }); + conversationId = newConv?.id; + if (conversationId) { + setSessionInfo({ ...info, conversationId }); + } + } catch (err) { + console.error('创建新对话失败:', err); + setError('创建新对话失败'); + } + } + } + }, []); + + /** + * 关闭HUD + */ + const handleClose = useCallback(() => { + if (window.electronAPI?.closeHUD) { + window.electronAPI.closeHUD(); + } + }, []); + + /** + * 切换会话 + */ + const handleSwitchSession = useCallback(() => { + setSessionInfo(null); + setError(''); + }, []); + + return { + // 状态 + sessionInfo, + error, + + // 方法 + handleSessionSelected, + handleClose, + handleSwitchSession, + setError + }; +}; \ No newline at end of file diff --git a/desktop/src/renderer/hooks/useConversationReview.js b/desktop/src/renderer/hooks/useConversationReview.js new file mode 100644 index 0000000..a1e6b7d --- /dev/null +++ b/desktop/src/renderer/hooks/useConversationReview.js @@ -0,0 +1,83 @@ + +import { useState, useEffect, useCallback, useRef } from 'react'; + +export function useConversationReview(conversationId) { + const [review, setReview] = useState(null); + const [isLoading, setIsLoading] = useState(false); + const 
[error, setError] = useState(null); + const [progress, setProgress] = useState(null); + const activeRequestIdRef = useRef(null); + + const fetchReview = useCallback(async () => { + if (!conversationId) { + setReview(null); + return; + } + + try { + setIsLoading(true); + const result = await window.electronAPI.getConversationReview(conversationId); + if (result.success && result.data) { + setReview(result.data.review_data); + } else { + // 当没有复盘时清空旧数据,避免显示上一个对话的内容 + setReview(null); + } + } catch (err) { + console.error('Failed to fetch review:', err); + setReview(null); + setError(err.message); + } finally { + setIsLoading(false); + } + }, [conversationId]); + + const generate = useCallback(async () => { + if (!conversationId) return; + + setIsLoading(true); + setError(null); + setProgress({ stage: 'start', percent: 0, message: '开始生成复盘...' }); + try { + const requestId = `review-${Date.now()}-${Math.random().toString(36).slice(2, 6)}`; + activeRequestIdRef.current = requestId; + const result = await window.electronAPI.generateConversationReview(conversationId, { + force: true, + requestId + }); + if (result.success) { + setReview(result.data); + setProgress({ stage: 'done', percent: 1, message: '复盘完成' }); + } else { + setError(result.error); + } + } catch (err) { + setError(err.message); + } finally { + setIsLoading(false); + // 不清空 progress,让 UI 可以展示“完成/失败” + } + }, [conversationId]); + + useEffect(() => { + if (!window.electronAPI?.onReviewProgress) return undefined; + const off = window.electronAPI.onReviewProgress((data) => { + if (!data) return; + const active = activeRequestIdRef.current; + if (active && data.requestId && data.requestId !== active) return; + setProgress({ + stage: data.stage, + percent: data.percent, + message: data.message, + extra: data.extra + }); + }); + return () => off && off(); + }, []); + + useEffect(() => { + fetchReview(); + }, [fetchReview]); + + return { review, isLoading, error, progress, generate }; +} diff --git 
a/desktop/src/renderer/hooks/useLLMConfig.js b/desktop/src/renderer/hooks/useLLMConfig.js new file mode 100644 index 0000000..f239f32 --- /dev/null +++ b/desktop/src/renderer/hooks/useLLMConfig.js @@ -0,0 +1,359 @@ +import { useState, useCallback } from 'react'; +import { isNonEmptyString, isValidApiKey, isValidBaseUrl, isValidModelName, isValidTimeoutMs } from '../utils/validation.js'; + +/** + * LLM配置管理的自定义Hook + */ +export const useLLMConfig = () => { + const [llmConfigs, setLlmConfigs] = useState([]); + const [defaultConfig, setDefaultConfig] = useState(null); + const [loading, setLoading] = useState(true); + const [showAddConfig, setShowAddConfig] = useState(false); + const [editingConfigId, setEditingConfigId] = useState(null); // 正在编辑的配置ID + const [featureBindings, setFeatureBindings] = useState({}); + const [featureBindingLoading, setFeatureBindingLoading] = useState(false); + const [featureBindingError, setFeatureBindingError] = useState(''); + const [newConfig, setNewConfig] = useState({ + name: '', + apiKey: '', + baseUrl: '', + modelName: 'gpt-4o-mini', + timeoutMs: '', + isDefault: false + }); + const [testingConfig, setTestingConfig] = useState(false); + const [testConfigMessage, setTestConfigMessage] = useState(''); + const [testConfigError, setTestConfigError] = useState(''); + + /** + * 加载所有LLM配置 + */ + const loadConfigs = useCallback(async () => { + try { + setLoading(true); + if (window.electronAPI?.getAllLLMConfigs) { + const configs = await window.electronAPI.getAllLLMConfigs(); + setLlmConfigs(configs); + } + if (window.electronAPI?.getDefaultLLMConfig) { + const defaultCfg = await window.electronAPI.getDefaultLLMConfig(); + setDefaultConfig(defaultCfg); + } + } catch (error) { + console.error('Failed to load configs:', error); + } finally { + setLoading(false); + } + }, []); + + /** + * 加载功能绑定配置 + */ + const loadFeatureBindings = useCallback(async () => { + if (!window.electronAPI?.getLLMFeatureConfigs) { + return; + } + try { + 
setFeatureBindingLoading(true); + setFeatureBindingError(''); + const bindings = await window.electronAPI.getLLMFeatureConfigs(); + setFeatureBindings(bindings || {}); + } catch (error) { + console.error('Failed to load LLM feature configs:', error); + setFeatureBindingError(error?.message || '加载功能绑定失败'); + } finally { + setFeatureBindingLoading(false); + } + }, []); + + /** + * 添加新配置 + */ + const handleAddConfig = useCallback(async () => { + // 验证输入 + if (!isNonEmptyString(newConfig.name)) { + alert('请填写配置名称'); + return; + } + if (!isValidApiKey(newConfig.apiKey)) { + alert('请填写有效的API密钥'); + return; + } + if (!isValidModelName(newConfig.modelName)) { + alert('请填写模型名称'); + return; + } + if (!isValidBaseUrl(newConfig.baseUrl)) { + alert('请填写有效的Base URL'); + return; + } + if (!isValidTimeoutMs(newConfig.timeoutMs)) { + alert('请填写有效的超时时间(毫秒)'); + return; + } + + try { + if (window.electronAPI?.saveLLMConfig) { + const configData = { + name: newConfig.name, + api_key: newConfig.apiKey, + base_url: newConfig.baseUrl || null, + model_name: newConfig.modelName?.trim() || 'gpt-4o-mini', + timeout_ms: newConfig.timeoutMs !== '' ? 
Number(newConfig.timeoutMs) : null, + is_default: newConfig.isDefault + }; + + await window.electronAPI.saveLLMConfig(configData); + + // 重置表单 + setNewConfig({ + name: '', + apiKey: '', + baseUrl: '', + modelName: 'gpt-4o-mini', + timeoutMs: '', + isDefault: false + }); + setTestConfigMessage(''); + setTestConfigError(''); + setTestingConfig(false); + setShowAddConfig(false); + + // 重新加载配置列表 + await loadConfigs(); + } + } catch (error) { + console.error('添加配置失败:', error); + alert('添加配置失败,请重试'); + } + }, [newConfig, loadConfigs]); + + /** + * 测试LLM配置 + */ + const handleTestLLMConfig = useCallback(async () => { + if (!window.electronAPI?.testLLMConnection) { + setTestConfigError('LLM测试接口不可用'); + setTestConfigMessage(''); + return; + } + + if (!isValidApiKey(newConfig.apiKey)) { + setTestConfigError('请先填写有效的API密钥'); + setTestConfigMessage(''); + return; + } + if (!isValidTimeoutMs(newConfig.timeoutMs)) { + setTestConfigError('请填写有效的超时时间(毫秒)'); + setTestConfigMessage(''); + return; + } + + setTestingConfig(true); + setTestConfigError(''); + setTestConfigMessage(''); + + try { + const result = await window.electronAPI.testLLMConnection({ + api_key: newConfig.apiKey, + base_url: newConfig.baseUrl || null, + model_name: newConfig.modelName?.trim() || 'gpt-4o-mini', + timeout_ms: newConfig.timeoutMs !== '' ? Number(newConfig.timeoutMs) : null + }); + + if (result?.success) { + const statusText = result?.status ? `(HTTP ${result.status})` : ''; + setTestConfigMessage(`${result.message || '验证成功'}${statusText}`); + setTestConfigError(''); + } else { + const statusText = result?.status ? 
`(HTTP ${result.status})` : ''; + setTestConfigError(`${result?.message || '验证失败,请检查 API Key 和 Base URL'}${statusText}`); + setTestConfigMessage(''); + } + } catch (error) { + console.error('测试 LLM 配置失败:', error); + setTestConfigError(error?.message || '测试失败,请稍后重试'); + setTestConfigMessage(''); + } finally { + setTestingConfig(false); + } + }, [newConfig]); + + /** + * 设置默认配置 + */ + const handleSetDefault = useCallback(async (configId) => { + if (window.electronAPI?.setDefaultLLMConfig) { + await window.electronAPI.setDefaultLLMConfig(configId); + await loadConfigs(); + } + }, [loadConfigs]); + + /** + * 删除配置 + */ + const handleDeleteConfig = useCallback(async (configId) => { + if (window.electronAPI?.deleteLLMConfig) { + if (confirm('确定要删除这个配置吗?')) { + await window.electronAPI.deleteLLMConfig(configId); + await loadConfigs(); + } + } + }, [loadConfigs]); + + /** + * 取消添加配置 + */ + const handleCancelAdd = useCallback(() => { + setShowAddConfig(false); + setEditingConfigId(null); + setNewConfig({ + name: '', + apiKey: '', + baseUrl: '', + modelName: 'gpt-4o-mini', + timeoutMs: '', + isDefault: false + }); + setTestConfigMessage(''); + setTestConfigError(''); + setTestingConfig(false); + }, []); + + /** + * 开始编辑配置 + */ + const handleEditConfig = useCallback((config) => { + setEditingConfigId(config.id); + setNewConfig({ + name: config.name || '', + apiKey: config.api_key || '', + baseUrl: config.base_url || '', + modelName: config.model_name || 'gpt-4o-mini', + timeoutMs: config.timeout_ms ?? 
'', + isDefault: config.is_default === 1 + }); + setShowAddConfig(true); + setTestConfigMessage(''); + setTestConfigError(''); + }, []); + + /** + * 保存配置(新增或更新) + */ + const handleSaveConfig = useCallback(async () => { + // 验证输入 + if (!isNonEmptyString(newConfig.name)) { + alert('请填写配置名称'); + return; + } + if (!isValidApiKey(newConfig.apiKey)) { + alert('请填写有效的API密钥'); + return; + } + if (!isValidModelName(newConfig.modelName)) { + alert('请填写模型名称'); + return; + } + if (!isValidBaseUrl(newConfig.baseUrl)) { + alert('请填写有效的Base URL'); + return; + } + if (!isValidTimeoutMs(newConfig.timeoutMs)) { + alert('请填写有效的超时时间(毫秒)'); + return; + } + + try { + if (window.electronAPI?.saveLLMConfig) { + const configData = { + id: editingConfigId || undefined, // 如果有 id 则为更新 + name: newConfig.name, + api_key: newConfig.apiKey, + base_url: newConfig.baseUrl || null, + model_name: newConfig.modelName?.trim() || 'gpt-4o-mini', + timeout_ms: newConfig.timeoutMs !== '' ? Number(newConfig.timeoutMs) : null, + is_default: newConfig.isDefault + }; + + await window.electronAPI.saveLLMConfig(configData); + + // 重置表单 + setNewConfig({ + name: '', + apiKey: '', + baseUrl: '', + modelName: 'gpt-4o-mini', + timeoutMs: '', + isDefault: false + }); + setTestConfigMessage(''); + setTestConfigError(''); + setTestingConfig(false); + setShowAddConfig(false); + setEditingConfigId(null); + + // 重新加载配置列表 + await loadConfigs(); + } + } catch (error) { + console.error('保存配置失败:', error); + alert('保存配置失败,请重试'); + } + }, [newConfig, editingConfigId, loadConfigs]); + + /** + * 绑定/更新功能对应的 LLM 配置 + */ + const handleSetFeatureConfig = useCallback( + async (feature, llmConfigId) => { + if (!window.electronAPI?.setLLMFeatureConfig) { + return; + } + try { + setFeatureBindingLoading(true); + setFeatureBindingError(''); + await window.electronAPI.setLLMFeatureConfig(feature, llmConfigId || null); + await loadFeatureBindings(); + } catch (error) { + console.error('设置功能绑定失败:', error); + 
setFeatureBindingError(error?.message || '设置失败,请稍后重试'); + } finally { + setFeatureBindingLoading(false); + } + }, + [loadFeatureBindings] + ); + + return { + // 状态 + llmConfigs, + defaultConfig, + loading, + showAddConfig, + editingConfigId, + featureBindings, + featureBindingLoading, + featureBindingError, + newConfig, + testingConfig, + testConfigMessage, + testConfigError, + + // 状态设置函数 + setShowAddConfig, + setNewConfig, + + // 方法 + loadConfigs, + loadFeatureBindings, + handleAddConfig, + handleSaveConfig, + handleEditConfig, + handleTestLLMConfig, + handleSetDefault, + handleDeleteConfig, + handleCancelAdd, + handleSetFeatureConfig + }; +}; diff --git a/desktop/src/renderer/hooks/useMessages.js b/desktop/src/renderer/hooks/useMessages.js new file mode 100644 index 0000000..6b4b45e --- /dev/null +++ b/desktop/src/renderer/hooks/useMessages.js @@ -0,0 +1,226 @@ +import { useState, useCallback, useRef, useEffect, useMemo } from 'react'; + +/** + * 消息管理的自定义Hook + */ +export const useMessages = (conversationId) => { + const [messages, setMessages] = useState([]); + const [loading, setLoading] = useState(true); + const [error, setError] = useState(''); + const transcriptRef = useRef(null); + const [streamingMessages, setStreamingMessages] = useState({}); + + /** + * 加载消息 + */ + const loadMessages = useCallback(async () => { + if (!conversationId) { + setLoading(false); + return; + } + + setLoading(true); + setError(''); + try { + const api = window.electronAPI; + if (!api || !api.getMessagesByConversation) { + throw new Error('数据库API不可用'); + } + + const fetchedMessages = await api.getMessagesByConversation(conversationId); + setMessages(fetchedMessages || []); + } catch (err) { + console.error('加载对话失败:', err); + setError(err instanceof Error ? 
err.message : '加载失败'); + setMessages([]); + } finally { + setLoading(false); + } + }, [conversationId]); + + /** + * 添加消息 + */ + const addMessage = useCallback((message) => { + setMessages(prev => [...prev, message]); + }, []); + + /** + * 更新消息 + */ + const updateMessage = useCallback((messageId, updates) => { + setMessages(prev => prev.map(msg => + msg.id === messageId ? { ...msg, ...updates } : msg + )); + }, []); + + /** + * 滚动到底部 + */ + const scrollToBottom = useCallback(() => { + if (transcriptRef.current) { + transcriptRef.current.scrollTop = transcriptRef.current.scrollHeight; + } + }, []); + + // 当conversationId变化时,重新加载消息 + useEffect(() => { + if (conversationId) { + setStreamingMessages({}); + loadMessages(); + } else { + setMessages([]); + setStreamingMessages({}); + setLoading(false); + } + }, [conversationId, loadMessages]); + + // 当消息变化时,自动滚动到底部 + useEffect(() => { + scrollToBottom(); + }, [messages, streamingMessages, scrollToBottom]); + + // 设置消息监听器 + useEffect(() => { + const api = window.electronAPI; + if (!api?.on || !conversationId) return; + + // 监听完整句子识别结果(新消息) + const handleSentenceComplete = (message) => { + try { + if (!message) return; + + // 兼容旧格式(result.text) + if (!message.id && message.text) { + const normalized = (message.text || '').trim(); + if (!normalized) return; + const sender = message.sourceId === 'speaker1' ? 
'user' : 'character'; + addMessage({ + id: `${Date.now()}`, + conversation_id: conversationId, + sender, + content: normalized, + timestamp: Date.now() + }); + return; + } + + // 默认:ASRManager 已经写入数据库并返回 message 记录 + addMessage(message); + + // 同步清理对应 source 的流式气泡 + const sourceKey = message.source_id || message.sourceId; + if (sourceKey) { + setStreamingMessages((prev) => { + if (!prev[sourceKey]) return prev; + const next = { ...prev }; + delete next[sourceKey]; + return next; + }); + } + } catch (error) { + console.error('Error handling ASR result:', error); + setError(`处理识别结果失败:${error.message}`); + } + }; + + // 监听消息更新事件(更新现有消息内容) + const handleSentenceUpdate = (updatedMessage) => { + try { + if (!updatedMessage || !updatedMessage.id) return; + updateMessage(updatedMessage.id, { content: updatedMessage.content }); + } catch (error) { + console.error('Error handling ASR update:', error); + } + }; + + // 监听 ASR 错误 + const handleError = (error) => { + console.error('ASR error:', error); + setError(`语音识别错误:${error.error || error.message || '未知错误'}`); + }; + + api.on('asr-sentence-complete', handleSentenceComplete); + api.on('asr-sentence-update', handleSentenceUpdate); + api.on('asr-error', handleError); + + return () => { + api.removeListener('asr-sentence-complete', handleSentenceComplete); + api.removeListener('asr-sentence-update', handleSentenceUpdate); + api.removeListener('asr-error', handleError); + }; + }, [conversationId, addMessage, updateMessage]); + + // 监听流式 partial 更新,让同一条消息持续增长 + useEffect(() => { + const api = window.electronAPI; + if (!api?.on || !conversationId) return; + + const handlePartialUpdate = (payload = {}) => { + // 仅处理当前会话 + if (payload.conversationId && payload.conversationId !== conversationId) return; + + const content = (payload.content || payload.text || payload.partialText || payload.fullText || '').trim(); + if (!content) return; + + const sourceId = payload.sourceId || payload.sessionId; + if (!sourceId) return; + + const 
sender = sourceId === 'speaker1' ? 'user' : 'character'; + const timestamp = payload.timestamp || Date.now(); + + setStreamingMessages((prev) => ({ + ...prev, + [sourceId]: { + id: `stream-${sourceId}`, + sender, + content, + timestamp + } + })); + }; + + const handlePartialClear = (payload = {}) => { + if (payload.conversationId && payload.conversationId !== conversationId) return; + const key = payload.sourceId || payload.sessionId; + if (!key) return; + setStreamingMessages((prev) => { + if (!prev[key]) return prev; + const next = { ...prev }; + delete next[key]; + return next; + }); + }; + + api.on('asr-partial-update', handlePartialUpdate); + api.on('asr-partial-clear', handlePartialClear); + + return () => { + api.removeListener('asr-partial-update', handlePartialUpdate); + api.removeListener('asr-partial-clear', handlePartialClear); + }; + }, [conversationId]); + + const messagesWithStreaming = useMemo(() => { + const streamingList = Object.values(streamingMessages); + if (!streamingList.length) return messages; + return [...messages, ...streamingList].sort((a, b) => (a.timestamp || 0) - (b.timestamp || 0)); + }, [messages, streamingMessages]); + + return { + // 状态 + messages: messagesWithStreaming, + baseMessages: messages, + loading, + error, + transcriptRef, + streamingMessages, + + // 方法 + loadMessages, + addMessage, + updateMessage, + scrollToBottom, + setError + }; +}; \ No newline at end of file diff --git a/desktop/src/renderer/hooks/useSuggestionConfig.js b/desktop/src/renderer/hooks/useSuggestionConfig.js new file mode 100644 index 0000000..e216091 --- /dev/null +++ b/desktop/src/renderer/hooks/useSuggestionConfig.js @@ -0,0 +1,151 @@ +import { useState, useCallback } from 'react'; +import { coerceNumberValue } from '../utils/validation.js'; + +const DEFAULT_SUGGESTION_FORM = { + enable_passive_suggestion: true, + suggestion_count: 3, + silence_threshold_seconds: 3, + message_threshold_count: 3, + cooldown_seconds: 15, + context_message_limit: 10 
+}; + +/** + * 建议配置管理的自定义Hook + */ +export const useSuggestionConfig = () => { + const [suggestionConfig, setSuggestionConfig] = useState(null); + const [suggestionForm, setSuggestionForm] = useState(DEFAULT_SUGGESTION_FORM); + const [suggestionLoading, setSuggestionLoading] = useState(true); + const [suggestionSaving, setSuggestionSaving] = useState(false); + const [suggestionMessage, setSuggestionMessage] = useState(''); + const [suggestionError, setSuggestionError] = useState(''); + + /** + * 标准化建议配置表单数据 + * @param {Object} config - 配置对象 + * @returns {Object} 标准化后的配置 + */ + const normalizeSuggestionForm = useCallback((config) => { + const merged = { + ...DEFAULT_SUGGESTION_FORM, + ...(config || {}) + }; + return { + enable_passive_suggestion: Boolean(merged.enable_passive_suggestion), + suggestion_count: coerceNumberValue(merged.suggestion_count, DEFAULT_SUGGESTION_FORM.suggestion_count), + silence_threshold_seconds: coerceNumberValue(merged.silence_threshold_seconds, DEFAULT_SUGGESTION_FORM.silence_threshold_seconds), + message_threshold_count: coerceNumberValue(merged.message_threshold_count, DEFAULT_SUGGESTION_FORM.message_threshold_count), + cooldown_seconds: coerceNumberValue(merged.cooldown_seconds, DEFAULT_SUGGESTION_FORM.cooldown_seconds), + context_message_limit: coerceNumberValue(merged.context_message_limit, DEFAULT_SUGGESTION_FORM.context_message_limit) + }; + }, []); + + /** + * 更新建议配置字段 + * @param {string} field - 字段名 + * @param {*} value - 字段值 + */ + const updateSuggestionField = useCallback((field, value) => { + setSuggestionForm(prev => ({ + ...prev, + [field]: value + })); + }, []); + + /** + * 处理数字字段变化 + * @param {string} field - 字段名 + * @returns {Function} 事件处理器 + */ + const handleSuggestionNumberChange = (field) => (event) => { + const value = event.target.value; + updateSuggestionField(field, value === '' ? 
'' : Number(value)); + }; + + /** + * 加载建议设置 + */ + const loadSuggestionSettings = useCallback(async () => { + if (!window.electronAPI?.getSuggestionConfig) { + setSuggestionLoading(false); + return; + } + setSuggestionLoading(true); + setSuggestionError(''); + try { + const config = await window.electronAPI.getSuggestionConfig(); + setSuggestionConfig(config); + setSuggestionForm(normalizeSuggestionForm(config)); + } catch (error) { + console.error('加载建议配置失败:', error); + setSuggestionError(error?.message || '加载失败,请稍后重试'); + } finally { + setSuggestionLoading(false); + } + }, [normalizeSuggestionForm]); + + /** + * 保存建议配置 + */ + const handleSaveSuggestionConfig = useCallback(async () => { + if (!window.electronAPI?.updateSuggestionConfig) { + return; + } + setSuggestionSaving(true); + setSuggestionMessage(''); + setSuggestionError(''); + try { + const payload = { + enable_passive_suggestion: suggestionForm.enable_passive_suggestion ? 1 : 0, + suggestion_count: coerceNumberValue(suggestionForm.suggestion_count, DEFAULT_SUGGESTION_FORM.suggestion_count), + silence_threshold_seconds: coerceNumberValue(suggestionForm.silence_threshold_seconds, DEFAULT_SUGGESTION_FORM.silence_threshold_seconds), + message_threshold_count: coerceNumberValue(suggestionForm.message_threshold_count, DEFAULT_SUGGESTION_FORM.message_threshold_count), + cooldown_seconds: coerceNumberValue(suggestionForm.cooldown_seconds, DEFAULT_SUGGESTION_FORM.cooldown_seconds), + context_message_limit: coerceNumberValue(suggestionForm.context_message_limit, DEFAULT_SUGGESTION_FORM.context_message_limit), + // 场景判定模型配置项取消,统一使用数据库默认 LLM;后台按需保持开启 + topic_detection_enabled: 1, + situation_llm_enabled: 1 + }; + await window.electronAPI.updateSuggestionConfig(payload); + // 通知 HUD 侧刷新建议配置,避免需切换会话才生效 + window.electronAPI?.send?.('suggestion-config-updated'); + setSuggestionMessage('已保存'); + await loadSuggestionSettings(); + setTimeout(() => setSuggestionMessage(''), 3000); + } catch (error) { + 
console.error('保存建议配置失败:', error); + setSuggestionError(error?.message || '保存失败,请稍后重试'); + } finally { + setSuggestionSaving(false); + } + }, [loadSuggestionSettings, suggestionForm]); + + /** + * 清除消息 + */ + const clearSuggestionMessage = useCallback(() => { + setSuggestionMessage(''); + setSuggestionError(''); + }, []); + + return { + // 状态 + suggestionConfig, + suggestionForm, + suggestionLoading, + suggestionSaving, + suggestionMessage, + suggestionError, + + // 常量 + DEFAULT_SUGGESTION_FORM, + + // 方法 + updateSuggestionField, + handleSuggestionNumberChange, + loadSuggestionSettings, + handleSaveSuggestionConfig, + clearSuggestionMessage + }; +}; diff --git a/desktop/src/renderer/hooks/useSuggestions.js b/desktop/src/renderer/hooks/useSuggestions.js new file mode 100644 index 0000000..8e3d0a7 --- /dev/null +++ b/desktop/src/renderer/hooks/useSuggestions.js @@ -0,0 +1,714 @@ +import { useState, useCallback, useRef, useEffect } from 'react'; + +const DEFAULT_SUGGESTION_CONFIG = { + enable_passive_suggestion: 1, + suggestion_count: 3, + silence_threshold_seconds: 3, + message_threshold_count: 3, + cooldown_seconds: 15, + context_message_limit: 10, + topic_detection_enabled: 0, + situation_llm_enabled: 0, + situation_model_name: 'gpt-4o-mini' +}; + +const PASSIVE_REASON_LABEL = { + silence: '静默提醒', + message_count: '多条消息', + topic_change: '话题转折', + manual: '手动触发' +}; + +// 关键词启发式已停用,改为完全由 LLM 判定 +const TOPIC_HEURISTIC_REGEX = /.*/; + +/** + * 建议生成和管理的自定义Hook + */ +export const useSuggestions = (sessionInfo) => { + const [suggestions, setSuggestions] = useState([]); + const [suggestionMeta, setSuggestionMeta] = useState(null); + const [suggestionStatus, setSuggestionStatus] = useState('idle'); + const [suggestionError, setSuggestionError] = useState(''); + const [suggestionConfig, setSuggestionConfig] = useState(DEFAULT_SUGGESTION_CONFIG); + const [characterPendingCount, setCharacterPendingCount] = useState(0); + const [lastCharacterMessageTs, 
setLastCharacterMessageTs] = useState(null); + const [lastUserMessageTs, setLastUserMessageTs] = useState(null); + const [copiedSuggestionId, setCopiedSuggestionId] = useState(null); + const suggestionCooldownRef = useRef(0); + const topicDetectionStateRef = useRef({ running: false, lastMessageId: null }); + const activeStreamRef = useRef({ id: null, trigger: null, reason: null }); + + /** + * 加载建议配置 + */ + const loadSuggestionConfig = useCallback(async () => { + try { + const api = window.electronAPI; + if (!api?.getSuggestionConfig) return; + const config = await api.getSuggestionConfig(); + if (config) { + const normalized = { + ...DEFAULT_SUGGESTION_CONFIG, + ...config, + // 将数据库返回的 0/1 或字符串 '0'/'1' 统一转成布尔,避免 '0' 被当作真值 + enable_passive_suggestion: + config.enable_passive_suggestion === 1 || + config.enable_passive_suggestion === true || + config.enable_passive_suggestion === '1', + suggestion_count: Number(config.suggestion_count ?? DEFAULT_SUGGESTION_CONFIG.suggestion_count), + silence_threshold_seconds: Number(config.silence_threshold_seconds ?? DEFAULT_SUGGESTION_CONFIG.silence_threshold_seconds), + message_threshold_count: Number(config.message_threshold_count ?? DEFAULT_SUGGESTION_CONFIG.message_threshold_count), + cooldown_seconds: Number(config.cooldown_seconds ?? DEFAULT_SUGGESTION_CONFIG.cooldown_seconds), + context_message_limit: Number(config.context_message_limit ?? 
DEFAULT_SUGGESTION_CONFIG.context_message_limit), + topic_detection_enabled: + config.topic_detection_enabled === 1 || + config.topic_detection_enabled === true || + config.topic_detection_enabled === '1' + }; + console.log('[useSuggestions] Loaded suggestion config (normalized):', JSON.stringify(normalized)); + setSuggestionConfig(normalized); + } + } catch (err) { + console.error('加载建议配置失败:', err); + } + }, []); + + const updateSuggestionConfig = useCallback(async (updates = {}) => { + try { + if (!window.electronAPI?.updateSuggestionConfig) { + console.warn('[useSuggestions] updateSuggestionConfig API not available'); + return null; + } + await window.electronAPI.updateSuggestionConfig(updates); + // 通知同进程/其他窗口刷新配置(HUD 会监听此事件) + window.electronAPI?.send?.('suggestion-config-updated'); + // 本地也立即刷新一次,避免等待事件丢失 + await loadSuggestionConfig(); + return true; + } catch (err) { + console.error('[useSuggestions] Failed to update suggestion config:', err); + return null; + } + }, [loadSuggestionConfig]); + + /** + * 检查是否可以触发被动建议 + */ + const canTriggerPassive = useCallback(() => { + if (!suggestionConfig?.enable_passive_suggestion) { + console.log('[useSuggestions] Passive suggestion disabled by config'); + return false; + } + const cooldownMs = (suggestionConfig?.cooldown_seconds || 15) * 1000; + const elapsed = Date.now() - (suggestionCooldownRef.current || 0); + return elapsed >= cooldownMs; + }, [suggestionConfig]); + + const resetStreamState = useCallback((reason = 'unknown') => { + console.log(`[useSuggestions] resetStreamState called (reason: ${reason}). Clearing active stream:`, activeStreamRef.current); + activeStreamRef.current = { id: null, trigger: null, reason: null }; + }, []); + + const logStreamCharacters = useCallback(() => { + // Disabled: per-character logging is extremely noisy and can stall the renderer. 
+ }, []); + + const startSuggestionStream = useCallback( + ({ trigger, reason }) => { + console.log('[useSuggestions] startSuggestionStream called with:', { trigger, reason }); + if (!window.electronAPI?.startSuggestionStream) { + console.warn('[useSuggestions] startSuggestionStream API not available'); + return false; + } + const previousSuggestions = Array.isArray(suggestions) && suggestions.length + ? suggestions.slice(0, suggestionConfig?.suggestion_count || 5).map((item) => ({ + title: item.title || '', + content: item.content || '', + tags: item.tags || [], + decisionPointId: item.decision_point_id || item.decisionPointId || null + })) + : []; + const decisionPointId = + reason === 'refresh' + ? (suggestions?.[0]?.decision_point_id || suggestions?.[0]?.decisionPointId || null) + : null; + const streamId = `suggestion-stream-${Date.now()}-${Math.random().toString(36).slice(2, 6)}`; + console.log(`[useSuggestions] Generated streamId: ${streamId}`); + activeStreamRef.current = { id: streamId, trigger, reason }; + + console.log('[useSuggestions] Resetting state for new stream'); + setSuggestions([]); + setSuggestionMeta(null); + setSuggestionError(''); + setSuggestionStatus('streaming'); + + const payload = { + streamId, + conversationId: sessionInfo.conversationId, + characterId: sessionInfo.characterId, + decisionPointId, + trigger, + reason, + optionCount: suggestionConfig?.suggestion_count, + messageLimit: suggestionConfig?.context_message_limit, + previousSuggestions: previousSuggestions.length ? 
previousSuggestions : undefined + }; + console.log('[useSuggestions] Sending startSuggestionStream payload:', payload); + + window.electronAPI.startSuggestionStream(payload); + console.log('[useSuggestions] startSuggestionStream API called successfully'); + return true; + }, + [sessionInfo, suggestionConfig, suggestions] + ); + + /** + * 生成建议 + */ + const handleGenerateSuggestions = useCallback( + async ({ trigger = 'manual', reason = 'manual' } = {}) => { + // 被动关闭时,直接拒绝触发 + if (trigger === 'passive' && suggestionConfig?.enable_passive_suggestion !== true) { + console.log('[useSuggestions] Passive suggestion blocked by config'); + return; + } + + if (!sessionInfo?.conversationId || !sessionInfo?.characterId) { + setSuggestionError('请先选择有效的会话'); + return; + } + + if (suggestionStatus === 'loading' || suggestionStatus === 'streaming') { + return; + } + + if (window.electronAPI?.startSuggestionStream) { + startSuggestionStream({ trigger, reason }); + return; + } + + if (!window.electronAPI?.generateLLMSuggestions) { + setSuggestionError('LLM接口不可用'); + return; + } + + const previousSuggestions = Array.isArray(suggestions) && suggestions.length + ? suggestions.slice(0, suggestionConfig?.suggestion_count || 5).map((item) => ({ + title: item.title || '', + content: item.content || '', + tags: item.tags || [], + decisionPointId: item.decision_point_id || item.decisionPointId || null + })) + : []; + const decisionPointId = + reason === 'refresh' + ? (suggestions?.[0]?.decision_point_id || suggestions?.[0]?.decisionPointId || null) + : null; + + setSuggestionStatus('loading'); + setSuggestionError(''); + try { + const result = await window.electronAPI.generateLLMSuggestions({ + conversationId: sessionInfo.conversationId, + characterId: sessionInfo.characterId, + decisionPointId, + trigger, + reason, + optionCount: suggestionConfig?.suggestion_count, + messageLimit: suggestionConfig?.context_message_limit, + previousSuggestions: previousSuggestions.length ? 
previousSuggestions : undefined + }); + setSuggestions(result?.suggestions || []); + setSuggestionMeta({ + ...(result?.metadata || {}), + trigger, + reason, + triggeredAt: Date.now() + }); + suggestionCooldownRef.current = Date.now(); + } catch (err) { + console.error('生成建议失败:', err); + setSuggestionError(err?.message || '生成失败,请稍后重试'); + } finally { + setSuggestionStatus('idle'); + } + }, + [sessionInfo, suggestionConfig, suggestionStatus, startSuggestionStream, suggestions] + ); + + /** + * 触发被动建议 + */ + const triggerPassiveSuggestion = useCallback((reason) => { + if (!canTriggerPassive()) return; + if (suggestionStatus === 'streaming') return; + handleGenerateSuggestions({ trigger: 'passive', reason }); + }, [canTriggerPassive, handleGenerateSuggestions, suggestionStatus]); + + /** + * 复制建议 + */ + const handleCopySuggestion = useCallback(async (id, content) => { + try { + await navigator.clipboard.writeText(content); + setCopiedSuggestionId(id); + setTimeout(() => { + setCopiedSuggestionId((prev) => (prev === id ? null : prev)); + }, 1500); + } catch (err) { + console.error('复制建议失败:', err); + } + }, []); + + /** + * 显式确认“采用了哪个建议”(写入 DB,并在当前 UI 内高亮) + */ + const handleSelectSuggestion = useCallback(async (suggestion, selected = true) => { + try { + if (!suggestion?.id) return false; + if (!window.electronAPI?.selectActionSuggestion) { + console.warn('[useSuggestions] selectActionSuggestion API not available'); + return false; + } + + const ok = await window.electronAPI.selectActionSuggestion({ + suggestionId: suggestion.id, + selected: Boolean(selected), + selectedAt: Date.now() + }); + if (!ok) return false; + + // UI 侧按 batch_id(优先)互斥,保持与 DB 一致 + const scopeBatchId = suggestion.batch_id || null; + const scopeDecisionPointId = suggestion.decision_point_id || null; + setSuggestions((prev) => prev.map((item) => { + const sameScope = scopeBatchId + ? item.batch_id === scopeBatchId + : scopeDecisionPointId + ? 
item.decision_point_id === scopeDecisionPointId + : item.id === suggestion.id; + if (!sameScope) return item; + return { + ...item, + is_selected: selected ? item.id === suggestion.id : false, + selected_at: selected ? Date.now() : null + }; + })); + return true; + } catch (err) { + console.error('[useSuggestions] Failed to select suggestion:', err); + return false; + } + }, []); + + /** + * 情景判定(冷场/连发统一交由 LLM 评估) + */ + const maybeRunSituationDetection = useCallback( + async (reasonHint, message, opts = {}) => { + const isTopicChangeDirect = reasonHint === 'topic_change'; + // 话题转折:按产品预期直接生成,无需再走判定模型 + if (isTopicChangeDirect) { + if (!sessionInfo?.conversationId || !sessionInfo?.characterId) return; + if (!suggestionConfig?.enable_passive_suggestion) return; + if (!canTriggerPassive()) return; + if (suggestionStatus === 'streaming') return; + triggerPassiveSuggestion('topic_change'); + return; + } + + const detectionEnabled = + suggestionConfig?.situation_llm_enabled ?? suggestionConfig?.topic_detection_enabled; + // 静音/连发:需开启情景判定才会走 LLM 判定 + if (!detectionEnabled) return; + if (!sessionInfo?.conversationId || !sessionInfo?.characterId) return; + if (!window.electronAPI?.detectTopicShift) return; + if (!suggestionConfig?.enable_passive_suggestion) return; + if (!canTriggerPassive()) return; + if (suggestionStatus === 'streaming') return; + + const now = Date.now(); + const silenceBaseTs = lastUserMessageTs ?? lastCharacterMessageTs; + const silenceSecondsRaw = silenceBaseTs != null ? (now - silenceBaseTs) / 1000 : null; + const silenceSeconds = + silenceSecondsRaw != null + ? Math.min( + Math.max(Number.isFinite(silenceSecondsRaw) ? silenceSecondsRaw : 0, 0), + 60 + ) + : null; + const burstCountRaw = + opts.burstCountOverride !== undefined ? 
opts.burstCountOverride : characterPendingCount; + const burstCount = Math.min(Math.max(burstCountRaw || 0, 0), 8); + + const silenceReached = + silenceSeconds != null && + silenceSeconds >= (suggestionConfig?.silence_threshold_seconds || 3); + const burstReached = burstCount >= (suggestionConfig?.message_threshold_count || 3); + + if (!silenceReached && !burstReached) { + return; + } + + const currentState = topicDetectionStateRef.current; + if (currentState.running && currentState.lastMessageId === message?.id) { + return; + } + + topicDetectionStateRef.current = { running: true, lastMessageId: message?.id || null }; + try { + const result = await window.electronAPI.detectTopicShift({ + conversationId: sessionInfo.conversationId, + characterId: sessionInfo.characterId, + messageLimit: 8, + silence_seconds: silenceSeconds, + role_burst_count: burstCount, + trigger_hint: reasonHint + }); + if (result?.shouldSuggest) { + triggerPassiveSuggestion('topic_change'); + } + } catch (err) { + console.error('情景判定失败:', err); + } finally { + topicDetectionStateRef.current = { + ...topicDetectionStateRef.current, + running: false + }; + } + }, + [ + suggestionConfig, + sessionInfo, + canTriggerPassive, + suggestionStatus, + lastUserMessageTs, + characterPendingCount, + triggerPassiveSuggestion + ] + ); + + /** + * 处理新消息 + */ + const handleNewMessage = useCallback( + (message) => { + if (message.sender === 'character') { + setCharacterPendingCount((prev) => { + const next = prev + 1; + // 使用更新后的连发计数参与判定 + maybeRunSituationDetection('message_burst', message, { burstCountOverride: next }); + return next; + }); + setLastCharacterMessageTs(Date.now()); + } else if (message.sender === 'user') { + setCharacterPendingCount(0); + setLastUserMessageTs(Date.now()); + } + }, + [maybeRunSituationDetection] + ); + + /** + * 清除错误 + */ + const clearSuggestionError = useCallback(() => { + setSuggestionError(''); + }, []); + + const streamingHandlersRegisteredRef = useRef(false); + + // 将 
partial/最终建议按索引更新到列表 + const upsertSuggestionAt = useCallback((incoming = {}, index, streaming = false) => { + setSuggestions((prev) => { + const next = [...prev]; + const targetIndex = Number.isInteger(index) ? index : next.length; + while (next.length <= targetIndex) { + next.push({ + id: `partial-${targetIndex}-${next.length}`, + title: '', + content: '', + tags: [], + streaming: true + }); + } + const base = next[targetIndex] || {}; + const text = + incoming.content || + incoming.title || + incoming.suggestion || + base.content || + base.title || + ''; + next[targetIndex] = { + ...base, + ...incoming, + id: incoming.id || base.id || `partial-${targetIndex}`, + title: incoming.title || text, + content: incoming.content || text, + tags: incoming.tags || base.tags || [], + streaming + }; + return next; + }); + }, []); + + useEffect(() => { + if (!window.electronAPI?.startSuggestionStream || streamingHandlersRegisteredRef.current) { + return undefined; + } + const unsubs = []; + streamingHandlersRegisteredRef.current = true; + + if (window.electronAPI.onSuggestionStreamStart) { + unsubs.push( + window.electronAPI.onSuggestionStreamStart((data = {}) => { + console.log('[useSuggestions] Received onSuggestionStreamStart:', data); + if (data.streamId !== activeStreamRef.current?.id) { + console.log(`[useSuggestions] Ignoring stream start - streamId mismatch: ${data.streamId} vs ${activeStreamRef.current?.id}`); + return; + } + console.log('[useSuggestions] Processing stream start event'); + setSuggestionMeta((prev) => ({ + ...(prev || {}), + trigger: activeStreamRef.current.trigger, + reason: activeStreamRef.current.reason, + expectedCount: data.expectedCount, + triggeredAt: Date.now(), + streaming: true + })); + }) + ); + } + + if (window.electronAPI.onSuggestionStreamHeader) { + unsubs.push( + window.electronAPI.onSuggestionStreamHeader((data = {}) => { + console.log('[useSuggestions] Received onSuggestionStreamHeader:', data); + if (data.streamId !== 
activeStreamRef.current?.id) { + console.log(`[useSuggestions] Ignoring header - streamId mismatch: ${data.streamId} vs ${activeStreamRef.current?.id}`); + return; + } + console.log('[useSuggestions] Processing stream header event'); + setSuggestionMeta((prev) => ({ + ...(prev || {}), + expectedCount: data.expectedCount + })); + }) + ); + } + + if (window.electronAPI.onSuggestionStreamChunk) { + unsubs.push( + window.electronAPI.onSuggestionStreamChunk((data = {}) => { + console.log('[useSuggestions] Received onSuggestionStreamChunk:', data); + if (data.streamId !== activeStreamRef.current?.id) { + if (!activeStreamRef.current?.id && data.streamId) { + console.warn( + `[useSuggestions] No active stream, attempting recovery with incoming streamId: ${data.streamId}` + ); + activeStreamRef.current = { + id: data.streamId, + trigger: data.trigger || 'unknown', + reason: data.reason || 'recovered_from_chunk' + }; + } else { + console.log( + `[useSuggestions] Ignoring chunk - streamId mismatch: ${data.streamId} vs ${activeStreamRef.current?.id} (active stream:`, + activeStreamRef.current, + ')' + ); + return; + } + } + const chunkText = + data?.chunk || + data?.delta || + data?.text || + data?.suggestion?.content || + ''; + logStreamCharacters('stream chunk', chunkText); + if (!data.suggestion) { + console.warn('[useSuggestions] Received chunk without suggestion data'); + return; + } + console.log('[useSuggestions] Processing suggestion chunk:', data.suggestion); + const targetIndex = Number.isInteger(data.index) ? 
data.index : undefined; + upsertSuggestionAt(data.suggestion, targetIndex, false); + console.log('[useSuggestions] Updated suggestions with final chunk'); + }) + ); + } + + if (window.electronAPI.onSuggestionStreamPartial) { + unsubs.push( + window.electronAPI.onSuggestionStreamPartial((data = {}) => { + console.log('[useSuggestions] Received onSuggestionStreamPartial:', data); + if (data.streamId !== activeStreamRef.current?.id) { + console.log( + `[useSuggestions] Ignoring partial - streamId mismatch: ${data.streamId} vs ${activeStreamRef.current?.id}` + ); + return; + } + const targetIndex = Number.isInteger(data.index) ? data.index : undefined; + const suggestionData = data.suggestion || {}; + upsertSuggestionAt(suggestionData, targetIndex, true); + console.log('[useSuggestions] Updated suggestions with partial chunk'); + }) + ); + } + + if (window.electronAPI.onSuggestionStreamError) { + unsubs.push( + window.electronAPI.onSuggestionStreamError((data = {}) => { + console.error('[useSuggestions] Received onSuggestionStreamError:', data); + if (data.streamId && data.streamId !== activeStreamRef.current?.id) { + console.log(`[useSuggestions] Ignoring error - streamId mismatch: ${data.streamId} vs ${activeStreamRef.current?.id}`); + return; + } + console.log('[useSuggestions] Processing stream error event'); + setSuggestionError(data.error || '生成失败,请稍后重试'); + setSuggestionStatus('idle'); + // 延迟重置,给同一批事件一个处理窗口 + setTimeout(() => resetStreamState('stream_error'), 0); + }) + ); + } + + if (window.electronAPI.onSuggestionStreamEnd) { + unsubs.push( + window.electronAPI.onSuggestionStreamEnd((data = {}) => { + console.log('[useSuggestions] Received onSuggestionStreamEnd:', data); + if (data.streamId !== activeStreamRef.current?.id) { + if (!activeStreamRef.current?.id && data.streamId) { + console.warn( + `[useSuggestions] No active stream on end, attempting recovery with incoming streamId: ${data.streamId}` + ); + activeStreamRef.current = { + id: data.streamId, + 
trigger: data.trigger || 'unknown', + reason: data.reason || 'recovered_from_end' + }; + } else { + console.log( + `[useSuggestions] Ignoring stream end - streamId mismatch: ${data.streamId} vs ${activeStreamRef.current?.id}` + ); + return; + } + } + console.log('[useSuggestions] Processing stream end event'); + if (data.success) { + console.log('[useSuggestions] Stream completed successfully'); + } else { + console.warn('[useSuggestions] Stream ended without success'); + } + setSuggestionStatus('idle'); + if (data.metadata) { + setSuggestionMeta({ + ...data.metadata, + triggeredAt: Date.now() + }); + } + suggestionCooldownRef.current = Date.now(); + // 延迟重置,避免和同一 tick 内的 chunk 竞争 + setTimeout(() => resetStreamState('stream_end'), 0); + }) + ); + } + + return () => { + unsubs.forEach((off) => off && off()); + streamingHandlersRegisteredRef.current = false; + }; + }, [resetStreamState]); + + // 当session变化时,重置状态 + useEffect(() => { + if (!sessionInfo?.conversationId) { + setSuggestions([]); + setSuggestionMeta(null); + setCharacterPendingCount(0); + setLastCharacterMessageTs(null); + setLastUserMessageTs(null); + suggestionCooldownRef.current = 0; + topicDetectionStateRef.current = { running: false, lastMessageId: null }; + resetStreamState('conversation_id_cleared'); + return; + } + + loadSuggestionConfig(); + setSuggestions([]); + setSuggestionMeta(null); + setCharacterPendingCount(0); + setLastCharacterMessageTs(null); + setLastUserMessageTs(null); + suggestionCooldownRef.current = 0; + topicDetectionStateRef.current = { running: false, lastMessageId: null }; + // 如果有活跃流,避免强制重置导致流事件丢弃 + if (activeStreamRef.current.id) { + console.log('[useSuggestions] Skipping reset due to active stream:', activeStreamRef.current); + return; + } + resetStreamState('conversation_changed'); + }, [sessionInfo?.conversationId, loadSuggestionConfig, resetStreamState]); + + // 静默触发检查 + useEffect(() => { + if (!suggestionConfig?.enable_passive_suggestion) return undefined; + if 
(!lastCharacterMessageTs && !lastUserMessageTs) return undefined; + const thresholdMs = (suggestionConfig?.silence_threshold_seconds || 3) * 1000; + const baseTs = lastUserMessageTs ?? lastCharacterMessageTs; + const elapsed = baseTs ? Date.now() - baseTs : 0; + const wait = Math.max(thresholdMs - elapsed, 0); + const timer = setTimeout(() => { + maybeRunSituationDetection('silence_timer', null); + }, wait); + return () => clearTimeout(timer); + }, [lastCharacterMessageTs, lastUserMessageTs, suggestionConfig, maybeRunSituationDetection]); + + // 监听配置更新广播,实时刷新建议配置 + useEffect(() => { + const handler = () => { + // 仅在有会话时刷新,避免无效调用 + if (sessionInfo?.conversationId) { + console.log('[useSuggestions] Received suggestion-config-updated event, reloading config'); + loadSuggestionConfig(); + // 重置被动触发计数,避免旧计数在禁用后继续触发 + setCharacterPendingCount(0); + setLastCharacterMessageTs(null); + setLastUserMessageTs(null); + } + }; + window.electronAPI?.on?.('suggestion-config-updated', handler); + return () => { + window.electronAPI?.removeListener?.('suggestion-config-updated', handler); + }; + }, [loadSuggestionConfig, sessionInfo?.conversationId]); + + return { + // 状态 + suggestions, + suggestionMeta, + suggestionStatus, + suggestionError, + suggestionConfig, + characterPendingCount, + lastCharacterMessageTs, + copiedSuggestionId, + + // 常量 + PASSIVE_REASON_LABEL, + + // 方法 + handleGenerateSuggestions, + updateSuggestionConfig, + triggerPassiveSuggestion, + triggerTopicChangeSuggestion: () => maybeRunSituationDetection('topic_change'), + handleCopySuggestion, + handleSelectSuggestion, + handleNewMessage, + clearSuggestionError, + loadSuggestionConfig + }; +}; diff --git a/desktop/src/renderer/hud.compact.css b/desktop/src/renderer/hud.compact.css new file mode 100644 index 0000000..f86c4d1 --- /dev/null +++ b/desktop/src/renderer/hud.compact.css @@ -0,0 +1,312 @@ +/* ========================= */ +/* 精简 HUD 模式样式 */ +/* ========================= */ +.compact-widget { + width: 100%; 
+ height: 100%; + display: flex; + align-items: center; + justify-content: center; + background: transparent; +} + +.compact-widget-inner { + width: 340px; + background: rgba(28, 28, 30, 0.85); + backdrop-filter: blur(24px); + -webkit-backdrop-filter: blur(24px); + border: 1px solid rgba(255, 255, 255, 0.08); + border-radius: 16px; + box-shadow: 0 24px 48px rgba(0, 0, 0, 0.6); + display: flex; + flex-direction: column; + overflow: hidden; + color: #fff; +} + +.compact-header { + height: 56px; + background: rgba(255, 255, 255, 0.03); + border-bottom: 1px solid rgba(255, 255, 255, 0.06); + display: flex; + align-items: center; + justify-content: space-between; + padding: 0 16px; + -webkit-app-region: drag; +} + +.compact-status-badge { + position: relative; + width: 140px; + height: 32px; + background: rgba(0, 230, 118, 0.1); + border: 1px solid rgba(0, 230, 118, 0.2); + border-radius: 6px; + display: flex; + align-items: center; + justify-content: center; + cursor: help; + /* overflow: hidden; */ + transition: all 0.3s ease; +} + +.compact-status-badge:hover { + background: rgba(0, 0, 0, 0.3); + border-color: rgba(255, 255, 255, 0.1); +} + +.compact-status-badge .status-text-layer { + /* position: absolute; */ + display: flex; + align-items: center; + gap: 8px; + font-size: 12px; + font-weight: 700; + color: #00E676; + transition: opacity 0.3s ease, transform 0.3s ease; +} + +/* Removed old volume detail layer styles */ + +/* +.compact-status-badge:hover .status-text-layer { + opacity: 0; + transform: translateY(-10px); +} +*/ + +/* Enable popover on hover */ +.compact-status-badge:hover .listening-popover { + opacity: 1; + pointer-events: auto; + transform: translateY(0); +} + +.compact-mini-wave { + display: flex; + gap: 2px; + height: 12px; + align-items: flex-end; +} + +.compact-mini-wave .bar { + width: 3px; + background: #00E676; + border-radius: 2px; + animation: compactEqualize 1s infinite ease-in-out; +} + +.compact-mini-wave.paused .bar { + animation: 
none; + height: 6px !important; + background: #6b7280; +} + +.compact-mini-wave .bar:nth-child(1) { height: 60%; animation-delay: -0.4s; } +.compact-mini-wave .bar:nth-child(2) { height: 100%; animation-delay: -0.2s; } +.compact-mini-wave .bar:nth-child(3) { height: 50%; animation-delay: -0.5s; } + +.compact-window-controls { + display: flex; + align-items: center; + gap: 8px; + -webkit-app-region: no-drag; +} + +.compact-ctrl-btn { + width: 32px; + height: 32px; + border-radius: 6px; + border: none; + background: transparent; + color: #8E8E93; + display: flex; + align-items: center; + justify-content: center; + cursor: pointer; + transition: all 0.2s; +} + +.compact-ctrl-btn:hover { background: rgba(255, 255, 255, 0.1); color: #fff; } + +.compact-ctrl-btn.record-btn { + color: #FF453A; + background: rgba(255, 69, 58, 0.1); +} + +.compact-ctrl-btn.record-btn:hover { + background: rgba(255, 69, 58, 0.2); + transform: scale(1.05); +} + +.compact-ctrl-btn.record-btn.active { + background: #FF453A; + color: #fff; + box-shadow: 0 0 10px rgba(255, 69, 58, 0.4); + animation: compactPulseRed 2s infinite; +} + +.compact-widget-meta { + padding: 10px 16px 0 16px; + display: flex; + align-items: center; + justify-content: space-between; + gap: 8px; +} + +.compact-title { + font-size: 14px; + font-weight: 700; + color: #fff; +} + +.compact-subtitle { + font-size: 12px; + color: #ccc; + margin-left: 4px; +} + +.compact-meta-actions { + display: flex; + gap: 6px; +} + +.compact-refresh { + padding: 4px 8px; + border-radius: 6px; + border: 1px solid rgba(255, 255, 255, 0.1); + color: #ccc; + background: rgba(255, 255, 255, 0.05); +} + +.compact-widget-content { + padding: 12px 16px; + display: flex; + flex-direction: column; + gap: 10px; +} + +.compact-error { + background: rgba(255, 69, 58, 0.08); + border: 1px solid rgba(255, 69, 58, 0.3); + color: #ffcdd2; + padding: 8px 10px; + border-radius: 8px; + font-size: 12px; +} + +.compact-loading { + font-size: 12px; + color: #ccc; 
+} + +.compact-suggestion-list { + display: flex; + flex-direction: column; + gap: 10px; +} + +.compact-suggestion-card { + background: rgba(255, 255, 255, 0.04); + border: 1px solid rgba(255, 255, 255, 0.05); + border-radius: 10px; + padding: 12px 14px; + position: relative; + display: flex; + flex-direction: column; + gap: 6px; + transition: all 0.2s; +} + +.compact-suggestion-card:hover { + background: rgba(255, 255, 255, 0.08); + border-color: rgba(255, 255, 255, 0.15); + transform: translateY(-1px); +} + +.compact-tag { + font-size: 10px; + padding: 2px 6px; + border-radius: 4px; + display: inline-block; + border: 1px solid transparent; + width: fit-content; +} + +.compact-tag-blue { color: #82B1FF; background: rgba(130, 177, 255, 0.1); border-color: rgba(130, 177, 255, 0.2); } +.compact-tag-red { color: #FF8A80; background: rgba(255, 138, 128, 0.1); border-color: rgba(255, 138, 128, 0.2); } +.compact-tag-purple { color: #EA80FC; background: rgba(234, 128, 252, 0.1); border-color: rgba(234, 128, 252, 0.2); } + +.compact-text { + font-size: 13px; + line-height: 1.5; + color: #eee; +} + +.compact-tag-group { + display: flex; + gap: 6px; + flex-wrap: wrap; + margin-bottom: 6px; +} + +.compact-copy-btn { + align-self: flex-end; + background: rgba(255, 255, 255, 0.08); + border: 1px solid rgba(255, 255, 255, 0.15); + color: #fff; + padding: 4px 10px; + border-radius: 6px; + cursor: pointer; + font-size: 11px; + transition: all 0.2s; +} + +.compact-copy-btn:hover { background: rgba(0, 230, 118, 0.15); border-color: rgba(0, 230, 118, 0.3); } +.compact-copy-btn.copied { background: rgba(0, 230, 118, 0.25); border-color: rgba(0, 230, 118, 0.5); color: #00E676; } + +.compact-widget-footer { + padding: 10px 16px; + border-top: 1px solid rgba(255, 255, 255, 0.06); + display: flex; + justify-content: space-between; + align-items: center; + font-size: 11px; + color: #888; +} + +.compact-model-label { + color: #aaa; +} + +.compact-widget .refresh-btn { + background: none; + 
border: 1px solid rgba(255, 255, 255, 0.1); + color: #aaa; + padding: 4px 10px; + border-radius: 4px; + cursor: pointer; + display: flex; + align-items: center; + gap: 4px; + font-size: 11px; + transition: 0.2s; +} + +.compact-widget .refresh-btn:hover { + background: rgba(255, 255, 255, 0.05); + color: #fff; +} + +@keyframes compactEqualize { + 0% { height: 20%; } + 50% { height: 100%; } + 100% { height: 20%; } +} + +@keyframes compactPulseRed { + 0% { box-shadow: 0 0 0 0 rgba(255, 69, 58, 0.4); } + 70% { box-shadow: 0 0 0 10px rgba(255, 69, 58, 0); } + 100% { box-shadow: 0 0 0 0 rgba(255, 69, 58, 0); } +} + diff --git a/desktop/src/renderer/hud.css b/desktop/src/renderer/hud.css index f35961b..8f67199 100644 --- a/desktop/src/renderer/hud.css +++ b/desktop/src/renderer/hud.css @@ -1,662 +1,3 @@ -:root { - font-family: 'Noto Sans SC', 'Segoe UI', system-ui, sans-serif; - color: #1f1d2b; -} - -body { - margin: 0; - background: transparent; -} - -#hud-root { - width: 100vw; - height: 100vh; -} - -.hud-body { - width: 100%; - height: 100%; - background: transparent; -} - -.hud-container { - width: 100%; - height: 100%; - padding: 18px; - box-sizing: border-box; - display: flex; - flex-direction: column; - gap: 16px; - background: rgba(255, 255, 255, 0.92); - border-radius: 20px; - border: 1px solid rgba(255, 255, 255, 0.5); - box-shadow: 0 20px 50px rgba(0, 0, 0, 0.35), inset 0 0 40px rgba(255, 255, 255, 0.35); - backdrop-filter: blur(26px); -} - -.selector-container { - width: 100%; - height: 100%; - padding: 18px; - box-sizing: border-box; - display: flex; - flex-direction: column; - gap: 16px; - background: rgba(255, 255, 255, 0.92); - border-radius: 20px; - border: 1px solid rgba(255, 255, 255, 0.5); - box-shadow: 0 20px 50px rgba(0, 0, 0, 0.35), inset 0 0 40px rgba(255, 255, 255, 0.35); - backdrop-filter: blur(26px); -} - -.selector-content { - flex: 1; - display: flex; - flex-direction: column; - gap: 16px; - overflow-y: auto; -} - -.hud-title-section { - 
display: inline-flex; - align-items: center; - gap: 8px; -} - -.hud-header { - display: flex; - justify-content: space-between; - align-items: center; - -webkit-app-region: drag; - /* Enable native window dragging */ -} - -.hud-drag-zone { - display: inline-flex; - align-items: center; - gap: 8px; - cursor: default; - /* CSS drag handles cursor automatically mostly */ - user-select: none; -} - -.hud-drag-zone.hud-dragging { - cursor: default; -} - -.hud-title { - font-weight: 600; - font-size: 15px; - color: #c51662; -} - -.hud-controls { - display: flex; - gap: 10px; - -webkit-app-region: no-drag; - /* Disable drag for controls */ -} - -.control-btn { - width: 32px; - height: 32px; - border-radius: 50%; - border: none; - background: rgba(197, 22, 98, 0.12); - color: #c51662; - font-size: 20px; - cursor: pointer; - transition: transform 0.2s ease, background 0.2s ease; - display: flex; - align-items: center; - justify-content: center; - -webkit-app-region: no-drag; - /* Double ensure */ -} - -.control-btn svg { - width: 16px; - height: 16px; -} - -.control-btn:hover { - background: rgba(197, 22, 98, 0.25); - transform: scale(1.08); -} - -.status-indicator { - width: 8px; - height: 8px; - border-radius: 50%; - background: #22c55e; - animation: pulse 2s infinite; -} - -@keyframes pulse { - 0% { - opacity: 1; - } - - 50% { - opacity: 0.4; - } - - 100% { - opacity: 1; - } -} - -.hud-section { - display: flex; - flex-direction: column; - gap: 8px; -} - -.section-label { - font-size: 12px; - font-weight: 700; - text-transform: uppercase; - letter-spacing: 0.5px; - color: #974e6e; -} - -.transcript-area { - min-height: 140px; - max-height: 170px; - padding: 10px; - border-radius: 14px; - background: rgba(15, 23, 42, 0.04); - border: 1px solid rgba(15, 23, 42, 0.1); - overflow-y: auto; - display: flex; - flex-direction: column; - gap: 6px; -} - -.hud-status { - text-align: center; - flex: 1; - display: flex; - flex-direction: column; - justify-content: center; - 
align-items: center; - padding: 12px; - gap: 6px; - color: rgba(15, 23, 42, 0.85); -} - -.hud-status-text { - font-size: 13px; -} - -.hud-error { - color: #c51662; -} - -.hud-warning { - color: #f59e0b; - background: rgba(245, 158, 11, 0.1); - border: 1px solid rgba(245, 158, 11, 0.3); - border-radius: 8px; - padding: 12px; - margin: 8px 0; -} - -.hud-spinner { - width: 26px; - height: 26px; - border-radius: 50%; - border: 3px solid rgba(255, 255, 255, 0.5); - border-bottom-color: #c51662; - animation: spinner 0.9s linear infinite; - box-shadow: 0 0 12px rgba(197, 22, 98, 0.35); -} - -@keyframes spinner { - to { - transform: rotate(360deg); - } -} - -.message-item { - display: flex; -} - -.message-other { - justify-content: flex-start; -} - -.message-user { - justify-content: flex-end; -} - -.message-bubble { - max-width: 78%; - padding: 10px 14px; - border-radius: 16px; - font-size: 13px; - line-height: 1.5; - word-break: break-word; - background: rgba(255, 255, 255, 0.9); - color: #111827; - box-shadow: 0 6px 16px rgba(17, 24, 39, 0.08); -} - -.message-user .message-bubble { - background: rgba(197, 22, 98, 0.12); - color: #c51662; -} - -.message-streaming .message-bubble { - opacity: 0.85; - border: 1px dashed rgba(197, 22, 98, 0.4); - position: relative; -} - -.message-streaming-indicator { - display: inline-block; - margin-left: 6px; - color: rgba(197, 22, 98, 0.8); - animation: pulseDots 1.2s ease-in-out infinite; -} - -@keyframes pulseDots { - 0% { - opacity: 0.2; - } - 50% { - opacity: 1; - } - 100% { - opacity: 0.2; - } -} - -.suggestions-grid { - display: flex; - flex-direction: column; - gap: 12px; -} - -.suggestion-card { - padding: 12px 14px; - border-radius: 16px; - background: linear-gradient(135deg, rgba(197, 22, 98, 0.1), rgba(142, 36, 170, 0.08)); - border: 1px solid rgba(197, 22, 98, 0.3); - box-shadow: inset 0 0 12px rgba(255, 255, 255, 0.3); - transition: transform 0.2s ease; -} - -.suggestion-card:hover { - transform: translateY(-2px); -} - 
-.suggestion-header { - display: flex; - justify-content: space-between; - align-items: flex-start; - margin-bottom: 6px; - color: #c51662; - font-size: 13px; - font-weight: 600; -} - -.suggestion-body { - font-size: 13px; - line-height: 1.5; - color: rgba(15, 23, 42, 0.9); - margin: 0; -} - -.suggestion-meta { - display: flex; - gap: 6px; - margin-top: 10px; - flex-wrap: wrap; -} - -.suggestion-badge { - padding: 2px 8px; - font-size: 10px; - font-weight: 600; - border-radius: 999px; - background: rgba(255, 255, 255, 0.5); - color: #5b21b6; -} - -/* 角色选择器样式 */ -.character-grid { - display: grid; - grid-template-columns: repeat(auto-fill, minmax(140px, 1fr)); - gap: 12px; - padding: 8px 0; -} - -.character-card { - background: rgba(255, 255, 255, 0.6); - border-radius: 14px; - padding: 12px; - cursor: pointer; - border: 1px solid rgba(255, 255, 255, 0.4); - box-shadow: 0 4px 12px rgba(0, 0, 0, 0.05); - transition: transform 0.2s ease, box-shadow 0.2s ease; - text-align: center; -} - -.character-card:hover { - transform: translateY(-4px); - box-shadow: 0 8px 24px rgba(0, 0, 0, 0.1); -} - -.character-avatar { - width: 48px; - height: 48px; - border-radius: 50%; - margin: 0 auto 8px; - display: flex; - align-items: center; - justify-content: center; - color: white; - font-size: 18px; - font-weight: bold; - box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); -} - -.character-info h3 { - font-size: 14px; - font-weight: 600; - margin: 0 0 4px 0; - color: #1f1d2b; -} - -.character-relationship { - font-size: 11px; - color: #6b7280; - margin: 0 0 8px 0; -} - -.character-stats { - display: flex; - justify-content: center; - align-items: center; - gap: 4px; - font-size: 11px; -} - -.affinity-label { - color: #6b7280; -} - -.affinity-value { - color: #c51662; - font-weight: 600; -} - -.back-button { - background: rgba(197, 22, 98, 0.1); - color: #c51662; - border: 1px solid rgba(197, 22, 98, 0.3); - padding: 8px 12px; - border-radius: 8px; - cursor: pointer; - font-size: 12px; - 
transition: background 0.2s ease; - align-self: flex-start; -} - -.back-button:hover { - background: rgba(197, 22, 98, 0.2); -} - -.new-conversation-btn { - background: linear-gradient(135deg, #c51662, #8e24aa); - color: white; - border: none; - padding: 12px 16px; - border-radius: 12px; - cursor: pointer; - font-size: 13px; - font-weight: 600; - display: flex; - align-items: center; - justify-content: center; - gap: 8px; - box-shadow: 0 4px 12px rgba(197, 22, 98, 0.3); - transition: transform 0.2s ease, box-shadow 0.2s ease; -} - -.new-conversation-btn:hover { - transform: translateY(-2px); - box-shadow: 0 6px 20px rgba(197, 22, 98, 0.4); -} - -.new-conversation-icon { - font-size: 16px; - font-weight: bold; -} - -.no-conversations { - text-align: center; - padding: 24px; - color: #6b7280; -} - -.no-conversations-text { - margin: 0 0 8px 0; - font-size: 13px; -} - -.no-conversations-hint { - margin: 0; - font-size: 11px; - color: #9ca3af; -} - -.conversation-list { - display: flex; - flex-direction: column; - gap: 8px; -} - -.conversation-item { - background: rgba(255, 255, 255, 0.6); - border-radius: 12px; - padding: 12px; - cursor: pointer; - border: 1px solid rgba(255, 255, 255, 0.4); - box-shadow: 0 2px 6px rgba(0, 0, 0, 0.05); - transition: transform 0.2s ease, box-shadow 0.2s ease; - display: flex; - justify-content: space-between; - align-items: center; -} - -.conversation-item:hover { - transform: translateY(-2px); - box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1); -} - -.conversation-info h4 { - margin: 0 0 4px 0; - font-size: 13px; - font-weight: 600; - color: #1f1d2b; - cursor: pointer; - user-select: none; - transition: color 0.2s ease; -} - -.conversation-info h4:hover { - color: #c51662; -} - -.conversation-meta { - margin: 0; - font-size: 11px; - color: #6b7280; -} - -.conversation-arrow { - color: #c51662; - font-weight: bold; - font-size: 14px; -} - - -.conversation-edit-form { - display: flex; - flex-direction: column; - gap: 8px; - width: 100%; -} - 
-.conversation-title-input { - background: rgba(255, 255, 255, 0.9); - border: 1px solid rgba(197, 22, 98, 0.3); - border-radius: 8px; - padding: 8px 12px; - font-size: 13px; - font-weight: 600; - color: #1f1d2b; - outline: none; - transition: border-color 0.2s ease; - width: 100%; - box-sizing: border-box; -} - -.conversation-title-input:focus { - border-color: #c51662; - box-shadow: 0 0 0 3px rgba(197, 22, 98, 0.1); -} - -.conversation-edit-actions { - display: flex; - gap: 8px; - justify-content: flex-end; -} - -.edit-btn { - background: rgba(255, 255, 255, 0.9); - border: 1px solid rgba(197, 22, 98, 0.3); - border-radius: 6px; - padding: 4px 12px; - font-size: 14px; - cursor: pointer; - transition: all 0.2s ease; - color: #1f1d2b; -} - -.edit-btn.save-btn { - background: #c51662; - color: white; - border-color: #c51662; -} - -.edit-btn.save-btn:hover { - background: #a0124f; - border-color: #a0124f; -} - -.edit-btn.cancel-btn:hover { - background: rgba(0, 0, 0, 0.05); - border-color: rgba(197, 22, 98, 0.5); -} - -/* ======================================== */ -/* 响应式设计 -/* ======================================== */ - -/* 音量指示器样式 */ -.volume-indicators { - display: flex; - flex-direction: column; - gap: 8px; - margin-top: 8px; - padding: 8px; - background: rgba(15, 23, 42, 0.03); - border-radius: 8px; -} - -.volume-item { - display: flex; - align-items: center; - gap: 8px; - font-size: 11px; -} - -.volume-label { - width: 32px; - color: rgba(15, 23, 42, 0.7); - font-weight: 600; -} - -.volume-bar-container { - flex: 1; - height: 6px; - background: rgba(15, 23, 42, 0.1); - border-radius: 3px; - overflow: hidden; -} - -.volume-bar { - height: 100%; - border-radius: 3px; - transition: width 0.1s ease; -} - -.volume-bar-mic { - background: linear-gradient(to right, #3b82f6, #2563eb); -} - -.volume-bar-system { - background: linear-gradient(to right, #a855f7, #9333ea); -} - -.volume-value { - width: 32px; - text-align: right; - color: rgba(15, 23, 42, 0.6); - 
font-size: 10px; -} - -/* 系统音频未授权提示 */ -.volume-warning { - margin-left: 4px; - cursor: help; -} - -.system-audio-hint { - font-size: 10px; - color: #f59e0b; - background: rgba(245, 158, 11, 0.1); - padding: 6px 10px; - border-radius: 6px; - border: 1px solid rgba(245, 158, 11, 0.2); - margin-top: 4px; -} - -@media (max-width: 768px) { - .hud-container { - max-height: 90vh; - } - - .hud-header { - padding: 12px 16px; - } - - .hud-title { - font-size: 14px; - } - - .transcript-area { - height: 120px; - } - - .suggestions-grid { - grid-template-columns: 1fr; - } - - .asr-speakers { - grid-template-columns: 1fr; - } -} \ No newline at end of file +@import "./hud.layout.css"; +@import "./hud.compact.css"; +@import "./hud.responsive.css"; diff --git a/desktop/src/renderer/hud.jsx b/desktop/src/renderer/hud.jsx index 1237ae9..1583c17 100644 --- a/desktop/src/renderer/hud.jsx +++ b/desktop/src/renderer/hud.jsx @@ -1,21 +1,16 @@ -import React, { useCallback, useEffect, useRef, useState } from 'react'; +import React, { useEffect } from 'react'; import ReactDOM from 'react-dom/client'; import './hud.css'; -import audioCaptureService from '../asr/audio-capture-service.js'; - - -const HUD_SUGGESTIONS = [ - { - title: '提议具体地点', - body: '"我知道附近有个很棒的公园,樱花特别美,要不要去那里?"', - badges: ['主动', '体贴'] - }, - { - title: '表达期待', - body: '"太好了!我一直想和你一起去散步呢。"', - badges: ['情感', '真诚'] - } -]; +import audioCaptureService from "../asr/audio-capture-service.js"; +// Hooks +import { useChatSession } from './hooks/useChatSession.js'; +import { useMessages } from './hooks/useMessages.js'; +import { useSuggestions } from './hooks/useSuggestions.js'; + +// Components +import { SessionSelector } from "./pages/HUD/SessionSelector.jsx"; +import { CompactHud } from './components/Chat/CompactHud.jsx'; +import { FullHud } from './components/Chat/FullHud.jsx'; const getPointerCoords = (event) => { const x = event.screenX !== undefined && event.screenX !== null ? 
event.screenX : event.clientX; @@ -23,368 +18,118 @@ const getPointerCoords = (event) => { return { x, y }; }; -// 会话选择器组件 -function SessionSelector({ onSessionSelected, onClose }) { - const [characters, setCharacters] = useState([]); - const [selectedCharacter, setSelectedCharacter] = useState(null); - const [conversations, setConversations] = useState([]); - const [loading, setLoading] = useState(true); - const [characterLoading, setCharacterLoading] = useState(false); - const [editingConversationId, setEditingConversationId] = useState(null); - const [editingTitle, setEditingTitle] = useState(''); - - useEffect(() => { - loadCharacters(); - }, []); - - const loadCharacters = async () => { - try { - setLoading(true); - const api = window.electronAPI; - if (!api || !api.getAllCharacters) { - throw new Error('数据库API不可用'); - } - const chars = await api.getAllCharacters(); - setCharacters(chars || []); - } catch (err) { - console.error('加载角色失败:', err); - } finally { - setLoading(false); - } - }; +function Hud() { + // 使用自定义Hooks + const chatSession = useChatSession(); + const messages = useMessages(chatSession.sessionInfo?.conversationId); + const suggestions = useSuggestions(chatSession.sessionInfo); - const handleCharacterSelect = async (character) => { - setSelectedCharacter(character); - setCharacterLoading(true); - try { - const api = window.electronAPI; - const convs = await api.getConversationsByCharacter(character.id); - setConversations(convs || []); - } catch (err) { - console.error('加载会话失败:', err); - setConversations([]); - } finally { - setCharacterLoading(false); - } - }; + // 音量检测相关状态 + const [micVolumeLevel, setMicVolumeLevel] = React.useState(0); + const [systemVolumeLevel, setSystemVolumeLevel] = React.useState(0); + const [hasSystemAudio, setHasSystemAudio] = React.useState(false); + const [systemAudioNotAuthorized, setSystemAudioNotAuthorized] = React.useState(false); + const [isListening, setIsListening] = React.useState(false); + const 
[showSelector, setShowSelector] = React.useState(true); + const [viewMode, setViewMode] = React.useState('full'); // full | compact + + // 临时禁用streaming功能 + const streamingDisabled = true; - const handleContinueConversation = (conversation) => { - onSessionSelected({ - characterId: selectedCharacter.id, - conversationId: conversation.id, - conversationName: conversation.title || conversation.name || '未命名对话', - characterName: selectedCharacter.name, - isNew: false - }); - }; + // 确保有可用的对话ID(新建后回写状态) + const ensureConversationId = async () => { + let conversationId = chatSession.sessionInfo?.conversationId; + if (conversationId) return conversationId; - const handleStartEdit = (conversation, e) => { - e.stopPropagation(); - setEditingConversationId(conversation.id); - setEditingTitle(conversation.title || conversation.name || ''); - }; + const api = window.electronAPI; + const characterId = chatSession.sessionInfo?.characterId; + if (!api?.dbCreateConversation || !characterId) return null; - const handleSaveEdit = async (conversationId, e) => { - e.stopPropagation(); try { - const api = window.electronAPI; - if (!api || !api.updateConversation) { - throw new Error('数据库API不可用'); - } - - const updated = await api.updateConversation(conversationId, { - title: editingTitle.trim() || '未命名对话' + const newConv = await api.dbCreateConversation({ + character_id: characterId, + title: chatSession.sessionInfo?.conversationName || '新对话' }); - - if (updated) { - // 更新本地状态 - setConversations(prev => prev.map(conv => - conv.id === conversationId - ? 
{ ...conv, title: updated.title } - : conv - )); + conversationId = newConv?.id; + if (conversationId) { + await chatSession.handleSessionSelected({ + ...chatSession.sessionInfo, + conversationId, + conversationName: newConv?.title || chatSession.sessionInfo?.conversationName, + isNew: false + }); } - - setEditingConversationId(null); - setEditingTitle(''); + return conversationId; } catch (err) { - console.error('保存会话标题失败:', err); - alert('保存失败,请重试'); + console.error('[HUD] 创建对话失败:', err); + return null; } }; - const handleCancelEdit = (e) => { - e.stopPropagation(); - setEditingConversationId(null); - setEditingTitle(''); - }; - - const handleCreateNewConversation = async () => { - try { - const timestamp = new Date().toLocaleString('zh-CN'); - const conversationName = `与 ${selectedCharacter.name} 的新对话 - ${timestamp}`; - - onSessionSelected({ - characterId: selectedCharacter.id, - conversationId: null, - conversationName: conversationName, - characterName: selectedCharacter.name, - isNew: true - }); - } catch (err) { - console.error('创建新会话失败:', err); + // 对齐设置页:确保音源存在且有兜底麦克风 + const prepareAudioSources = async () => { + const api = window.electronAPI; + if (!api?.asrGetAudioSources) { + throw new Error('ASR 音频源接口不可用'); } - }; - - const getAvatarGradient = (color) => { - if (color?.includes('ff6b6b')) return 'bg-gradient-to-br from-[#ff6b6b] to-[#ff8e8e]'; - if (color?.includes('4ecdc4')) return 'bg-gradient-to-br from-[#4ecdc4] to-[#6ee5dd]'; - if (color?.includes('ffe66d')) return 'bg-gradient-to-br from-[#ffe66d] to-[#fff099]'; - return 'bg-gradient-to-br from-primary to-[#8e24aa]'; - }; - if (loading) { - return ( -
-
-
-
- ); - } - - if (!selectedCharacter) { - return ( -
-
-
- - 选择聊天对象 -
- -
-
-
选择角色开始对话
- {characters.length === 0 ? ( -
-

暂无角色数据

-
- ) : ( -
- {characters.map((character) => { - const firstLetter = character.name.charAt(0); - const avatarGradient = getAvatarGradient(character.avatar_color); - return ( -
handleCharacterSelect(character)} - > -
- {firstLetter} -
-
-

{character.name}

-

{character.relationship_label}

-
- 好感度 - {character.affinity}% -
-
-
- ); - })} -
- )} -
-
- ); - } + let audioSources = await api.asrGetAudioSources(); + let speaker1 = audioSources.find(s => s.id === 'speaker1'); + let speaker2 = audioSources.find(s => s.id === 'speaker2'); - return ( -
-
-
- - {selectedCharacter.name} -
- -
-
- - -
选择会话
- - - - {characterLoading ? ( -
-
- ) : conversations.length === 0 ? ( -
-

还没有对话记录

-

点击上方按钮创建新对话

-
- ) : ( -
- {conversations.map((conversation) => ( -
{ - if (editingConversationId !== conversation.id) { - handleContinueConversation(conversation); - } - }} - > -
- {editingConversationId === conversation.id ? ( -
- setEditingTitle(e.target.value)} - onClick={(e) => e.stopPropagation()} - onKeyDown={(e) => { - if (e.key === 'Enter') { - handleSaveEdit(conversation.id, e); - } else if (e.key === 'Escape') { - handleCancelEdit(e); - } - }} - autoFocus - /> -
- - -
-
- ) : ( - <> -

{ - e.stopPropagation(); - handleStartEdit(conversation, e); - }} - title="双击编辑标题" - > - {conversation.title || conversation.name || '未命名对话'} -

-

- {new Date(conversation.created_at).toLocaleDateString('zh-CN')} - {conversation.message_count > 0 && ` • ${conversation.message_count} 条消息`} -

- - )} -
- {editingConversationId !== conversation.id && ( -
- )} -
- ))} -
- )} -
-
- ); -} - -function Hud() { - const [messages, setMessages] = useState([]); - const [streamingMessages, setStreamingMessages] = useState({}); - // 临时禁用streaming功能以修复HUD关闭问题 - const streamingDisabled = true; - const [loading, setLoading] = useState(true); - const [error, setError] = useState(''); - const [showSelector, setShowSelector] = useState(true); - const [sessionInfo, setSessionInfo] = useState(null); - const transcriptRef = useRef(null); - - // 音量检测相关状态 - const [micVolumeLevel, setMicVolumeLevel] = useState(0); - const [systemVolumeLevel, setSystemVolumeLevel] = useState(0); - const [hasSystemAudio, setHasSystemAudio] = useState(false); - const [systemAudioNotAuthorized, setSystemAudioNotAuthorized] = useState(false); // 系统音频未授权提示 - const [isListening, setIsListening] = useState(false); - - const loadMessages = useCallback(async (conversationId) => { - setLoading(true); - setError(''); - try { - const api = window.electronAPI; - if (!api || !api.getMessagesByConversation) { - throw new Error('数据库API不可用'); + // 若未配置或缺失设备ID,自动枚举麦克风并写入数据库 + if (!speaker1 || !speaker1.device_id) { + const devices = await audioCaptureService.enumerateDevices(); + if (!devices || devices.length === 0) { + throw new Error('未找到可用麦克风设备,请先在系统中确认设备连接'); } - const fetchedMessages = await api.getMessagesByConversation(conversationId); - setMessages(fetchedMessages || []); - } catch (err) { - console.error('加载对话失败:', err); - setError(err instanceof Error ? 
err.message : '加载失败'); - setMessages([]); - } finally { - setLoading(false); + const firstDevice = devices[0]; + const payload = { + id: 'speaker1', + name: '用户(麦克风)', + device_id: firstDevice.deviceId, + device_name: firstDevice.label || firstDevice.deviceId, + is_active: 1 + }; + + if (speaker1) { + await api.asrUpdateAudioSource('speaker1', payload); + } else if (api.asrCreateAudioSource) { + await api.asrCreateAudioSource(payload); + } + speaker1 = payload; } - }, []); + // 确保系统音频源有记录,沿用设置页的默认占位配置 + if (!speaker2 && api.asrCreateAudioSource) { + try { + await api.asrCreateAudioSource({ + id: 'speaker2', + name: '角色(系统音频)', + device_id: 'system-loopback', + device_name: '系统音频(屏幕捕获)', + is_active: 0 + }); + } catch (err) { + console.warn('[HUD] 创建系统音频源失败:', err); + } + } + // 重新获取,保证使用最新状态 + audioSources = await api.asrGetAudioSources(); + speaker1 = audioSources.find(s => s.id === 'speaker1') || speaker1; + speaker2 = audioSources.find(s => s.id === 'speaker2') || speaker2; - const handleSessionSelected = async (info) => { - setSessionInfo(info); - setShowSelector(false); - if (info.conversationId) { - loadMessages(info.conversationId); - } else { - setMessages([]); - setLoading(false); + const isSpeaker1Active = speaker1 && (speaker1.is_active === 1 || speaker1.is_active === true || speaker1.is_active === '1'); + if (!isSpeaker1Active) { + throw new Error('麦克风配置未激活,请在设置中启用音频源'); } - - // 确保有对话 ID(如果是新对话,需要先创建) - let conversationId = info.conversationId; - if (!conversationId && info.characterId) { - const api = window.electronAPI; - if (api && api.dbCreateConversation) { - try { - const newConv = await api.dbCreateConversation({ - character_id: info.characterId, - title: info.conversationName || '新对话' - }); - conversationId = newConv?.id; - if (conversationId) { - setSessionInfo({ ...info, conversationId }); - } - } catch (err) { - console.error('创建新对话失败:', err); - setError('创建新对话失败'); - } - } + if (!speaker1?.device_id) { + throw new 
Error('麦克风设备ID未配置,请在设置中配置音频源'); } + + return { speaker1, speaker2 }; }; const toggleListening = async () => { @@ -413,33 +158,13 @@ function Hud() { return; } - const conversationId = sessionInfo?.conversationId; + const conversationId = await ensureConversationId(); if (!conversationId) { - setError('未找到有效的对话ID'); + chatSession.setError('未找到有效的对话ID'); return; } - // 检查音频源配置 - const audioSources = await api.asrGetAudioSources(); - const speaker1 = audioSources.find(s => s.id === 'speaker1'); - const speaker2 = audioSources.find(s => s.id === 'speaker2'); - - // 检查speaker1是否存在且激活 - if (!speaker1) { - setError('未找到麦克风配置,请在设置中配置音频源'); - return; - } - - const isSpeaker1Active = speaker1.is_active === 1 || speaker1.is_active === true || speaker1.is_active === '1'; - if (!isSpeaker1Active) { - setError('麦克风配置未激活,请在设置中启用音频源'); - return; - } - - if (!speaker1.device_id) { - setError('麦克风设备ID未配置,请在设置中配置音频源'); - return; - } + const { speaker1, speaker2 } = await prepareAudioSources(); // 1. 通知主进程开始 ASR await api.asrStart(conversationId); @@ -470,362 +195,117 @@ function Hud() { setHasSystemAudio(systemAudioEnabled); setIsListening(true); - setError(''); // 清除之前的错误 + chatSession.setError(''); // 清除之前的错误 } catch (captureError) { console.error('[HUD] Failed to start audio capture:', captureError); - setError(`音频捕获启动失败: ${captureError.message}`); + chatSession.setError(`音频捕获启动失败: ${captureError.message}`); // 如果启动失败,尝试停止已启动的部分 await audioCaptureService.stopAllCaptures(); } } catch (error) { console.error('[HUD] Error starting ASR:', error); - setError(`启动语音识别失败:${error.message}`); - } - }; - - const handleCloseSelector = () => { - if (window.electronAPI?.closeHUD) { - window.electronAPI.closeHUD(); + chatSession.setError(`启动语音识别失败:${error.message}`); } }; + // 监听音量更新事件 useEffect(() => { - if (showSelector) return; - setTimeout(() => { - if (sessionInfo?.conversationId) { - loadMessages(sessionInfo.conversationId); + const handleVolumeUpdate = ({ sourceId, volume }) => { + if 
(sourceId === 'speaker1') { + setMicVolumeLevel(volume); + } else if (sourceId === 'speaker2') { + setSystemVolumeLevel(volume); } - }, 0); - }, [showSelector, sessionInfo, loadMessages]); + }; - useEffect(() => { - if (transcriptRef.current) { - transcriptRef.current.scrollTop = transcriptRef.current.scrollHeight; - } - }, [messages]); - - const updateStreamingMessage = useCallback((sourceId, sender, content, timestamp) => { - if (!sourceId || !content) return; - setStreamingMessages(prev => ({ - ...prev, - [sourceId]: { - id: `stream-${sourceId}`, - sender, - content, - timestamp: timestamp || Date.now() - } - })); - }, []); + audioCaptureService.on('volume-update', handleVolumeUpdate); - const clearStreamingMessage = useCallback((sourceId) => { - if (!sourceId) return; - setStreamingMessages(prev => { - if (!prev[sourceId]) return prev; - const next = { ...prev }; - delete next[sourceId]; - return next; - }); + return () => { + audioCaptureService.off('volume-update', handleVolumeUpdate); + }; }, []); - // 监听 ASR 识别结果 + // 监听来自消息系统的新消息事件 useEffect(() => { const api = window.electronAPI; - if (!api?.on || !sessionInfo?.conversationId) return; + if (!api?.on) return; - // 监听完整句子识别结果(新消息) - const handleSentenceComplete = (message) => { - try { - if (!message) return; - - // 兼容旧格式(result.text) - if (!message.id && message.text) { - const normalized = (message.text || '').trim(); - if (!normalized) return; - const sender = message.sourceId === 'speaker1' ? 
'user' : 'character'; - setMessages(prev => [...prev, { - id: `${Date.now()}`, - conversation_id: sessionInfo.conversationId, - sender, - content: normalized, - timestamp: Date.now() - }]); - return; - } - - // 默认:ASRManager 已经写入数据库并返回 message 记录 - setMessages(prev => [...prev, message]); - // 清除对应的streaming消息 - setStreamingMessages(prev => { - const newState = { ...prev }; - delete newState[message.source_id]; - return newState; - }); - } catch (error) { - console.error('Error handling ASR result:', error); - setError(`处理识别结果失败:${error.message}`); - } + const handleNewMessage = (message) => { + suggestions.handleNewMessage(message); }; - // 监听消息更新事件(更新现有消息内容) - const handleSentenceUpdate = (updatedMessage) => { - try { - if (!updatedMessage || !updatedMessage.id) return; - - setMessages(prev => prev.map(msg => - msg.id === updatedMessage.id - ? { ...msg, content: updatedMessage.content } - : msg - )); - } catch (error) { - console.error('Error handling ASR update:', error); - } - }; + api.on('asr-sentence-complete', handleNewMessage); - // 监听 ASR 错误 - const handleError = (error) => { - console.error('ASR error:', error); - setError(`语音识别错误:${error.error || error.message || '未知错误'}`); - }; - - // 注册监听器 - const handlePartialUpdate = (payload) => { - try { - const sourceId = payload?.sourceId || payload?.sessionId; - const content = payload?.content; - if (!sourceId || !content) return; - const sender = sourceId === 'speaker1' ? 
'user' : 'character'; - updateStreamingMessage(sourceId, sender, content, payload?.timestamp); - } catch (error) { - console.error('Error handling partial update:', error); - } - }; - - const handlePartialClear = (payload) => { - try { - const sourceId = payload?.sourceId || payload?.sessionId; - if (!sourceId) return; - clearStreamingMessage(sourceId); - } catch (error) { - console.error('Error clearing partial message:', error); - } + return () => { + api.removeListener('asr-sentence-complete', handleNewMessage); }; + }, [suggestions]); - api.on('asr-sentence-complete', handleSentenceComplete); - api.on('asr-sentence-update', handleSentenceUpdate); - api.on('asr-error', handleError); - // 临时禁用streaming事件监听 - if (!streamingDisabled) { - api.on('asr-partial-update', handlePartialUpdate); - api.on('asr-partial-clear', handlePartialClear); + useEffect(() => { + if (chatSession.sessionInfo?.conversationId) { + messages.loadMessages(); } + }, [chatSession.sessionInfo?.conversationId]); - return () => { - // 清理监听器 - api.removeListener('asr-sentence-complete', handleSentenceComplete); - api.removeListener('asr-sentence-update', handleSentenceUpdate); - api.removeListener('asr-error', handleError); - if (!streamingDisabled) { - api.removeListener('asr-partial-update', handlePartialUpdate); - api.removeListener('asr-partial-clear', handlePartialClear); - } - }; - }, [sessionInfo?.conversationId]); // 只依赖会话ID,避免函数引用变化导致的重新注册 - - const handleClose = () => { - if (window.electronAPI?.closeHUD) { - window.electronAPI.closeHUD(); + useEffect(() => { + if (chatSession.sessionInfo) { + setShowSelector(false); } - }; + }, [chatSession.sessionInfo]); const handleSwitchSession = () => { setShowSelector(true); - setSessionInfo(null); + setViewMode('full'); + chatSession.handleSwitchSession(); }; - // 监听音量更新事件 - useEffect(() => { - const handleVolumeUpdate = ({ sourceId, volume }) => { - if (sourceId === 'speaker1') { - setMicVolumeLevel(volume); - } else if (sourceId === 'speaker2') 
{ - setSystemVolumeLevel(volume); - } - }; - - audioCaptureService.on('volume-update', handleVolumeUpdate); - - return () => { - audioCaptureService.off('volume-update', handleVolumeUpdate); - }; - }, []); - - const renderTranscriptContent = () => { - if (loading) { - return ( -
-
- ); - } - - if (error) { - // 检查是否是系统音频捕获失败的错误(不应该阻止应用运行) - const isSystemAudioError = error.includes('系统音频捕获失败'); - - return ( -
-

- {isSystemAudioError ? '⚠️ ' : '❌ '}{error} -

-
- ); - } + const handleToggleViewMode = () => { + setViewMode((prev) => (prev === 'full' ? 'compact' : 'full')); + }; - if (!messages.length) { - return ( -
-

- {isListening ? (sessionInfo?.isNew ? '新对话,开始聊天吧!' : '该对话还没有消息') : '点击上方播放按钮开始监听'} -

-
- ); - } + if (showSelector) { + return ; + } - const streamingItems = streamingDisabled ? [] : Object.values(streamingMessages); + if (viewMode === 'compact') { return ( - <> - {messages.map((msg, index) => { - const isUser = msg.sender === 'user'; - const key = msg.id ?? `${msg.sender}-${msg.timestamp ?? index}`; - return ( -
-
{msg.content || msg.text || ''}
-
- ); - })} - {!streamingDisabled && streamingItems.map((msg) => { - const isUser = msg.sender === 'user'; - return ( -
-
- {msg.content} - -
-
- ); - })} - + ); - }; - - if (showSelector) { - return ; } + return ( -
-
-
- - {sessionInfo?.characterName || '心情助手'} -
-
- - - -
-
- -
-
{sessionInfo?.conversationName || '最近互动'}
-
- {renderTranscriptContent()} -
- - {/* 音量显示 */} - {sessionInfo && ( -
-
- 用户 -
-
-
- {micVolumeLevel.toFixed(0)}% -
-
- 角色 -
-
-
- {systemVolumeLevel.toFixed(0)}% - {systemAudioNotAuthorized && ( - ⚠️ - )} -
- {systemAudioNotAuthorized && ( -
- 💡 系统音频未授权,请检查设置 -
- )} -
- )} -
- -
-
AI 建议
-
- {HUD_SUGGESTIONS.map((suggestion) => ( -
-
- {suggestion.title} -
-

{suggestion.body}

-
- {suggestion.badges.map((badge) => ( - - {badge} - - ))} -
-
- ))} -
-
-
+ ); + } const hudRoot = document.getElementById('hud-root'); @@ -837,5 +317,4 @@ if (hudRoot) { ); } else { console.error('HUD root element not found'); -} - +} \ No newline at end of file diff --git a/desktop/src/renderer/hud.layout.css b/desktop/src/renderer/hud.layout.css new file mode 100644 index 0000000..350489d --- /dev/null +++ b/desktop/src/renderer/hud.layout.css @@ -0,0 +1,739 @@ +:root { + font-family: 'Noto Sans SC', 'Segoe UI', system-ui, sans-serif; + color: #1f1d2b; +} + +body { + margin: 0; + background: transparent; +} + +#hud-root { + width: 100vw; + height: 100vh; + overflow: hidden; +} + +.hud-body { + width: 100%; + height: 100%; + background: transparent; +} + +.hud-container { + width: 100%; + height: 100%; + display: flex; + flex-direction: column; + background: rgba(255, 255, 255, 0.92); + border-radius: 20px; + border: 1px solid rgba(255, 255, 255, 0.5); + box-shadow: none; + backdrop-filter: blur(26px); +} + +.hud-content { + flex: 1; + padding: 18px; + padding-bottom: 24px; + /* Ensure bottom padding */ + box-sizing: border-box; + display: flex; + flex-direction: column; + gap: 16px; + overflow-y: auto; + /* Allow scrolling for main content */ + mask-image: none !important; + /* Force remove any mask */ + -webkit-mask-image: none !important; +} + +.selector-container { + width: 100%; + height: 100%; + padding: 18px; + box-sizing: border-box; + display: flex; + flex-direction: column; + gap: 16px; + background: rgba(255, 255, 255, 0.92); + border-radius: 20px; + border: 1px solid rgba(255, 255, 255, 0.5); + box-shadow: 0 20px 50px rgba(0, 0, 0, 0.35), inset 0 0 40px rgba(255, 255, 255, 0.35); + backdrop-filter: blur(26px); +} + +.selector-content { + flex: 1; + display: flex; + flex-direction: column; + gap: 16px; + overflow-y: auto; +} + +.hud-title-section { + display: inline-flex; + align-items: center; + gap: 8px; +} + +.hud-header { + display: flex; + justify-content: space-between; + align-items: center; + -webkit-app-region: 
drag; + /* Enable native window dragging */ +} + +.hud-drag-zone { + display: inline-flex; + align-items: center; + gap: 8px; + cursor: default; + /* CSS drag handles cursor automatically mostly */ + user-select: none; +} + +.hud-drag-zone.hud-dragging { + cursor: default; +} + +.hud-title { + font-weight: 600; + font-size: 15px; + color: #c51662; +} + +.hud-controls { + display: flex; + gap: 10px; + -webkit-app-region: no-drag; + /* Disable drag for controls */ +} + +.control-btn { + width: 32px; + height: 32px; + border-radius: 50%; + border: none; + background: rgba(197, 22, 98, 0.12); + color: #c51662; + font-size: 20px; + cursor: pointer; + transition: transform 0.2s ease, background 0.2s ease; + display: flex; + align-items: center; + justify-content: center; + -webkit-app-region: no-drag; + /* Double ensure */ +} + +.control-btn svg { + width: 16px; + height: 16px; +} + +.control-btn:hover { + background: rgba(197, 22, 98, 0.25); + transform: scale(1.08); +} + +.suggestion-action-btn { + height: 24px; + padding: 0 8px; + border-radius: 12px; + border: 1px solid rgba(197, 22, 98, 0.3); + background: rgba(197, 22, 98, 0.05); + color: #c51662; + font-size: 11px; + font-weight: 600; + cursor: pointer; + transition: all 0.2s ease; + white-space: nowrap; + display: flex; + align-items: center; + justify-content: center; + -webkit-app-region: no-drag; +} + +.suggestion-action-btn:hover { + background: rgba(197, 22, 98, 0.15); + border-color: rgba(197, 22, 98, 0.5); + transform: translateY(-1px); +} + +.suggestion-action-btn:disabled { + opacity: 0.6; + cursor: not-allowed; + transform: none; + background: transparent; + border-color: rgba(0, 0, 0, 0.1); + color: rgba(0, 0, 0, 0.3); +} + +.status-indicator { + width: 8px; + height: 8px; + border-radius: 50%; + background: #22c55e; + animation: pulse 2s infinite; +} + +@keyframes pulse { + 0% { + opacity: 1; + } + + 50% { + opacity: 0.4; + } + + 100% { + opacity: 1; + } +} + +.hud-section { + display: flex; + 
flex-direction: column; + gap: 8px; +} + +.section-label { + font-size: 12px; + font-weight: 700; + text-transform: uppercase; + letter-spacing: 0.5px; + color: #974e6e; +} + +.transcript-area { + min-height: 140px; + max-height: 170px; + padding: 10px; + border-radius: 14px; + background: rgba(15, 23, 42, 0.04); + border: 1px solid rgba(15, 23, 42, 0.1); + overflow-y: auto; + display: flex; + flex-direction: column; + gap: 6px; +} + +.hud-status { + text-align: center; + flex: 1; + display: flex; + flex-direction: column; + justify-content: center; + align-items: center; + padding: 12px; + gap: 6px; + color: rgba(15, 23, 42, 0.85); +} + +.hud-status-text { + font-size: 13px; +} + +.hud-error { + color: #c51662; +} + +.hud-warning { + color: #f59e0b; + background: rgba(245, 158, 11, 0.1); + border: 1px solid rgba(245, 158, 11, 0.3); + border-radius: 8px; + padding: 12px; + margin: 8px 0; +} + +.hud-spinner { + width: 26px; + height: 26px; + border-radius: 50%; + border: 3px solid rgba(255, 255, 255, 0.5); + border-bottom-color: #c51662; + animation: spinner 0.9s linear infinite; + box-shadow: 0 0 12px rgba(197, 22, 98, 0.35); +} + +@keyframes spinner { + to { + transform: rotate(360deg); + } +} + +.message-item { + display: flex; +} + +.message-other { + justify-content: flex-start; +} + +.message-user { + justify-content: flex-end; +} + +.message-bubble { + max-width: 78%; + padding: 10px 14px; + border-radius: 16px; + font-size: 13px; + line-height: 1.5; + word-break: break-word; + background: rgba(255, 255, 255, 0.9); + color: #111827; + box-shadow: 0 6px 16px rgba(17, 24, 39, 0.08); +} + +.message-user .message-bubble { + background: rgba(197, 22, 98, 0.12); + color: #c51662; +} + +.message-streaming .message-bubble { + opacity: 0.85; + border: 1px dashed rgba(197, 22, 98, 0.4); + position: relative; +} + +.message-streaming-indicator { + display: inline-block; + margin-left: 6px; + color: rgba(197, 22, 98, 0.8); + animation: pulseDots 1.2s ease-in-out 
infinite; +} + +@keyframes pulseDots { + 0% { + opacity: 0.2; + } + + 50% { + opacity: 1; + } + + 100% { + opacity: 0.2; + } +} + +.suggestions-grid { + display: flex; + flex-direction: column; + gap: 12px; +} + +.suggestion-card { + padding: 12px 14px; + border-radius: 16px; + background: linear-gradient(135deg, rgba(197, 22, 98, 0.1), rgba(142, 36, 170, 0.08)); + border: 1px solid rgba(197, 22, 98, 0.3); + box-shadow: inset 0 0 12px rgba(255, 255, 255, 0.3); + transition: transform 0.2s ease; + cursor: pointer; + position: relative; +} + +.suggestion-card:hover { + transform: translateY(-2px); +} + +.suggestion-card.copied { + border-color: rgba(16, 185, 129, 0.65); + box-shadow: inset 0 0 12px rgba(255, 255, 255, 0.3), 0 0 0 2px rgba(16, 185, 129, 0.25); +} + +.suggestion-card:focus-visible { + outline: 2px solid rgba(99, 102, 241, 0.55); + outline-offset: 2px; +} + +.suggestion-selected-indicator { + position: absolute; + top: 10px; + right: 10px; + width: 22px; + height: 22px; + border-radius: 999px; + border: 1px solid rgba(16, 185, 129, 0.55); + background: rgba(16, 185, 129, 0.12); + color: rgba(16, 185, 129, 0.95); + font-size: 14px; + font-weight: 800; + display: flex; + align-items: center; + justify-content: center; +} + +.suggestion-header { + display: flex; + justify-content: space-between; + align-items: flex-start; + margin-bottom: 6px; + color: #c51662; + font-size: 13px; + font-weight: 600; +} + +.suggestion-body { + font-size: 13px; + line-height: 1.5; + color: rgba(15, 23, 42, 0.9); + margin: 0; +} + +.suggestion-meta { + display: flex; + gap: 6px; + margin-top: 10px; + flex-wrap: wrap; +} + +.suggestion-tags { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(0, 1fr)); + gap: 6px; + margin-top: 10px; + width: 100%; +} + +.suggestion-tag { + text-align: center; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} + +.suggestion-actions { + display: flex; + align-items: center; + gap: 8px; +} + 
+.suggestion-toggle { + display: inline-flex; + align-items: center; + gap: 4px; + padding: 2px 6px; + border-radius: 999px; + border: 1px solid rgba(197, 22, 98, 0.25); + background: rgba(255, 255, 255, 0.55); + color: rgba(15, 23, 42, 0.8); + font-size: 10px; + user-select: none; +} + +.suggestion-toggle input { + width: 12px; + height: 12px; +} + +.suggestion-card-actions { + display: flex; + justify-content: flex-end; + margin-top: 10px; +} + +.suggestion-select-btn { + border: 1px solid rgba(197, 22, 98, 0.25); + background: rgba(255, 255, 255, 0.55); + color: rgba(15, 23, 42, 0.85); + border-radius: 999px; + padding: 3px 8px; + font-size: 10.5px; + font-weight: 600; +} + +.suggestion-select-btn:hover { + background: rgba(255, 255, 255, 0.75); +} + +.suggestion-badge { + padding: 1px 6px; + font-size: 9px; + font-weight: 600; + border-radius: 999px; + background: rgba(255, 255, 255, 0.5); + color: #5b21b6; +} + +/* 换一批按钮容器 */ +.suggestions-refresh-container { + display: flex; + justify-content: flex-end; + margin-top: 12px; + padding-right: 4px; +} + +/* 角色选择器样式 */ +.character-grid { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(140px, 1fr)); + gap: 12px; + padding: 8px 0; +} + +.character-card { + background: rgba(255, 255, 255, 0.6); + border-radius: 14px; + padding: 12px; + cursor: pointer; + border: 1px solid rgba(255, 255, 255, 0.4); + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.05); + transition: transform 0.2s ease, box-shadow 0.2s ease; + text-align: center; +} + +.character-card:hover { + transform: translateY(-4px); + box-shadow: 0 8px 24px rgba(0, 0, 0, 0.1); +} + +.character-avatar { + width: 48px; + height: 48px; + border-radius: 50%; + margin: 0 auto 8px; + display: flex; + align-items: center; + justify-content: center; + color: white; + font-size: 18px; + font-weight: bold; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); +} + +.character-info h3 { + font-size: 14px; + font-weight: 600; + margin: 0 0 4px 0; + color: #1f1d2b; +} + 
+.character-relationship { + font-size: 11px; + color: #6b7280; + margin: 0 0 8px 0; +} + +.character-stats { + display: flex; + justify-content: center; + align-items: center; + gap: 4px; + font-size: 11px; +} + +.affinity-label { + color: #6b7280; +} + +.affinity-value { + color: #c51662; + font-weight: 600; +} + +.back-button { + background: rgba(197, 22, 98, 0.1); + color: #c51662; + border: 1px solid rgba(197, 22, 98, 0.3); + padding: 8px 12px; + border-radius: 8px; + cursor: pointer; + font-size: 12px; + transition: background 0.2s ease; + align-self: flex-start; +} + +.back-button:hover { + background: rgba(197, 22, 98, 0.2); +} + +.new-conversation-btn { + background: linear-gradient(135deg, #c51662, #8e24aa); + color: white; + border: none; + padding: 12px 16px; + border-radius: 12px; + cursor: pointer; + font-size: 13px; + font-weight: 600; + display: flex; + align-items: center; + justify-content: center; + gap: 8px; + box-shadow: 0 4px 12px rgba(197, 22, 98, 0.3); + transition: transform 0.2s ease, box-shadow 0.2s ease; +} + +.new-conversation-btn:hover { + transform: translateY(-2px); + box-shadow: 0 6px 20px rgba(197, 22, 98, 0.4); +} + +.new-conversation-icon { + font-size: 16px; + font-weight: bold; +} + +.no-conversations { + text-align: center; + padding: 24px; + color: #6b7280; +} + +.no-conversations-text { + margin: 0 0 8px 0; + font-size: 13px; +} + +.no-conversations-hint { + margin: 0; + font-size: 11px; + color: #9ca3af; +} + +.conversation-list { + display: flex; + flex-direction: column; + gap: 8px; +} + +.conversation-item { + background: rgba(255, 255, 255, 0.6); + border-radius: 12px; + padding: 12px; + cursor: pointer; + border: 1px solid rgba(255, 255, 255, 0.4); + box-shadow: 0 2px 6px rgba(0, 0, 0, 0.05); + transition: transform 0.2s ease, box-shadow 0.2s ease; + display: flex; + justify-content: space-between; + align-items: center; +} + +.conversation-item:hover { + transform: translateY(-2px); + box-shadow: 0 4px 12px rgba(0, 0, 
0, 0.1); +} + +.conversation-info h4 { + margin: 0 0 4px 0; + font-size: 13px; + font-weight: 600; + color: #1f1d2b; + cursor: pointer; + user-select: none; + transition: color 0.2s ease; +} + +.conversation-info h4:hover { + color: #c51662; +} + +.conversation-meta { + margin: 0; + font-size: 11px; + color: #6b7280; +} + +.conversation-arrow { + color: #c51662; + font-weight: bold; + font-size: 14px; +} + +/* Listening hover badge */ +.listening-badge { + position: relative; + display: inline-flex; + align-items: center; + gap: 6px; + margin-left: 8px; + padding: 4px 8px; + border-radius: 10px; + background: rgba(197, 22, 98, 0.08); + color: #c51662; + font-weight: 700; + font-size: 12px; + cursor: default; + -webkit-app-region: no-drag; +} + +.listening-label { + letter-spacing: 0.4px; +} + +.listening-popover { + position: absolute; + left: 0; + top: 100%; + margin-top: 6px; + padding: 10px; + min-width: 180px; + background: rgba(255, 255, 255, 0.96); + border: 1px solid rgba(197, 22, 98, 0.15); + border-radius: 10px; + box-shadow: 0 10px 30px rgba(0, 0, 0, 0.15); + opacity: 0; + pointer-events: none; + transform: translateY(-4px); + transition: all 0.15s ease; + z-index: 9; +} + +.listening-badge:hover .listening-popover { + opacity: 1; + pointer-events: auto; + transform: translateY(0); +} + +.listening-row { + display: flex; + align-items: center; + gap: 6px; + margin-bottom: 6px; +} + +.listening-row:last-child { + margin-bottom: 0; +} + +.listening-tag { + font-size: 11px; + font-weight: 700; + color: #c51662; + width: 32px; +} + +.listening-bar { + flex: 1; + height: 8px; + background: rgba(15, 23, 42, 0.08); + border-radius: 4px; + overflow: hidden; +} + +.listening-bar-fill { + height: 100%; + transition: width 0.12s ease; +} + +.listening-bar-fill.mic { + background: linear-gradient(to right, #3b82f6, #2563eb); +} + +.listening-bar-fill.sys { + background: linear-gradient(to right, #a855f7, #9333ea); +} + +.listening-value { + font-size: 11px; + color: 
#6b7280; + width: 36px; + text-align: right; +} diff --git a/desktop/src/renderer/hud.responsive.css b/desktop/src/renderer/hud.responsive.css new file mode 100644 index 0000000..05ece86 --- /dev/null +++ b/desktop/src/renderer/hud.responsive.css @@ -0,0 +1,159 @@ + +.conversation-edit-form { + display: flex; + flex-direction: column; + gap: 8px; + width: 100%; +} + +.conversation-title-input { + background: rgba(255, 255, 255, 0.9); + border: 1px solid rgba(197, 22, 98, 0.3); + border-radius: 8px; + padding: 8px 12px; + font-size: 13px; + font-weight: 600; + color: #1f1d2b; + outline: none; + transition: border-color 0.2s ease; + width: 100%; + box-sizing: border-box; +} + +.conversation-title-input:focus { + border-color: #c51662; + box-shadow: 0 0 0 3px rgba(197, 22, 98, 0.1); +} + +.conversation-edit-actions { + display: flex; + gap: 8px; + justify-content: flex-end; +} + +.edit-btn { + background: rgba(255, 255, 255, 0.9); + border: 1px solid rgba(197, 22, 98, 0.3); + border-radius: 6px; + padding: 4px 12px; + font-size: 14px; + cursor: pointer; + transition: all 0.2s ease; + color: #1f1d2b; +} + +.edit-btn.save-btn { + background: #c51662; + color: white; + border-color: #c51662; +} + +.edit-btn.save-btn:hover { + background: #a0124f; + border-color: #a0124f; +} + +.edit-btn.cancel-btn:hover { + background: rgba(0, 0, 0, 0.05); + border-color: rgba(197, 22, 98, 0.5); +} + +/* ======================================== */ +/* 响应式设计 +/* ======================================== */ + +/* 音量指示器样式 */ +.volume-indicators { + display: flex; + flex-direction: column; + gap: 8px; + margin-top: 8px; + padding: 8px; + background: rgba(15, 23, 42, 0.03); + border-radius: 8px; +} + +.volume-item { + display: flex; + align-items: center; + gap: 8px; + font-size: 11px; +} + +.volume-label { + width: 32px; + color: rgba(15, 23, 42, 0.7); + font-weight: 600; +} + +.volume-bar-container { + flex: 1; + height: 6px; + background: rgba(15, 23, 42, 0.1); + border-radius: 3px; + 
overflow: hidden; +} + +.volume-bar { + height: 100%; + border-radius: 3px; + transition: width 0.1s ease; +} + +.volume-bar-mic { + background: linear-gradient(to right, #3b82f6, #2563eb); +} + +.volume-bar-system { + background: linear-gradient(to right, #a855f7, #9333ea); +} + +.volume-value { + width: 32px; + text-align: right; + color: rgba(15, 23, 42, 0.6); + font-size: 10px; +} + +/* 系统音频未授权提示 */ +.volume-warning { + margin-left: 4px; + cursor: help; +} + +.system-audio-hint { + font-size: 10px; + color: #f59e0b; + background: rgba(245, 158, 11, 0.1); + padding: 6px 10px; + border-radius: 6px; + border: 1px solid rgba(245, 158, 11, 0.2); + margin-top: 4px; +} + +@media (max-width: 768px) { + .hud-container { + height: 100%; + /* max-height removed to fill window */ + } + + .hud-header { + padding: 12px 16px; + } + + .hud-title { + font-size: 14px; + } + + .transcript-area { + height: 120px; + } + + .suggestions-grid { + grid-template-columns: 1fr; + } + + .asr-speakers { + grid-template-columns: 1fr; + } +} \ No newline at end of file diff --git a/desktop/src/renderer/main.jsx b/desktop/src/renderer/main.jsx index 54b4a0e..b0d7c32 100644 --- a/desktop/src/renderer/main.jsx +++ b/desktop/src/renderer/main.jsx @@ -1,14 +1,14 @@ import React from 'react'; import ReactDOM from 'react-dom/client'; -import { BrowserRouter } from 'react-router-dom'; +import { HashRouter } from 'react-router-dom'; import App from './App'; import './index.css'; ReactDOM.createRoot(document.getElementById('root')).render( - + - + ); diff --git a/desktop/src/renderer/pages/ASRConfigForm.jsx b/desktop/src/renderer/pages/ASRConfigForm.jsx new file mode 100644 index 0000000..ba18b1e --- /dev/null +++ b/desktop/src/renderer/pages/ASRConfigForm.jsx @@ -0,0 +1,139 @@ +import { languageOptions, formatBytes } from './asrSettingsUtils'; + +export function ASRConfigForm({ + formData, + setFormData, + modelPresets, + selectedModelPreset, + onCreate, + onCancel, +}) { + return ( +
+

添加 ASR 配置

+ +
+
+ + +

+ {selectedModelPreset + ? `${selectedModelPreset.description} · 推荐: ${selectedModelPreset.recommendedSpec}` + : '选择模型后可查看详细说明'} +

+
+ +
+ + +
+ +
+ + setFormData({ ...formData, sentence_pause_threshold: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500" + /> +

检测到停顿超过此时间(秒)时进行分句

+
+ +
+ setFormData({ ...formData, enable_vad: e.target.checked })} + className="h-4 w-4 text-blue-600 focus:ring-blue-500 border-gray-300 rounded" + /> + +
+ +
+ setFormData({ ...formData, retain_audio_files: e.target.checked })} + className="h-4 w-4 text-blue-600 focus:ring-blue-500 border-gray-300 rounded" + /> + +
+ + {formData.retain_audio_files && ( +
+ + setFormData({ ...formData, audio_retention_days: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500" + /> +
+ )} + + {formData.retain_audio_files && ( +
+ + setFormData({ ...formData, audio_storage_path: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500" + /> +

留空使用默认路径,或指定自定义路径

+
+ )} +
+ +
+ + +
+
+ ); +} + + diff --git a/desktop/src/renderer/pages/ASRModelCard.jsx b/desktop/src/renderer/pages/ASRModelCard.jsx new file mode 100644 index 0000000..655cd7c --- /dev/null +++ b/desktop/src/renderer/pages/ASRModelCard.jsx @@ -0,0 +1,149 @@ +import { formatBytes, formatSpeed, calculateProgress, isPresetActive, engineNames } from './asrSettingsUtils'; + +// 过滤 ANSI 转义序列 +function stripAnsi(str) { + if (!str) return str; + // eslint-disable-next-line no-control-regex + return str.replace(/\x1b\[[0-9;]*[a-zA-Z]|\[A/g, '').trim(); +} + +export function ASRModelCard({ + preset, + status = {}, + activeModelId, + savingModelId, + modelsLoading, + onSetActive, + onDownload, + onCancelDownload, +}) { + const totalBytes = status.totalBytes || status.sizeBytes || preset.sizeBytes || 0; + const downloadedBytes = status.downloadedBytes || 0; + const percent = calculateProgress(downloadedBytes, totalBytes); + const isDownloaded = Boolean(status.isDownloaded); + const activeDownload = Boolean(status.activeDownload); + const isActive = isPresetActive(preset, activeModelId); + const updatedAt = status.updatedAt ? new Date(status.updatedAt).toLocaleString() : null; + const progressVisible = (totalBytes > 0 && (activeDownload || (downloadedBytes > 0 && !isDownloaded))) || (activeDownload && status.progressMessage); + const engine = preset.engine || 'funasr'; + const canResume = !isDownloaded && downloadedBytes > 0 && !activeDownload; + const progressMessage = stripAnsi(status.progressMessage); + // 是否有进度条(有字节数据) + const hasProgressBar = totalBytes > 0 && percent > 0; + + return ( +
+
+
+
+

{preset.label}

+ + {engineNames[engine] || engine} + +
+

{preset.description}

+ {preset.language && ( +

+ 语言: {preset.language === 'zh' ? '中文' : preset.language === 'multilingual' ? '多语言' : preset.language} +

+ )} +
+ {isActive && ( + + 当前使用 + + )} +
+ +
+

推荐配置:{preset.recommendedSpec}

+

速度参考:{preset.speedHint}

+

模型大小:{formatBytes(preset.sizeBytes)}

+
+ +
+ {isDownloaded || preset.sizeBytes === 0 ? ( +
+ check_circle + {preset.sizeBytes === 0 ? '无需下载 (云端)' : `本地可用${updatedAt ? ` · 更新于 ${updatedAt}` : ''}`} +
+ ) : status.lastError ? ( +
+ error + 上次下载失败:{status.lastError} +
+ ) : ( +
+ {activeDownload ? '正在下载模型...' : '尚未下载,点击下方按钮开始下载'} +
+ )} +
+ + {progressVisible && ( +
+ {totalBytes > 0 && percent > 0 ? ( + <> +
+
+
+
+ + {formatBytes(downloadedBytes)} / {formatBytes(totalBytes)} ({percent}%) + + 速度:{formatSpeed(status.bytesPerSecond)} +
+ + ) : ( +
+
+ {progressMessage || '正在准备下载...'} +
+ )} +
+ )} + +
+ {isDownloaded || preset.sizeBytes === 0 ? ( + + ) : ( + + )} + + {activeDownload && ( + + )} +
+
+ ); +} + diff --git a/desktop/src/renderer/pages/ASRSettings.jsx b/desktop/src/renderer/pages/ASRSettings.jsx index fa2220a..ab4af8c 100644 --- a/desktop/src/renderer/pages/ASRSettings.jsx +++ b/desktop/src/renderer/pages/ASRSettings.jsx @@ -1,53 +1,11 @@ import { useState, useEffect } from 'react'; import { Link } from 'react-router-dom'; - -const SIZE_UNITS = ['B', 'KB', 'MB', 'GB', 'TB']; - -function formatBytes(bytes) { - if (!bytes || bytes <= 0) { - return '0 B'; - } - const exponent = Math.min( - Math.floor(Math.log(bytes) / Math.log(1024)), - SIZE_UNITS.length - 1 - ); - const value = bytes / (1024 ** exponent); - return `${value.toFixed(value >= 10 || exponent === 0 ? 0 : 1)} ${SIZE_UNITS[exponent]}`; -} - -function formatSpeed(bytesPerSecond) { - if (!bytesPerSecond || bytesPerSecond <= 0) { - return '—'; - } - return `${formatBytes(bytesPerSecond)}/s`; -} - -function buildStatusMap(statusList = []) { - return statusList.reduce((acc, status) => { - if (!status?.modelId) { - return acc; - } - acc[status.modelId] = { - bytesPerSecond: 0, - ...status, - }; - return acc; - }, {}); -} - -function calculateProgress(downloadedBytes, totalBytes) { - if (!totalBytes || totalBytes <= 0) { - return 0; - } - return Math.min(100, Math.round((downloadedBytes / totalBytes) * 100)); -} - -function isPresetActive(preset, activeModelId) { - if (!activeModelId) { - return false; - } - return activeModelId === preset.id || activeModelId === preset.repoId; -} +import { ASRModelCard } from './ASRModelCard'; +import { ASRConfigForm } from './ASRConfigForm'; +import { + buildStatusMap, + engineNames, +} from './asrSettingsUtils'; /** * ASR(语音识别)设置页面 @@ -59,6 +17,9 @@ function ASRSettings() { const [loading, setLoading] = useState(true); const [showAddConfig, setShowAddConfig] = useState(false); const [editingConfig, setEditingConfig] = useState(null); + const [testingASR, setTestingASR] = useState(false); + const [testResult, setTestResult] = useState(null); + const [testError, 
setTestError] = useState(''); // ASR 模型(支持多引擎) const [modelPresets, setModelPresets] = useState([]); @@ -69,9 +30,16 @@ function ASRSettings() { const [savingModelId, setSavingModelId] = useState(null); const [downloadSource, setDownloadSource] = useState('huggingface'); + // 模型缓存目录(HF / ModelScope) + const [cacheInfo, setCacheInfo] = useState(null); + const [cacheLoading, setCacheLoading] = useState(true); + const [cacheSaving, setCacheSaving] = useState(false); + const [cacheError, setCacheError] = useState(''); + const [cacheNotice, setCacheNotice] = useState(''); + // 按引擎分组模型 const modelsByEngine = modelPresets.reduce((acc, preset) => { - const engine = preset.engine || 'faster-whisper'; + const engine = preset.engine || 'funasr'; if (!acc[engine]) { acc[engine] = []; } @@ -79,33 +47,22 @@ function ASRSettings() { return acc; }, {}); - const engineNames = { - 'funasr': 'FunASR', - 'faster-whisper': 'Faster-Whisper' - }; - // 表单数据 const [formData, setFormData] = useState({ - model_name: 'medium', + model_name: 'siliconflow-cloud', language: 'zh', enable_vad: true, - sentence_pause_threshold: 1.0, + // 云端默认更灵敏;FunASR 实际会在主进程侧做下限保护,不会被该默认值影响 + sentence_pause_threshold: 0.6, retain_audio_files: false, audio_retention_days: 30, audio_storage_path: '' }); - // 语言选项 - const languageOptions = [ - { value: 'zh', label: '中文' }, - { value: 'en', label: '英文' }, - { value: 'ja', label: '日文' }, - { value: 'auto', label: '自动检测' } - ]; - useEffect(() => { loadASRConfigs(); loadModelData(); + loadCacheInfo(); const api = window.electronAPI; if (!api) { @@ -125,6 +82,7 @@ function ASRSettings() { modelId: payload.modelId, activeDownload: true, bytesPerSecond: 0, + lastError: null, // 清除上一次错误 }, }; }); @@ -145,6 +103,25 @@ function ASRSettings() { bytesPerSecond: payload.bytesPerSecond ?? previous.bytesPerSecond ?? 
0, activeDownload: true, isDownloaded: false, + // 如果 progress 事件里带了 message,也更新 + progressMessage: payload.message || previous.progressMessage, + }, + }; + }); + })); + } + + if (api.onAsrModelDownloadLog) { + cleanups.push(api.onAsrModelDownloadLog((payload) => { + setModelStatuses((prev) => { + const previous = prev[payload.modelId] || { modelId: payload.modelId }; + return { + ...prev, + [payload.modelId]: { + ...previous, + modelId: payload.modelId, + progressMessage: payload.message, + activeDownload: true, }, }; }); @@ -160,6 +137,7 @@ function ASRSettings() { ...(status.modelId ? status : { ...status, modelId: payload.modelId }), bytesPerSecond: 0, activeDownload: false, + lastError: null, }, })); })); @@ -167,6 +145,9 @@ function ASRSettings() { if (api.onAsrModelDownloadError) { cleanups.push(api.onAsrModelDownloadError((payload) => { + const reason = + payload?.message || + (payload?.code ? `进程退出码 ${payload.code}${payload?.signal ? `, 信号 ${payload.signal}` : ''}` : '未知错误'); setModelStatuses((prev) => { const previous = prev[payload.modelId] || { modelId: payload.modelId }; return { @@ -175,9 +156,11 @@ function ASRSettings() { ...previous, modelId: payload.modelId, activeDownload: false, + lastError: reason, }, }; }); + alert(`下载模型失败:${reason}`); })); } @@ -206,6 +189,77 @@ function ASRSettings() { }; }, []); + const loadCacheInfo = async () => { + const api = window.electronAPI; + if (!api?.appGetModelCachePaths) { + setCacheLoading(false); + return; + } + setCacheLoading(true); + setCacheError(''); + try { + const res = await api.appGetModelCachePaths(); + if (!res?.ok) { + throw new Error(res?.message || '获取缓存目录失败'); + } + setCacheInfo(res); + } catch (error) { + setCacheError(error?.message || String(error)); + } finally { + setCacheLoading(false); + } + }; + + const handlePickCacheDir = async () => { + const api = window.electronAPI; + if (!api?.appSelectDirectory || !api?.appSetAsrCacheBase) { + setCacheError('当前版本不支持通过 GUI 配置缓存目录'); + return; + } 
+ setCacheNotice(''); + setCacheError(''); + try { + const selected = await api.appSelectDirectory({ title: '选择模型缓存目录(HF / ModelScope)' }); + if (selected?.canceled || !selected?.path) { + return; + } + setCacheSaving(true); + const res = await api.appSetAsrCacheBase(selected.path); + if (!res?.ok) { + throw new Error(res?.message || '保存缓存目录失败'); + } + setCacheNotice('已保存:ASR 将自动重载以应用新缓存目录(可能需要 10-30 秒)。'); + await loadCacheInfo(); + } catch (error) { + setCacheError(error?.message || String(error)); + } finally { + setCacheSaving(false); + } + }; + + const handleResetCacheDir = async () => { + const api = window.electronAPI; + if (!api?.appSetAsrCacheBase) { + setCacheError('当前版本不支持通过 GUI 配置缓存目录'); + return; + } + setCacheNotice(''); + setCacheError(''); + try { + setCacheSaving(true); + const res = await api.appSetAsrCacheBase(null); + if (!res?.ok) { + throw new Error(res?.message || '重置缓存目录失败'); + } + setCacheNotice('已重置为默认目录:ASR 将自动重载以应用变更。'); + await loadCacheInfo(); + } catch (error) { + setCacheError(error?.message || String(error)); + } finally { + setCacheSaving(false); + } + }; + const loadModelData = async () => { try { setModelsError(''); @@ -229,6 +283,18 @@ function ASRSettings() { }; const handleDownloadModel = async (modelId) => { + // 先标记前端状态,按钮/文案立刻反馈,便于“继续下载”场景 + setModelStatuses((prev) => ({ + ...prev, + [modelId]: { + ...(prev[modelId] || { modelId }), + modelId, + activeDownload: true, + lastError: null, + bytesPerSecond: 0, + }, + })); + try { const api = window.electronAPI; if (!api?.asrDownloadModel) { @@ -237,6 +303,15 @@ function ASRSettings() { await api.asrDownloadModel(modelId, downloadSource); } catch (err) { console.error('下载模型失败:', err); + setModelStatuses((prev) => ({ + ...prev, + [modelId]: { + ...(prev[modelId] || { modelId }), + modelId, + activeDownload: false, + lastError: err.message || '未知错误', + }, + })); alert('下载模型失败:' + (err.message || '未知错误')); } }; @@ -405,10 +480,10 @@ function ASRSettings() { // 重置表单 const resetForm 
= () => { setFormData({ - model_name: modelPresets[0]?.id || 'medium', + model_name: modelPresets[0]?.id || 'siliconflow-cloud', language: 'zh', enable_vad: true, - sentence_pause_threshold: 1.0, + sentence_pause_threshold: 0.6, retain_audio_files: false, audio_retention_days: 30, audio_storage_path: '' @@ -417,128 +492,101 @@ function ASRSettings() { // 测试 ASR 功能 const testASR = async () => { - alert('ASR 测试功能:系统将使用当前默认配置进行语音识别测试。\n\n请确保:\n1. 已选择正确的音频输入设备\n2. 麦克风权限已授权\n3. 环境相对安静'); - }; - - const selectedModelPreset = modelPresets.find((preset) => preset.id === formData.model_name); - - const renderModelCard = (preset) => { - const status = modelStatuses[preset.id] || {}; - const totalBytes = status.totalBytes || status.sizeBytes || preset.sizeBytes || 0; - const downloadedBytes = status.downloadedBytes || 0; - const percent = calculateProgress(downloadedBytes, totalBytes); - const isDownloaded = Boolean(status.isDownloaded); - const activeDownload = Boolean(status.activeDownload); - const isActive = isPresetActive(preset, activeModelId); - const updatedAt = status.updatedAt ? new Date(status.updatedAt).toLocaleString() : null; - const progressVisible = totalBytes > 0 && (activeDownload || (downloadedBytes > 0 && !isDownloaded)); - const engine = preset.engine || 'faster-whisper'; - - return ( -
-
-
-
-

{preset.label}

- - {engineNames[engine] || engine} - -
-

{preset.description}

- {preset.language && ( -

- 语言: {preset.language === 'zh' ? '中文' : preset.language === 'multilingual' ? '多语言' : preset.language} -

- )} -
- {isActive && ( - - 当前使用 - - )} -
+ if (testingASR) return; + setTestingASR(true); + setTestResult(null); + setTestError(''); + + let captureService = null; + let sentenceListener = null; + let testConversationId = null; + const cleanupListener = () => { + if (sentenceListener) { + window.electronAPI?.removeListener?.('asr-sentence-complete', sentenceListener); + sentenceListener = null; + } + }; -
-

推荐配置:{preset.recommendedSpec}

-

速度参考:{preset.speedHint}

-

模型大小:{formatBytes(preset.sizeBytes)}

-
+ try { + const api = window.electronAPI; + if (!api) throw new Error('electronAPI 不可用'); + + // 确保测试角色存在 + try { + await api.createCharacter({ + id: 'asr-test-character', + name: 'ASR 测试角色', + nickname: '测试', + affinity: 50, + created_at: Date.now(), + updated_at: Date.now() + }); + } catch { + // 角色可能已存在,忽略错误 + } -
- {isDownloaded ? ( -
- check_circle - - 本地可用{updatedAt ? ` · 更新于 ${updatedAt}` : ''} - -
- ) : ( -
- {activeDownload ? '正在下载模型...' : '尚未下载,点击下方按钮开始下载'} -
- )} -
+ // 创建一个临时对话,便于把识别结果保存/回显 + const conversation = await api.dbCreateConversation({ + id: 'asr-settings-test', + character_id: 'asr-test-character', + title: 'ASR 测试', + date: Date.now(), + affinity_change: 0, + summary: 'ASR 设置页测试会话', + tags: null, + created_at: Date.now(), + updated_at: Date.now() + }); + testConversationId = conversation?.id || 'asr-settings-test'; - {progressVisible && ( -
-
-
-
-
- - {formatBytes(downloadedBytes)} / {formatBytes(totalBytes)} ({percent}%) - - 速度:{formatSpeed(status.bytesPerSecond)} -
-
- )} + // 1) 检查模型就绪 + const ready = await api.asrCheckReady(); + if (!ready?.ready) { + throw new Error(ready?.message || 'ASR 模型未就绪,请先下载并设为默认'); + } -
- {isDownloaded ? ( - - ) : ( - - )} + // 2) 启动 ASR(使用测试会话 ID) + await api.asrStart(testConversationId); - {activeDownload && ( - - )} -
-
- ); + // 3) 监听识别结果(拿到一句就停) + sentenceListener = (payload) => { + const finalText = payload?.text || payload?.content; + if (!finalText) return; + setTestResult(finalText); + if (captureService) { + captureService.stopCapture('speaker1').catch(() => {}); + } + api.asrStop().catch(() => {}); + setTestingASR(false); + cleanupListener(); + }; + api.on('asr-sentence-complete', sentenceListener); + + // 4) 启动麦克风采集,录 6 秒 + // audio-capture-service 默认导出的是单例实例,而非类 + const { default: audioCaptureService } = await import('../../asr/audio-capture-service'); + captureService = audioCaptureService; + + await captureService.startMicrophoneCapture('speaker1'); + // 超时保护:6 秒后自动停止 + setTimeout(() => { + if (captureService) { + captureService.stopCapture('speaker1').catch(() => {}); + } + api.asrStop().catch(() => {}); + setTestingASR(false); + cleanupListener(); + }, 6000); + } catch (err) { + console.error('ASR 测试失败:', err); + setTestError(err.message || '未知错误'); + cleanupListener(); + setTestingASR(false); + } }; + const selectedModelPreset = modelPresets.find((preset) => preset.id === formData.model_name); + if (loading) { return (
@@ -568,6 +616,85 @@ function ASRSettings() {
+ {/* 模型缓存目录 */} +
+
+
+
+

模型缓存目录

+

+ 管理 FunASR / HuggingFace / ModelScope 的模型下载位置,方便跨平台统一与迁移。 +

+
+
+ + + +
+
+ + {cacheNotice && ( +
+ {cacheNotice} +
+ )} + {cacheError && ( +
+ {cacheError} +
+ )} + + {cacheLoading ? ( +
正在读取缓存目录…
+ ) : cacheInfo?.computed ? ( +
+
+
ASR 缓存根目录(ASR_CACHE_BASE)
+
{cacheInfo.computed.asrCacheBase}
+
+
+
HuggingFace 缓存(HF_HOME / hub)
+
{cacheInfo.computed.hfHome}
+
{cacheInfo.computed.asrCacheDir}
+
+
+
ModelScope 缓存(MODELSCOPE_CACHE / hub)
+
{cacheInfo.computed.modelscopeCacheBase}
+
{cacheInfo.computed.modelscopeCacheHub}
+
+ {cacheInfo?.persistedAsrCacheBase && ( +
+
已保存的自定义目录
+
{cacheInfo.persistedAsrCacheBase}
+
+ )} +
+ ) : ( +
+ 当前未能读取缓存目录信息(可能是旧版本主进程未实现对应 IPC)。 +
+ )} +
+
+ {/* 模型管理 */}
@@ -637,7 +764,19 @@ function ASRSettings() {
- {presets.map((preset) => renderModelCard(preset))} + {presets.map((preset) => ( + + ))}
))} @@ -647,26 +786,69 @@ function ASRSettings() { {/* 默认配置信息 */} {asrDefaultConfig && ( -
-
-
- - - -
-
-

- 当前默认配置: {asrDefaultConfig.model_name} -

-
-

语言: {asrDefaultConfig.language === 'zh' ? '中文' : asrDefaultConfig.language}

-

VAD: {asrDefaultConfig.enable_vad ? '已启用' : '已禁用'}

- {asrDefaultConfig.retain_audio_files && ( -

录音保留: {asrDefaultConfig.audio_retention_days} 天

- )} +
+
+
+
+ + + +
+
+

+ 当前默认配置: {asrDefaultConfig.model_name} +

+
+

语言: {asrDefaultConfig.language === 'zh' ? '中文' : asrDefaultConfig.language}

+

VAD: {asrDefaultConfig.enable_vad ? '已启用' : '已禁用'}

+ {asrDefaultConfig.retain_audio_files && ( +

录音保留: {asrDefaultConfig.audio_retention_days} 天

+ )} +
+ + {/* 本地模型警告信息 */} + {asrDefaultConfig.model_name && !asrDefaultConfig.model_name.includes('cloud') && ( +
+
+
+ warning +
+
+

+ 正在使用本地模型 +

+
+

• 本地模型需要下载较大的模型文件(约 1-3GB),且需要占用较多的系统资源(CPU/内存)。

+

• 优势:响应速度更快(低延迟),数据完全本地处理,隐私性更好。

+

• 如果您的设备性能较弱,推荐切换回 SiliconFlow Cloud 远程模式。

+
+
+
+
+ )} + + {/* 远程模型提示信息 */} + {asrDefaultConfig.model_name && asrDefaultConfig.model_name.includes('cloud') && ( +
+
+
+ cloud_done +
+
+

+ 正在使用远程云端模型 (推荐) +

+
+

• 无需下载模型文件,不占用本地算力。

+

• 依赖网络连接,可能会有轻微的网络延迟。

+
+
+
+
+ )}
)} @@ -738,165 +920,27 @@ function ASRSettings() { {/* 添加配置表单 */} {showAddConfig && ( -
-

添加 ASR 配置

- -
- {/* 模型选择 */} -
- - -

- {selectedModelPreset - ? `${selectedModelPreset.description} · 推荐: ${selectedModelPreset.recommendedSpec}` - : '选择模型后可查看详细说明'} -

-
- - {/* 语言选择 */} -
- - -
- - {/* 停顿阈值 */} -
- - setFormData({ ...formData, sentence_pause_threshold: e.target.value })} - className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500" - /> -

- 检测到停顿超过此时间(秒)时进行分句 -

-
- - {/* VAD 开关 */} -
- setFormData({ ...formData, enable_vad: e.target.checked })} - className="h-4 w-4 text-blue-600 focus:ring-blue-500 border-gray-300 rounded" - /> - -
- - {/* 录音文件保留 */} -
- setFormData({ ...formData, retain_audio_files: e.target.checked })} - className="h-4 w-4 text-blue-600 focus:ring-blue-500 border-gray-300 rounded" - /> - -
- - {/* 保留天数 */} - {formData.retain_audio_files && ( -
- - setFormData({ ...formData, audio_retention_days: e.target.value })} - className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500" - /> -
- )} - - {/* 存储路径 */} - {formData.retain_audio_files && ( -
- - setFormData({ ...formData, audio_storage_path: e.target.value })} - className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500" - /> -

- 留空使用默认路径,或指定自定义路径 -

-
- )} -
- -
- - -
-
+ { + setShowAddConfig(false); + resetForm(); + }} + /> )} {/* 操作按钮 */}