@@ -173,11 +182,10 @@ function ConversationEditor() {
selectConversation(conv.id)}
- className={`p-3 rounded-lg cursor-pointer transition-colors ${
- selectedConversation === conv.id
- ? 'bg-surface-light dark:bg-surface-dark'
- : 'hover:bg-surface-light dark:hover:bg-surface-dark'
- }`}
+ className={`p-3 rounded-lg cursor-pointer transition-colors ${selectedConversation === conv.id
+ ? 'bg-surface-light dark:bg-surface-dark'
+ : 'hover:bg-surface-light dark:hover:bg-surface-dark'
+ }`}
>
@@ -215,10 +223,15 @@ function ConversationEditor() {
+ {/* Resizer Handle */}
+
-
-
+
+
与 {characterName} 的对话
@@ -241,11 +254,12 @@ function ConversationEditor() {
)}
-
-
-
-
-
+
+ {/* 复盘区域 - 未复盘时只显示按钮,复盘后显示完整报告 */}
+
{selectedConversation ? (
@@ -262,11 +276,10 @@ function ConversationEditor() {
/>
)}
-
-
);
}
export default ConversationEditor;
-
-function AIAnalysisPanel({ aiData, loading }) {
- if (loading) {
- return (
-
- );
- }
-
- const report = aiData?.analysisReport;
- if (!report) {
- return (
-
- AI 分析报告数据暂未生成
-
- );
- }
-
- const sections = [
- { label: '表述能力', data: report.expressionAbility },
- { label: '话题选择', data: report.topicSelection }
- ];
-
- return (
-
-
-
AI分析报告
- 洞察更新于最新对话
-
-
- {sections.map((section) => (
-
-
- {section.label}
- {section.data?.score ?? '--'} 分
-
-
- {section.data?.description || '暂无描述'}
-
-
- ))}
-
-
- );
-}
-
-function KeyMomentsSection({ moments }) {
- if (!moments || moments.length === 0) {
- return null;
- }
-
- const renderEvaluation = (evaluation) => {
- if (!evaluation) return '';
- if (typeof evaluation === 'string') return evaluation;
- return evaluation.content || evaluation.description || '';
- };
-
- return (
-
-
- schedule
- 关键时刻回放
-
-
- {moments.map((moment) => (
-
-
- {moment.sender === 'user' ? '我' : '对方'}
- {moment.timestamp ? new Date(moment.timestamp).toLocaleTimeString('zh-CN') : ''}
-
-
{moment.messageContent || '(无内容)'}
-
- {renderEvaluation(moment.evaluation) || 'AI暂无评估'}
-
-
- ))}
-
-
- );
-}
-
-function AttitudeAnalysisSection({ attitude }) {
- if (!attitude) return null;
-
- const affinityText = attitude.affinityChange >= 0 ? `+${attitude.affinityChange}` : attitude.affinityChange;
-
- return (
-
-
-
- psychology
- 本轮对话表现态度分析
-
-
- 趋势:{attitude.trend}
-
-
-
{attitude.description}
-
- 好感度变化:{affinityText} 点
-
-
- );
-}
-
-function ActionSuggestionsSection({ suggestions }) {
- if (!suggestions || suggestions.length === 0) return null;
-
- return (
-
-
- lightbulb
- 行动建议
-
-
- {suggestions.map((suggestion) => (
-
-
- {suggestion.title}
- {suggestion.affinity_prediction !== null && (
-
- 预估好感度:
- {suggestion.affinity_prediction > 0
- ? `+${suggestion.affinity_prediction}`
- : suggestion.affinity_prediction}
-
- )}
-
-
{suggestion.content}
- {suggestion.tags?.length > 0 && (
-
- {suggestion.tags.map((tag) => (
-
- {tag}
-
- ))}
-
- )}
-
- ))}
-
-
- );
-}
-
-function ConversationInsights({ aiData, tags }) {
- const report = aiData?.analysisReport;
- const suggestionTags = aiData?.actionSuggestions?.flatMap((item) => item.tags || []) || [];
- const uniqueSuggestionTags = Array.from(new Set(suggestionTags));
- const insights = [
- report?.expressionAbility?.description,
- report?.topicSelection?.description,
- aiData?.attitudeAnalysis?.description
- ].filter(Boolean);
-
- return (
-
- {insights.length > 0 ? (
-
-
复盘分析
-
- {insights.map((insight, idx) => (
-
- {insight}
-
- ))}
-
-
- ) : (
-
- AI 分析数据暂未生成
-
- )}
-
- {tags.length > 0 && (
-
-
对话分类
-
- {tags.map((tag) => (
-
- {tag}
-
- ))}
-
-
- )}
-
- {uniqueSuggestionTags.length > 0 && (
-
-
AI 建议分类
-
- {uniqueSuggestionTags.map((tag) => (
-
- ))}
-
-
- )}
-
- );
-}
-
diff --git a/desktop/src/renderer/pages/HUD/SessionSelector.jsx b/desktop/src/renderer/pages/HUD/SessionSelector.jsx
new file mode 100644
index 0000000..d6e8dc6
--- /dev/null
+++ b/desktop/src/renderer/pages/HUD/SessionSelector.jsx
@@ -0,0 +1,296 @@
+/**
+ * 会话选择器组件
+ */
+
+import React, { useState, useEffect } from 'react';
+
+export const SessionSelector = ({ onSessionSelected, onClose }) => {
+ const [characters, setCharacters] = useState([]);
+ const [selectedCharacter, setSelectedCharacter] = useState(null);
+ const [conversations, setConversations] = useState([]);
+ const [loading, setLoading] = useState(true);
+ const [characterLoading, setCharacterLoading] = useState(false);
+ const [editingConversationId, setEditingConversationId] = useState(null);
+ const [editingTitle, setEditingTitle] = useState('');
+
+ useEffect(() => {
+ loadCharacters();
+ }, []);
+
+ const loadCharacters = async () => {
+ try {
+ setLoading(true);
+ const api = window.electronAPI;
+ if (!api || !api.getAllCharacters) {
+ throw new Error('数据库API不可用');
+ }
+ const chars = await api.getAllCharacters();
+ setCharacters(chars || []);
+ } catch (err) {
+ console.error('加载角色失败:', err);
+ } finally {
+ setLoading(false);
+ }
+ };
+
+ const handleCharacterSelect = async (character) => {
+ setSelectedCharacter(character);
+ setCharacterLoading(true);
+ try {
+ const api = window.electronAPI;
+ const convs = await api.getConversationsByCharacter(character.id);
+ setConversations(convs || []);
+ } catch (err) {
+ console.error('加载会话失败:', err);
+ setConversations([]);
+ } finally {
+ setCharacterLoading(false);
+ }
+ };
+
+ const handleContinueConversation = (conversation) => {
+ onSessionSelected({
+ characterId: selectedCharacter.id,
+ conversationId: conversation.id,
+ conversationName: conversation.title || conversation.name || '未命名对话',
+ characterName: selectedCharacter.name,
+ isNew: false
+ });
+ };
+
  // Enter inline-edit mode for a conversation title. stopPropagation keeps the
  // click from also firing the row's "continue conversation" handler.
  const handleStartEdit = (conversation, e) => {
    e.stopPropagation();
    setEditingConversationId(conversation.id);
    setEditingTitle(conversation.title || conversation.name || '');
  };

  // Persist the edited title via the main-process API, then mirror the saved
  // title into local list state and leave edit mode. Alerts on failure so the
  // user knows the edit was not saved.
  const handleSaveEdit = async (conversationId, e) => {
    e.stopPropagation();
    try {
      const api = window.electronAPI;
      if (!api || !api.updateConversation) {
        throw new Error('数据库API不可用');
      }

      // A blank/whitespace-only title falls back to the default label.
      const updated = await api.updateConversation(conversationId, {
        title: editingTitle.trim() || '未命名对话'
      });

      if (updated) {
        // Update local state
        setConversations(prev => prev.map(conv =>
          conv.id === conversationId
            ? { ...conv, title: updated.title }
            : conv
        ));
      }

      setEditingConversationId(null);
      setEditingTitle('');
    } catch (err) {
      console.error('保存会话标题失败:', err);
      alert('保存失败,请重试');
    }
  };

  // Abandon the inline edit without saving.
  const handleCancelEdit = (e) => {
    e.stopPropagation();
    setEditingConversationId(null);
    setEditingTitle('');
  };
+
+ const handleCreateNewConversation = async () => {
+ try {
+ const timestamp = new Date().toLocaleString('zh-CN');
+ const conversationName = `与 ${selectedCharacter.name} 的新对话 - ${timestamp}`;
+
+ onSessionSelected({
+ characterId: selectedCharacter.id,
+ conversationId: null,
+ conversationName: conversationName,
+ characterName: selectedCharacter.name,
+ isNew: true
+ });
+ } catch (err) {
+ console.error('创建新会话失败:', err);
+ }
+ };
+
+ const getAvatarGradient = (color) => {
+ if (color?.includes('ff6b6b')) return 'bg-gradient-to-br from-[#ff6b6b] to-[#ff8e8e]';
+ if (color?.includes('4ecdc4')) return 'bg-gradient-to-br from-[#4ecdc4] to-[#6ee5dd]';
+ if (color?.includes('ffe66d')) return 'bg-gradient-to-br from-[#ffe66d] to-[#fff099]';
+ return 'bg-gradient-to-br from-primary to-[#8e24aa]';
+ };
+
+ if (loading) {
+ return (
+
+ );
+ }
+
+ if (!selectedCharacter) {
+ return (
+
+
+
+
选择角色开始对话
+ {characters.length === 0 ? (
+
+ ) : (
+
+ {characters.map((character) => {
+ const firstLetter = character.name.charAt(0);
+ const avatarGradient = getAvatarGradient(character.avatar_color);
+ return (
+
handleCharacterSelect(character)}
+ >
+
+ {firstLetter}
+
+
+
{character.name}
+
{character.relationship_label}
+
+ 好感度
+ {character.affinity}%
+
+
+
+ );
+ })}
+
+ )}
+
+
+ );
+ }
+
+ return (
+
+
+
+
+ {selectedCharacter.name}
+
+
+
+
+
+
+
选择会话
+
+
+
+ {characterLoading ? (
+
+ ) : conversations.length === 0 ? (
+
+
还没有对话记录
+
点击上方按钮创建新对话
+
+ ) : (
+
+ {conversations.map((conversation) => (
+
{
+ if (editingConversationId !== conversation.id) {
+ handleContinueConversation(conversation);
+ }
+ }}
+ >
+
+ {editingConversationId === conversation.id ? (
+
+
setEditingTitle(e.target.value)}
+ onClick={(e) => e.stopPropagation()}
+ onKeyDown={(e) => {
+ if (e.key === 'Enter') {
+ handleSaveEdit(conversation.id, e);
+ } else if (e.key === 'Escape') {
+ handleCancelEdit(e);
+ }
+ }}
+ autoFocus
+ />
+
+
+
+
+
+ ) : (
+ <>
+
{
+ e.stopPropagation();
+ handleStartEdit(conversation, e);
+ }}
+ title="双击编辑标题"
+ >
+ {conversation.title || conversation.name || '未命名对话'}
+
+
+ {new Date(conversation.created_at).toLocaleDateString('zh-CN')}
+ {conversation.message_count > 0 && ` • ${conversation.message_count} 条消息`}
+
+ >
+ )}
+
+ {editingConversationId !== conversation.id && (
+
→
+ )}
+
+ ))}
+
+ )}
+
+
+ );
+};
\ No newline at end of file
diff --git a/desktop/src/renderer/pages/HUD/index.jsx b/desktop/src/renderer/pages/HUD/index.jsx
new file mode 100644
index 0000000..9f8c4be
--- /dev/null
+++ b/desktop/src/renderer/pages/HUD/index.jsx
@@ -0,0 +1,340 @@
+import React, { useEffect } from 'react';
+import ReactDOM from 'react-dom/client';
+import '../../hud.css';
+import audioCaptureService from '../../asr/audio-capture-service.js';
+
+// Hooks
+import { useChatSession } from '../../hooks/useChatSession.js';
+import { useMessages } from '../../hooks/useMessages.js';
+import { useSuggestions } from '../../hooks/useSuggestions.js';
+
+// Components
+import { SessionSelector } from './SessionSelector.jsx';
+import { TranscriptView } from '../../components/Chat/TranscriptView.jsx';
+import { SuggestionsPanel } from '../../components/Chat/SuggestionsPanel.jsx';
+
/**
 * Extract screen-space pointer coordinates from a mouse event, falling back
 * to client coordinates when screen coordinates are null or undefined.
 * The nullish-coalescing operator replaces the previous manual
 * `!== undefined && !== null` checks with identical semantics (0 is kept).
 * @param {MouseEvent|{screenX?: number, screenY?: number, clientX: number, clientY: number}} event
 * @returns {{x: number, y: number}} pointer position
 */
const getPointerCoords = (event) => ({
  x: event.screenX ?? event.clientX,
  y: event.screenY ?? event.clientY,
});
+
+function Hud() {
+ // 使用自定义Hooks
+ const chatSession = useChatSession();
+ const messages = useMessages(chatSession.sessionInfo?.conversationId);
+ const suggestions = useSuggestions(chatSession.sessionInfo);
+
+ // 音量检测相关状态
+ const [micVolumeLevel, setMicVolumeLevel] = React.useState(0);
+ const [systemVolumeLevel, setSystemVolumeLevel] = React.useState(0);
+ const [hasSystemAudio, setHasSystemAudio] = React.useState(false);
+ const [systemAudioNotAuthorized, setSystemAudioNotAuthorized] = React.useState(false);
+ const [isListening, setIsListening] = React.useState(false);
+
+ // 临时禁用streaming功能
+ const streamingDisabled = true;
+
+ // 确保有可用对话ID(缺失时自动创建并回写)
+ const ensureConversationId = async () => {
+ let conversationId = chatSession.sessionInfo?.conversationId;
+ if (conversationId) return conversationId;
+
+ const api = window.electronAPI;
+ const characterId = chatSession.sessionInfo?.characterId;
+ if (!api?.dbCreateConversation || !characterId) return null;
+
+ try {
+ const newConv = await api.dbCreateConversation({
+ character_id: characterId,
+ title: chatSession.sessionInfo?.conversationName || '新对话'
+ });
+ conversationId = newConv?.id;
+ if (conversationId) {
+ await chatSession.handleSessionSelected({
+ ...chatSession.sessionInfo,
+ conversationId,
+ conversationName: newConv?.title || chatSession.sessionInfo?.conversationName,
+ isNew: false
+ });
+ }
+ return conversationId;
+ } catch (err) {
+ console.error('[HUD] 创建对话失败:', err);
+ return null;
+ }
+ };
+
  // Mirrors the Settings page behaviour: ensure both ASR audio sources exist
  // and are usable (speaker1 = user microphone, speaker2 = system loopback),
  // auto-provisioning defaults when configuration is missing. Throws with a
  // user-facing message when no usable microphone can be resolved.
  // Returns { speaker1, speaker2 } records as persisted in the database.
  const prepareAudioSources = async () => {
    const api = window.electronAPI;
    if (!api?.asrGetAudioSources) {
      throw new Error('ASR 音频源接口不可用');
    }

    let audioSources = await api.asrGetAudioSources();
    let speaker1 = audioSources.find(s => s.id === 'speaker1');
    let speaker2 = audioSources.find(s => s.id === 'speaker2');

    // If the mic source is missing or has no device id, enumerate microphones
    // and persist the first available device as speaker1.
    if (!speaker1 || !speaker1.device_id) {
      const devices = await audioCaptureService.enumerateDevices();
      if (!devices || devices.length === 0) {
        throw new Error('未找到可用麦克风设备,请先在系统中确认设备连接');
      }

      const firstDevice = devices[0];
      const payload = {
        id: 'speaker1',
        name: '用户(麦克风)',
        device_id: firstDevice.deviceId,
        device_name: firstDevice.label || firstDevice.deviceId,
        is_active: 1
      };

      if (speaker1) {
        await api.asrUpdateAudioSource('speaker1', payload);
      } else if (api.asrCreateAudioSource) {
        await api.asrCreateAudioSource(payload);
      }
      speaker1 = payload;
    }

    // Ensure the system-audio source has a default (inactive) record.
    if (!speaker2 && api.asrCreateAudioSource) {
      try {
        await api.asrCreateAudioSource({
          id: 'speaker2',
          name: '角色(系统音频)',
          device_id: 'system-loopback',
          device_name: '系统音频(屏幕捕获)',
          is_active: 0
        });
      } catch (err) {
        // Best-effort: a missing speaker2 only disables system-audio capture.
        console.warn('[HUD] 创建系统音频源失败:', err);
      }
    }

    // Re-read so we operate on the freshly persisted state.
    audioSources = await api.asrGetAudioSources();
    speaker1 = audioSources.find(s => s.id === 'speaker1') || speaker1;
    speaker2 = audioSources.find(s => s.id === 'speaker2') || speaker2;

    // is_active may come back as 1, true or '1' depending on the storage layer.
    const isSpeaker1Active = speaker1 && (speaker1.is_active === 1 || speaker1.is_active === true || speaker1.is_active === '1');
    if (!isSpeaker1Active) {
      throw new Error('麦克风配置未激活,请在设置中启用音频源');
    }
    if (!speaker1?.device_id) {
      throw new Error('麦克风设备ID未配置,请在设置中配置音频源');
    }

    return { speaker1, speaker2 };
  };
+
  // Toggle ASR listening. When active: tear down renderer captures, then stop
  // the main-process ASR session and reset volume meters. When inactive:
  // resolve a conversation id, provision audio sources, start main-process
  // ASR, then start renderer-side capture (mic required, system audio
  // best-effort), rolling back captures if mic capture fails.
  const toggleListening = async () => {
    if (isListening) {
      // Stop listening: renderer captures first, then the main-process ASR.
      try {
        await audioCaptureService.stopAllCaptures();
        const api = window.electronAPI;
        if (api?.asrStop) {
          await api.asrStop();
        }
        setIsListening(false);
        setMicVolumeLevel(0);
        setSystemVolumeLevel(0);
      } catch (err) {
        console.error('停止监听失败:', err);
      }
      return;
    }

    // Start listening
    try {
      const api = window.electronAPI;
      if (!api?.asrGetAudioSources || !api?.asrStart) {
        console.error('ASR API not available');
        return;
      }

      // A persisted conversation row is required before ASR can attach.
      const conversationId = await ensureConversationId();
      if (!conversationId) {
        chatSession.setError('未找到有效的对话ID');
        return;
      }

      const { speaker1, speaker2 } = await prepareAudioSources();

      // 1. Tell the main process to start ASR for this conversation.
      await api.asrStart(conversationId);

      // 2. Start capturing audio in the renderer process.
      try {
        console.log('[HUD] 开始启动音频捕获...');

        // Start speaker1 (user / microphone) — mandatory.
        await audioCaptureService.startMicrophoneCapture('speaker1', speaker1.device_id);

        // Start speaker2 (character / system audio) — optional, best-effort.
        let systemAudioEnabled = false;
        if (speaker2) {
          const isSpeaker2Active = speaker2.is_active === 1 || speaker2.is_active === true || speaker2.is_active === '1';
          if (isSpeaker2Active) {
            try {
              // May show a source picker if no cached stream is available.
              await audioCaptureService.startSystemAudioCapture('speaker2');
              systemAudioEnabled = true;
              setSystemAudioNotAuthorized(false);
            } catch (speaker2Error) {
              // Non-fatal: surface via state so the UI can hint at the user.
              console.error('[HUD] ❌ speaker2 (系统音频) 启动失败:', speaker2Error);
              setSystemAudioNotAuthorized(true);
            }
          }
        }

        setHasSystemAudio(systemAudioEnabled);
        setIsListening(true);
        chatSession.setError(''); // clear any previous error
      } catch (captureError) {
        console.error('[HUD] Failed to start audio capture:', captureError);
        chatSession.setError(`音频捕获启动失败: ${captureError.message}`);
        // Roll back any captures that did start.
        await audioCaptureService.stopAllCaptures();
      }
    } catch (error) {
      console.error('[HUD] Error starting ASR:', error);
      chatSession.setError(`启动语音识别失败:${error.message}`);
    }
  };
+
  // Forward per-source volume levels from the audio-capture service into
  // React state so the UI can render live meters. Subscribed once on mount,
  // unsubscribed on unmount.
  useEffect(() => {
    const handleVolumeUpdate = ({ sourceId, volume }) => {
      if (sourceId === 'speaker1') {
        setMicVolumeLevel(volume);
      } else if (sourceId === 'speaker2') {
        setSystemVolumeLevel(volume);
      }
    };

    audioCaptureService.on('volume-update', handleVolumeUpdate);

    return () => {
      audioCaptureService.off('volume-update', handleVolumeUpdate);
    };
  }, []);
+
  // Relay completed ASR sentences from the main process to the suggestion
  // engine.
  // NOTE(review): `suggestions` is a hook result and may get a new identity
  // on every render, which would make this effect re-subscribe each render —
  // confirm whether useSuggestions memoizes its return value.
  useEffect(() => {
    const api = window.electronAPI;
    if (!api?.on) return;

    const handleNewMessage = (message) => {
      suggestions.handleNewMessage(message);
    };

    api.on('asr-sentence-complete', handleNewMessage);

    return () => {
      api.removeListener('asr-sentence-complete', handleNewMessage);
    };
  }, [suggestions]);
+
+ if (chatSession.sessionInfo && chatSession.sessionInfo.conversationId) {
+ messages.loadMessages();
+ }
+
+ if (chatSession.sessionInfo && chatSession.sessionInfo.conversationId) {
+ useEffect(() => {
+ messages.loadMessages();
+ }, [chatSession.sessionInfo.conversationId]);
+ }
+
+ if (chatSession.showSelector !== false && !chatSession.sessionInfo) {
+ chatSession.showSelector = true;
+ }
+
+ if (chatSession.showSelector || !chatSession.sessionInfo) {
+ return
;
+ }
+
+ return (
+
+
+
+
+ {chatSession.sessionInfo?.conversationName || '最近互动'}
+
+
+
+
suggestions.updateSuggestionConfig({ enable_passive_suggestion: enabled ? 1 : 0 })}
+ sessionInfo={chatSession.sessionInfo}
+ />
+
+ );
+}
+
+const hudRoot = document.getElementById('hud-root');
+if (hudRoot) {
+ ReactDOM.createRoot(hudRoot).render(
+
+
+
+ );
+} else {
+ console.error('HUD root element not found');
+}
diff --git a/desktop/src/renderer/pages/Overview.jsx b/desktop/src/renderer/pages/Overview.jsx
index 153fa90..2594675 100644
--- a/desktop/src/renderer/pages/Overview.jsx
+++ b/desktop/src/renderer/pages/Overview.jsx
@@ -17,6 +17,15 @@ function Overview() {
setLoading(true);
console.log('Loading data...');
+ // 检查是否在 Electron 环境中
+ const isElectron = window.electronAPI !== undefined;
+
+ if (!isElectron) {
+ console.warn('⚠️ 当前不在 Electron 环境中,无法加载数据。请在 Electron 应用中打开此页面。');
+ setLoading(false);
+ return;
+ }
+
// 加载统计数据
if (window.electronAPI?.getStatistics) {
console.log('Calling getStatistics...');
@@ -141,6 +150,22 @@ function Overview() {
>
+ ) : window.electronAPI === undefined ? (
+
+
+
⚠️
+
+
+ 请在 Electron 应用中打开
+
+
+ 当前页面在普通浏览器中打开,无法访问 Electron API。
+
+ 请在 LiveGalGame Desktop 应用中查看数据。
+
+
+
+
) : (
加载失败
diff --git a/desktop/src/renderer/pages/Settings.jsx b/desktop/src/renderer/pages/Settings.jsx
index e5dad49..03c229b 100644
--- a/desktop/src/renderer/pages/Settings.jsx
+++ b/desktop/src/renderer/pages/Settings.jsx
@@ -1,682 +1,45 @@
-import { useState, useEffect, useRef, useCallback } from 'react';
+import { useEffect } from 'react';
import { Link } from 'react-router-dom';
-function Settings() {
- // LLM 配置
- const [llmConfigs, setLlmConfigs] = useState([]);
- const [defaultConfig, setDefaultConfig] = useState(null);
- const [loading, setLoading] = useState(true);
- const [showAddConfig, setShowAddConfig] = useState(false);
- const [newConfig, setNewConfig] = useState({
- name: '',
- provider: 'openai',
- apiKey: '',
- baseUrl: '',
- isDefault: false
- });
-
- // 音频设备设置
- const [audioDevices, setAudioDevices] = useState([]);
- const [selectedAudioDevice, setSelectedAudioDevice] = useState('');
- const [captureSystemAudio, setCaptureSystemAudio] = useState(false);
- const [selectedSystemAudioDevice, setSelectedSystemAudioDevice] = useState('');
- const [isListening, setIsListening] = useState(false);
- const [audioStatus, setAudioStatus] = useState('');
- const [desktopCapturerError, setDesktopCapturerError] = useState(null);
- const [micVolumeLevel, setMicVolumeLevel] = useState(0);
- const [systemVolumeLevel, setSystemVolumeLevel] = useState(0);
- const [totalVolumeLevel, setTotalVolumeLevel] = useState(0);
- // 使用独立的 AudioContext 避免采样率冲突
- const micAudioContextRef = useRef(null);
- const systemAudioContextRef = useRef(null);
- const audioContextRef = useRef(null); // 保留用于兼容性
- const micAnalyserRef = useRef(null);
- const systemAnalyserRef = useRef(null);
- const totalAnalyserRef = useRef(null);
- const microphoneRef = useRef(null);
- const systemAudioRef = useRef(null);
- const systemAudioElementRef = useRef(null);
- const micDataArrayRef = useRef(null);
- const systemDataArrayRef = useRef(null);
- const totalDataArrayRef = useRef(null);
- const animationIdRef = useRef(null);
- const audioContextStateLogRef = useRef({ mic: null, system: null });
-
- // 音频源配置(从数据库加载)
- const [audioSources, setAudioSources] = useState([]);
- const [speaker1Source, setSpeaker1Source] = useState(null); // 用户(麦克风)
- const [speaker2Source, setSpeaker2Source] = useState(null); // 角色(系统音频)
-
- // ASR(语音识别)配置
- const [asrConfigs, setAsrConfigs] = useState([]);
- const [asrDefaultConfig, setAsrDefaultConfig] = useState(null);
- const [asrLoading, setAsrLoading] = useState(true);
- const [showAddAsrConfig, setShowAddAsrConfig] = useState(false);
- const logAudioContextDetails = useCallback((context, label) => {
- if (!context) {
- console.warn(`[AudioDebug] ${label} AudioContext 不存在或已销毁`);
- return;
- }
-
- const details = {
- state: context.state,
- sampleRate: context.sampleRate,
- baseLatency: context.baseLatency ?? 'n/a',
- outputLatency: context.outputLatency ?? 'n/a',
- currentTime: Number(context.currentTime.toFixed(3))
- };
-
- console.log(`[AudioDebug] ${label} AudioContext 详情:`, details);
- }, []);
-
- const attachAudioContextDebugHandlers = useCallback((context, label) => {
- if (!context) return;
-
- const handler = () => {
- const prevState = audioContextStateLogRef.current[label];
- if (prevState !== context.state) {
- console.log(`[AudioDebug] ${label} AudioContext 状态: ${context.state}`);
- audioContextStateLogRef.current[label] = context.state;
- }
-
- if (context.state === 'suspended') {
- console.warn(`[AudioDebug] ${label} AudioContext 已暂停,尝试恢复...`);
- } else if (context.state === 'closed') {
- console.warn(`[AudioDebug] ${label} AudioContext 已关闭`);
- }
- };
-
- context.onstatechange = handler;
- logAudioContextDetails(context, label);
- }, [logAudioContextDetails]);
-
- useEffect(() => {
- const handleWindowError = (event) => {
- if (event?.message?.includes('AudioContext')) {
- console.error('[AudioDebug] 捕获到全局 AudioContext 错误:', event.message, event.error);
- logAudioContextDetails(micAudioContextRef.current, '麦克风');
- logAudioContextDetails(systemAudioContextRef.current, '系统音频');
-
- setAudioStatus(prev => {
- const prefix = prev && !prev.includes('AudioContext 错误') ? `${prev} | ` : '';
- return `${prefix}AudioContext 错误: ${event.message}`;
- });
- }
- };
-
- window.addEventListener('error', handleWindowError);
- return () => window.removeEventListener('error', handleWindowError);
- }, [logAudioContextDetails]);
- const [newAsrConfig, setNewAsrConfig] = useState({
- model_name: 'whisper-base',
- language: 'zh',
- enable_vad: true,
- sentence_pause_threshold: 1.0,
- retain_audio_files: false,
- audio_retention_days: 30,
- audio_storage_path: ''
- });;
-
- // 保存音频源配置(使用 useCallback 避免无限循环,但不依赖 audioSources)
- const saveAudioSource = useCallback(async (sourceName, deviceId, deviceName, isActive = true) => {
- try {
- const api = window.electronAPI;
- if (!api?.asrCreateAudioSource || !api?.asrUpdateAudioSource) {
- console.warn('ASR API 不可用');
- return;
- }
-
- // 确定音频源的固定ID(关键:必须使用固定的ID才能与外键约束匹配)
- const sourceId = sourceName === '用户(麦克风)' ? 'speaker1' : 'speaker2';
-
- console.log('保存音频源配置:', { sourceId, sourceName, deviceId, deviceName, isActive });
-
- // 重新获取最新的音频源列表,避免使用过期的 audioSources
- const currentSources = await api.asrGetAudioSources();
-
- // 使用固定的ID查找是否已存在该音频源(而不是名称匹配)
- const existingSource = currentSources.find(s => s.id === sourceId);
-
- const updateData = {
- name: sourceName,
- device_id: deviceId,
- device_name: deviceName,
- is_active: isActive ? 1 : 0
- };
-
- if (existingSource) {
- // 更新现有配置
- console.log('更新现有音频源:', existingSource.id, updateData);
- const result = await api.asrUpdateAudioSource(existingSource.id, updateData);
- console.log('更新结果:', result);
- } else {
- // 创建新配置(必须指定固定的ID)
- const createData = {
- id: sourceId, // 关键:使用固定的ID
- ...updateData
- };
- console.log('创建新音频源:', createData);
- const result = await api.asrCreateAudioSource(createData);
- console.log('创建结果:', result);
- }
+// Hooks
+import { useLLMConfig } from '../hooks/useLLMConfig.js';
+import { useSuggestionConfig } from '../hooks/useSuggestionConfig.js';
+import { useAudioDevices } from '../hooks/useAudioDevices.js';
+import { useAudioCapture } from '../hooks/useAudioCapture.js';
- // 重新加载音频源配置
- await loadAudioSources();
+// Components
+import { LLMConfigList } from '../components/LLM/LLMConfigList.jsx';
+import { LLMConfigForm } from '../components/LLM/LLMConfigForm.jsx';
+import { SuggestionConfigForm } from '../components/Suggestions/SuggestionConfigForm.jsx';
+import { AudioDeviceSelector } from '../components/Audio/AudioDeviceSelector.jsx';
+import { AudioTester } from '../components/Audio/AudioTester.jsx';
- // 验证保存结果(使用ID查找)
- const updatedSources = await api.asrGetAudioSources();
- const savedSource = updatedSources.find(s => s.id === sourceId);
- console.log('保存后的音频源:', savedSource);
-
- if (savedSource) {
- console.log(`✓ 音频源配置已保存: ${sourceName} (ID: ${sourceId}), is_active=${savedSource.is_active}`);
- } else {
- console.warn(`⚠ 音频源配置保存后未找到: ${sourceName} (ID: ${sourceId})`);
- }
- } catch (error) {
- console.error('保存音频源配置失败:', error);
- alert('保存音频源配置失败:' + (error.message || '未知错误'));
- }
- }, []); // 移除 audioSources 依赖,改为在函数内部获取最新数据
-
- // 用于跟踪是否已经自动保存过,避免重复保存
- const autoSavedRef = useRef(false);
+function Settings() {
+ // 使用自定义Hooks
+ const llmHook = useLLMConfig();
+ const suggestionHook = useSuggestionConfig();
+ const audioDevicesHook = useAudioDevices();
+ const audioCaptureHook = useAudioCapture();
+ // 初始化
useEffect(() => {
- loadConfigs();
- // 先加载音频源配置,再加载设备列表(因为设备列表需要用到音频源配置)
- loadAudioSources().then(() => {
- loadAudioDevices();
- });
+ llmHook.loadConfigs();
+ llmHook.loadFeatureBindings();
+ suggestionHook.loadSuggestionSettings();
+ audioDevicesHook.initializeAudioDevices();
}, []);
// 当音频源配置加载完成后,更新设备选择并自动保存
useEffect(() => {
- const initializeAndSave = async () => {
- // 如果speaker1Source存在但device_id为null,自动选择第一个可用设备并保存
- if (speaker1Source && !speaker1Source.device_id && audioDevices.length > 0 && !autoSavedRef.current) {
- const firstDevice = audioDevices[0];
- console.log('自动选择并保存第一个麦克风设备:', firstDevice.deviceId);
- setSelectedAudioDevice(firstDevice.deviceId);
- // 标记为已保存,避免重复
- autoSavedRef.current = true;
- // 直接保存到数据库
- await saveAudioSource('用户(麦克风)', firstDevice.deviceId, firstDevice.label || firstDevice.deviceId, true);
- } else if (speaker1Source?.device_id && audioDevices.length > 0) {
- const device = audioDevices.find(d => d.deviceId === speaker1Source.device_id);
- if (device && selectedAudioDevice !== device.deviceId) {
- setSelectedAudioDevice(device.deviceId);
- }
- }
- if (speaker2Source?.device_id && audioDevices.length > 0 && captureSystemAudio) {
- const device = audioDevices.find(d => d.deviceId === speaker2Source.device_id);
- if (device && selectedSystemAudioDevice !== device.deviceId) {
- setSelectedSystemAudioDevice(device.deviceId);
- }
- }
- };
-
- initializeAndSave();
- }, [speaker1Source, speaker2Source, audioDevices, captureSystemAudio, saveAudioSource]);
-
- const loadAudioDevices = async () => {
- try {
- if (!navigator.mediaDevices?.enumerateDevices) {
- console.warn('浏览器不支持音频设备枚举');
- return;
- }
-
- const devices = await navigator.mediaDevices.enumerateDevices();
- const audioInputs = devices.filter(device => device.kind === 'audioinput');
- setAudioDevices(audioInputs);
-
- // 如果没有已保存的配置,选择第一个设备作为默认值
- if (!speaker1Source && audioInputs.length > 0 && !selectedAudioDevice) {
- setSelectedAudioDevice(audioInputs[0].deviceId);
- }
- } catch (error) {
- console.error('加载音频设备失败:', error);
- }
- };
-
-
- // 加载音频源配置
- const loadAudioSources = async () => {
- try {
- const api = window.electronAPI;
- if (!api?.asrGetAudioSources) {
- console.warn('ASR API 不可用');
- return;
- }
-
- const sources = await api.asrGetAudioSources();
- setAudioSources(sources || []);
-
- // 查找 Speaker 1(用户/麦克风)和 Speaker 2(角色/系统音频)
- // 使用固定的ID查找(而不是名称匹配),确保与外键约束一致
- const speaker1 = sources.find(s => s.id === 'speaker1');
- const speaker2 = sources.find(s => s.id === 'speaker2');
-
- setSpeaker1Source(speaker1 || null);
- setSpeaker2Source(speaker2 || null);
-
- // 如果找到了配置,更新UI状态
- if (speaker1) {
- setSelectedAudioDevice(speaker1.device_id || '');
- }
- if (speaker2) {
- // 根据 is_active 决定是否默认勾选系统音频捕获
- const isActive = speaker2.is_active === 1 || speaker2.is_active === true || speaker2.is_active === '1';
- setCaptureSystemAudio(isActive);
- setSelectedSystemAudioDevice(speaker2.device_id || '');
- }
- } catch (error) {
- console.error('加载音频源配置失败:', error);
- }
- };
-
- const startListening = async () => {
- try {
- // 停止之前的监听(如果有)并等待清理完成
- await stopListening();
-
- // 额外等待一小段时间确保浏览器音频子系统完全释放
- await new Promise(resolve => setTimeout(resolve, 200));
-
- setAudioStatus('正在检查权限...');
- setDesktopCapturerError(null);
-
- // macOS: 先检查并请求麦克风权限
- if (window.electronAPI?.checkMediaAccessStatus) {
- const micStatus = await window.electronAPI.checkMediaAccessStatus('microphone');
- console.log('[Settings] 麦克风权限状态:', micStatus);
-
- if (micStatus.status !== 'granted') {
- setAudioStatus('正在请求麦克风权限...');
- const result = await window.electronAPI.requestMediaAccess('microphone');
- console.log('[Settings] 麦克风权限请求结果:', result);
-
- if (!result.granted) {
- throw new Error(result.message || '麦克风权限被拒绝,请在系统设置中允许');
- }
- }
- }
-
- setAudioStatus('正在初始化音频...');
-
- let sourceCount = 0;
- let micStreamObtained = false;
-
- // 1. 捕获麦克风音频 - 使用独立的 AudioContext
- setAudioStatus('正在获取麦克风...');
- try {
- // 为麦克风创建独立的 AudioContext,强制使用 48kHz 采样率以减少冲突
- const audioContextOptions = { sampleRate: 48000, latencyHint: 'playback' };
- micAudioContextRef.current = new (window.AudioContext || window.webkitAudioContext)(audioContextOptions);
- attachAudioContextDebugHandlers(micAudioContextRef.current, 'mic');
-
- const micAnalyser = micAudioContextRef.current.createAnalyser();
- micAnalyser.fftSize = 256;
- micAnalyser.smoothingTimeConstant = 0.8;
- micAnalyserRef.current = micAnalyser;
- micDataArrayRef.current = new Uint8Array(micAnalyser.frequencyBinCount);
-
- const micConstraints = {
- audio: {
- deviceId: selectedAudioDevice ? { exact: selectedAudioDevice } : undefined,
- echoCancellation: true,
- noiseSuppression: true
- }
- };
-
- const micStream = await navigator.mediaDevices.getUserMedia(micConstraints);
- microphoneRef.current = micStream;
-
- const micSource = micAudioContextRef.current.createMediaStreamSource(micStream);
- micSource.connect(micAnalyser);
- sourceCount++;
- micStreamObtained = true;
- console.log('[Settings] ✅ 麦克风捕获成功');
- } catch (micError) {
- console.error('[Settings] ❌ 麦克风捕获失败:', micError);
- // 麦克风捕获失败时,如果也要捕获系统音频,继续执行;否则抛出错误
- if (!captureSystemAudio) {
- throw micError;
- }
- setAudioStatus(`⚠️ 麦克风捕获失败: ${micError.message},尝试捕获系统音频...`);
- }
-
- // 2. 如果启用了系统音频捕获,使用 electron-audio-loopback
- if (captureSystemAudio) {
- setAudioStatus('正在尝试捕获系统音频...');
- console.log('[Settings] 系统音频捕获: 使用 electron-audio-loopback...');
-
- // 为系统音频创建独立的 AudioContext
- await new Promise(resolve => setTimeout(resolve, 500));
-
- const sysAudioContextOptions = { sampleRate: 48000, latencyHint: 'playback' };
- systemAudioContextRef.current = new (window.AudioContext || window.webkitAudioContext)(sysAudioContextOptions);
-
- attachAudioContextDebugHandlers(systemAudioContextRef.current, 'system');
-
- const systemAnalyser = systemAudioContextRef.current.createAnalyser();
- systemAnalyser.fftSize = 256;
- systemAnalyser.smoothingTimeConstant = 0.8;
- systemAnalyserRef.current = systemAnalyser;
- systemDataArrayRef.current = new Uint8Array(systemAnalyser.frequencyBinCount);
-
- try {
- // 使用 electron-audio-loopback 方案
- // 1. 启用 loopback 音频
- if (window.electronAPI?.enableLoopbackAudio) {
- await window.electronAPI.enableLoopbackAudio();
- console.log('[Settings] Loopback audio enabled');
- }
-
- // 2. 使用 getDisplayMedia 获取系统音频
- setAudioStatus('正在获取系统音频...');
- const displayStream = await navigator.mediaDevices.getDisplayMedia({
- audio: true,
- video: true
- });
-
- // 3. 禁用 loopback 音频
- if (window.electronAPI?.disableLoopbackAudio) {
- await window.electronAPI.disableLoopbackAudio();
- console.log('[Settings] Loopback audio disabled');
- }
-
- // 4. 停止视频轨道
- const videoTracks = displayStream.getVideoTracks();
- videoTracks.forEach(track => {
- track.stop();
- displayStream.removeTrack(track);
- console.log(`[Settings] Video track stopped: ${track.label}`);
- });
-
- // 5. 检查音频轨道
- const audioTracks = displayStream.getAudioTracks();
- console.log(`[Settings] 系统音频流: ${audioTracks.length} 个音频轨道`);
-
- if (audioTracks.length > 0) {
- systemAudioRef.current = displayStream;
-
- const systemSource = systemAudioContextRef.current.createMediaStreamSource(displayStream);
- systemSource.connect(systemAnalyser);
- sourceCount++;
-
- if (systemAudioContextRef.current.state === 'suspended') {
- await systemAudioContextRef.current.resume();
- }
-
- console.log(`[Settings] ✅ 系统音频捕获已启动 (electron-audio-loopback)`);
- setAudioStatus('✅ 系统音频捕获成功');
- setDesktopCapturerError(null);
- } else {
- console.warn(`[Settings] ⚠️ 没有音频轨道`);
- displayStream.getTracks().forEach(track => track.stop());
- setDesktopCapturerError('没有音频轨道');
- }
- } catch (systemError) {
- console.error('[Settings] ❌ 系统音频捕获失败:', systemError);
-
- // 确保禁用 loopback
- if (window.electronAPI?.disableLoopbackAudio) {
- await window.electronAPI.disableLoopbackAudio().catch(() => { });
- }
-
- const errorMsg = systemError.message || '未知错误';
- if (micStreamObtained) {
- console.warn(`[Settings] 麦克风将继续工作,但无法捕获系统音频`);
- }
- setDesktopCapturerError(`捕获失败: ${errorMsg}`);
- }
- }
-
- // 检查是否至少有一个音频源成功捕获
- if (sourceCount === 0) {
- throw new Error('没有成功捕获任何音频源。请检查设备连接和权限设置。');
- }
-
- // 3. 总计音量将在 analyzeAudio 中通过软件方式计算(两个独立 AudioContext 的平均值)
- // 不再使用硬件合并,因为两个 AudioContext 无法直接连接
- totalDataArrayRef.current = new Uint8Array(128); // 用于存储计算后的总音量数据
-
- // 构建状态信息
- const capturedSources = [];
- if (micStreamObtained) capturedSources.push('麦克风');
- if (systemAudioRef.current) capturedSources.push('系统音频');
-
- const statusMsg = capturedSources.length > 0
- ? `正在监听 (${capturedSources.join(' + ')})...`
- : '监听中...';
-
- setAudioStatus(statusMsg);
- setIsListening(true);
-
- console.log(`[Settings] ✅ 音频监听已启动: ${capturedSources.join(', ') || '无'}`);
-
- analyzeAudio();
-
- } catch (error) {
- console.error('启动监听失败:', error);
- console.error('错误名称:', error.name);
- console.error('错误消息:', error.message);
- console.error('错误堆栈:', error.stack);
-
- // 针对常见错误提供更友好的提示
- let errorMsg = error.message;
- if (error.name === 'NotFoundError') {
- errorMsg = '未找到音频设备。请检查麦克风是否正确连接,或尝试选择其他设备。';
- } else if (error.name === 'NotAllowedError' || error.name === 'PermissionDeniedError') {
- errorMsg = '音频权限被拒绝。请在系统设置中允许此应用访问麦克风。';
- } else if (error.name === 'NotReadableError') {
- errorMsg = '无法读取音频设备。设备可能被其他应用占用。';
- }
-
- setAudioStatus(`启动失败: ${errorMsg}`);
- setIsListening(false);
-
- // 清理可能部分创建的资源
- await stopListening();
- }
- };
-
- const stopListening = async () => {
- if (animationIdRef.current) {
- cancelAnimationFrame(animationIdRef.current);
- animationIdRef.current = null;
- }
-
- if (microphoneRef.current) {
- microphoneRef.current.getTracks().forEach(track => track.stop());
- microphoneRef.current = null;
- }
-
- if (systemAudioRef.current) {
- systemAudioRef.current.getTracks().forEach(track => track.stop());
- systemAudioRef.current = null;
- }
-
- if (systemAudioElementRef.current) {
- systemAudioElementRef.current.pause();
- systemAudioElementRef.current.srcObject = null;
- systemAudioElementRef.current = null;
- }
-
- // 关闭麦克风的 AudioContext
- if (micAudioContextRef.current) {
- micAudioContextRef.current.onstatechange = null;
- try {
- if (micAudioContextRef.current.state !== 'closed') {
- await micAudioContextRef.current.close();
- }
- } catch (e) {
- console.warn('关闭麦克风 AudioContext 时出错:', e);
- }
- micAudioContextRef.current = null;
- }
-
- // 关闭系统音频的 AudioContext
- if (systemAudioContextRef.current) {
- systemAudioContextRef.current.onstatechange = null;
- try {
- if (systemAudioContextRef.current.state !== 'closed') {
- await systemAudioContextRef.current.close();
- }
- } catch (e) {
- console.warn('关闭系统音频 AudioContext 时出错:', e);
- }
- systemAudioContextRef.current = null;
- }
-
- // 兼容性:清理旧的 audioContextRef
- if (audioContextRef.current) {
- try {
- if (audioContextRef.current.state !== 'closed') {
- await audioContextRef.current.close();
- }
- } catch (e) {
- console.warn('关闭 AudioContext 时出错:', e);
- }
- audioContextRef.current = null;
- }
-
- // 清理分析器引用
- micAnalyserRef.current = null;
- systemAnalyserRef.current = null;
- totalAnalyserRef.current = null;
- audioContextStateLogRef.current = { mic: null, system: null };
-
- setIsListening(false);
- setAudioStatus('监听已停止');
- setMicVolumeLevel(0);
- setSystemVolumeLevel(0);
- setTotalVolumeLevel(0);
- };
-
- const analyzeAudio = () => {
- // 检查是否至少有一个 AudioContext 在运行
- const micContextActive = micAudioContextRef.current && micAudioContextRef.current.state !== 'closed';
- const systemContextActive = systemAudioContextRef.current && systemAudioContextRef.current.state !== 'closed';
-
- if (!micContextActive && !systemContextActive) {
- return;
- }
-
- let hasMic = false;
- let hasSystem = false;
- let micVolume = 0;
- let systemVolume = 0;
-
- // 分析麦克风音量
- if (micAnalyserRef.current && micDataArrayRef.current && micContextActive) {
- try {
- micAnalyserRef.current.getByteFrequencyData(micDataArrayRef.current);
- let micSum = 0;
- for (let i = 0; i < micDataArrayRef.current.length; i++) {
- micSum += micDataArrayRef.current[i];
- }
- const micAverage = micSum / micDataArrayRef.current.length;
- micVolume = Math.min(100, (micAverage / 255) * 100);
- setMicVolumeLevel(micVolume);
- hasMic = micVolume > 2;
- } catch (e) {
- console.warn('[Settings] 分析麦克风音量时出错:', e);
- }
- }
-
- // 分析系统音频音量
- if (systemAnalyserRef.current && systemDataArrayRef.current && systemContextActive) {
- try {
- systemAnalyserRef.current.getByteFrequencyData(systemDataArrayRef.current);
- let systemSum = 0;
- for (let i = 0; i < systemDataArrayRef.current.length; i++) {
- systemSum += systemDataArrayRef.current[i];
- }
- const systemAverage = systemSum / systemDataArrayRef.current.length;
- systemVolume = Math.min(100, (systemAverage / 255) * 100);
- setSystemVolumeLevel(systemVolume);
- hasSystem = systemVolume > 2;
- } catch (e) {
- console.warn('[Settings] 分析系统音频音量时出错:', e);
- }
- }
-
- // 计算总体音量(两个音源的最大值,而不是平均值,以便更好地显示活动)
- const totalVolume = Math.max(micVolume, systemVolume);
- setTotalVolumeLevel(totalVolume);
-
- // 更新状态文本
- let statusText = '正在监听';
- const activeSources = [];
- if (hasMic) activeSources.push('麦克风');
- if (hasSystem) activeSources.push('系统音频');
-
- if (activeSources.length > 0) {
- statusText += ` - ${activeSources.join(' + ')} 有输入`;
- } else {
- statusText += ' - 等待音频输入...';
- }
-
- setAudioStatus(statusText);
-
- animationIdRef.current = requestAnimationFrame(analyzeAudio);
- };
-
- const loadConfigs = async () => {
- try {
- setLoading(true);
- if (window.electronAPI?.getAllLLMConfigs) {
- const configs = await window.electronAPI.getAllLLMConfigs();
- setLlmConfigs(configs);
- }
- if (window.electronAPI?.getDefaultLLMConfig) {
- const defaultCfg = await window.electronAPI.getDefaultLLMConfig();
- setDefaultConfig(defaultCfg);
- }
- } catch (error) {
- console.error('Failed to load configs:', error);
- } finally {
- setLoading(false);
- }
- };
-
- const handleAddConfig = async () => {
- try {
- if (!newConfig.name || !newConfig.apiKey) {
- alert('请填写配置名称和API密钥');
- return;
- }
-
- if (window.electronAPI?.saveLLMConfig) {
- const configData = {
- name: newConfig.name,
- provider: newConfig.provider,
- api_key: newConfig.apiKey,
- base_url: newConfig.baseUrl || null,
- is_default: newConfig.isDefault
- };
-
- await window.electronAPI.saveLLMConfig(configData);
-
- // 重置表单
- setNewConfig({
- name: '',
- provider: 'openai',
- apiKey: '',
- baseUrl: '',
- isDefault: false
- });
- setShowAddConfig(false);
-
- // 重新加载配置列表
- loadConfigs();
- }
- } catch (error) {
- console.error('添加配置失败:', error);
- alert('添加配置失败,请重试');
- }
+ audioDevicesHook.handleAudioSourcesLoaded();
+ }, [audioDevicesHook.speaker1Source, audioDevicesHook.speaker2Source, audioDevicesHook.audioDevices]);
+
+ // 开始监听
+ const handleStartListening = async () => {
+ await audioCaptureHook.startListening({
+ selectedAudioDevice: audioDevicesHook.selectedAudioDevice,
+ captureSystemAudio: audioDevicesHook.captureSystemAudio
+ });
};
return (
@@ -706,175 +69,127 @@ function Settings() {
- {loading ? (
-
- ) : (
-
- {llmConfigs.length === 0 && !showAddConfig ? (
-
-
暂无LLM配置
-
-
- ) : (
- <>
- {!showAddConfig && (
-
-
-
- )}
- {llmConfigs.map((config) => (
-
-
-
-
- {config.name || '未命名配置'}
- {defaultConfig?.id === config.id && (
-
- 默认
-
- )}
-
-
- {config.provider || '未知提供商'}
-
-
-
- {defaultConfig?.id !== config.id && (
-
- )}
-
-
-
-
- ))}
-
- {showAddConfig && (
-
-
添加新配置
-
-
-
- setNewConfig({ ...newConfig, name: e.target.value })}
- className="w-full px-3 py-2 border border-border-light dark:border-border-dark rounded-lg bg-surface-light dark:bg-surface-dark text-text-light dark:text-text-dark focus:outline-none focus:ring-2 focus:ring-primary/50"
- placeholder="例如:OpenAI GPT-4"
- />
-
+
+ {!llmHook.showAddConfig && llmHook.llmConfigs.length > 0 && (
+
+
+
+ )}
+
+ {llmHook.llmConfigs.length === 0 && !llmHook.showAddConfig ? (
+
+
暂无LLM配置
+
+
+ ) : (
+ <>
+
-
-
- setNewConfig({ ...newConfig, apiKey: e.target.value })}
- className="w-full px-3 py-2 border border-border-light dark:border-border-dark rounded-lg bg-surface-light dark:bg-surface-dark text-text-light dark:text-text-dark focus:outline-none focus:ring-2 focus:ring-primary/50"
- placeholder="sk-..."
- />
-
+ {llmHook.showAddConfig && (
+
+ )}
+ >
+ )}
+
+ {/* 功能绑定到指定 LLM 配置 */}
+
+
+
按功能选择 LLM 配置
+
+ 不同功能可绑定不同的 LLM(未选择时沿用默认配置)。
+
+
-
-
- setNewConfig({ ...newConfig, baseUrl: e.target.value })}
- className="w-full px-3 py-2 border border-border-light dark:border-border-dark rounded-lg bg-surface-light dark:bg-surface-dark text-text-light dark:text-text-dark focus:outline-none focus:ring-2 focus:ring-primary/50"
- placeholder="https://api.openai.com/v1"
- />
-
+ {llmHook.featureBindingError ? (
+
+ {llmHook.featureBindingError}
+
+ ) : null}
+
+
+ {[
+ { key: 'suggestion', label: '对话建议' },
+ { key: 'review', label: '复盘报告' }
+ ].map((item) => (
+
+
{item.label}
+
+
+ ))}
+
+
+
+
-
- setNewConfig({ ...newConfig, isDefault: e.target.checked })}
- className="w-4 h-4 text-primary border-border-light dark:border-border-dark rounded focus:ring-primary"
- />
-
-
+ {/* 对话建议配置 */}
+
+
+
+ auto_awesome
+ 对话建议配置
+
+
+ 控制 LLM 生成选项的触发策略、上下文窗口以及使用的模型。
+
+
-
-
-
-
-
-
- )}
- >
- )}
+ {suggestionHook.suggestionLoading ? (
+
+ ) : (
+
)}
@@ -885,200 +200,27 @@ function Settings() {
音频输入设置
- {audioDevices.length === 0 ? (
-
-
- 未检测到音频输入设备
-
-
-
- ) : (
-
-
-
-
-
- 选择要使用的麦克风设备(用于识别用户说话)
-
- {speaker1Source && (
-
- check_circle
- 已保存配置
-
- )}
-
-
-
- {
- const checked = e.target.checked;
- setCaptureSystemAudio(checked);
-
- // 统一通过 saveAudioSource 确保存在 speaker2 配置:
- // - 如果之前没有 speaker2Source,会自动创建
- // - 如果已有,则仅更新 is_active
- try {
- const deviceId =
- (speaker2Source && speaker2Source.device_id) ||
- selectedSystemAudioDevice ||
- 'system-loopback';
- const deviceName =
- (speaker2Source && speaker2Source.device_name) ||
- '系统音频(屏幕捕获)';
-
- await saveAudioSource(
- '角色(系统音频)',
- deviceId,
- deviceName,
- checked
- );
- } catch (err) {
- console.error('更新系统音频源配置失败:', err);
- }
-
- if (!checked) {
- // 关闭系统音频时清理错误提示
- setDesktopCapturerError(null);
- }
- }}
- className="rounded border-border-light dark:border-border-dark text-primary focus:ring-primary"
- />
-
-
-
-
-
-
-
- 测试麦克风监听
-
-
-
- {!isListening ? (
-
- ) : (
-
- )}
-
-
- {isListening && (
-
-
- {audioStatus.includes('✅') || audioStatus.includes('成功') ? (
- check_circle
- ) : audioStatus.includes('⚠️') || audioStatus.includes('❌') || audioStatus.includes('失败') || audioStatus.includes('错误') ? (
- error
- ) : (
- mic
- )}
- {audioStatus.replace(/[✅⚠️❌]/g, '').trim()}
-
-
- {desktopCapturerError && (
-
-
- warning
- 原生屏幕音频捕获失败
-
-
{desktopCapturerError}
-
- 自动捕获系统音频失败。请检查系统权限或驱动。
-
-
- )}
-
-
-
-
麦克风
-
-
- {micVolumeLevel.toFixed(0)}%
-
-
-
- {captureSystemAudio && (
-
-
系统音频
-
-
- {systemVolumeLevel.toFixed(0)}%
-
-
- )}
-
-
-
总音量
-
-
- {totalVolumeLevel.toFixed(0)}%
-
-
-
-
- )}
-
-
-
- )}
+
+
+
{/* ASR设置 */}
@@ -1101,20 +243,9 @@ function Settings() {
- {/* 其他设置 */}
-
-
- tune
- 其他设置
-
-
- 更多设置选项即将推出
-
-
);
}
-export default Settings;
-
+export default Settings;
\ No newline at end of file
diff --git a/desktop/src/renderer/pages/StoryTreePage.jsx b/desktop/src/renderer/pages/StoryTreePage.jsx
new file mode 100644
index 0000000..8f638a4
--- /dev/null
+++ b/desktop/src/renderer/pages/StoryTreePage.jsx
@@ -0,0 +1,691 @@
+
+import React, { useEffect, useState, useRef, useMemo } from 'react';
+import { useParams, useNavigate } from 'react-router-dom';
+import { useConversationReview } from '../hooks/useConversationReview.js';
+
+export default function StoryTreePage() {
+ const { conversationId } = useParams();
+ const navigate = useNavigate();
+ const { review, isLoading } = useConversationReview(conversationId);
+ const [conversation, setConversation] = useState(null);
+
+ const [visibleIndex, setVisibleIndex] = useState(0);
+ const [hoveredIndex, setHoveredIndex] = useState(null);
+ const [isPlaying, setIsPlaying] = useState(false);
+ const [isAudioPlaying, setIsAudioPlaying] = useState(false);
+ const [audioSource, setAudioSource] = useState(null);
+ const audioRef = useRef(null);
+ const scrollContainerRef = useRef(null);
+
+ // Constants for tree layout
+ const STEP_X = 180;
+ const START_X = 120;
+ const BASE_Y = 170;
+ const OFFSET_Y = 80;
+
+ useEffect(() => {
+ window.electronAPI?.getConversationById(conversationId).then(setConversation);
+ }, [conversationId]);
+
+ // Calculate node positions based on choice_type
+ const nodes = useMemo(() => {
+ if (!review?.nodes) return [];
+
+ const res = [];
+ // Each review.node[i] is a decision made at Point i.
+ // The segment from Point i to Point i+1 is the result of that decision.
+
+ for (let i = 0; i <= review.nodes.length; i++) {
+ let y = BASE_Y;
+ if (i > 0) {
+ // Point i's Y position is determined by the choice made at Point i-1
+ const prevNode = review.nodes[i - 1];
+ if (prevNode.choice_type === 'matched') y -= OFFSET_Y;
+ if (prevNode.choice_type === 'custom') y += OFFSET_Y;
+ }
+
+ const decisionData = review.nodes[i];
+ res.push({
+ ...(decisionData || {}),
+ node_id: decisionData?.node_id || `point_${i}`,
+ x: START_X + i * STEP_X,
+ y: y,
+ is_terminal: i === review.nodes.length,
+ has_decision: !!decisionData
+ });
+ }
+ return res;
+ }, [review?.nodes]);
+
+ // Generate main path (Always full path for animation)
+ const mainPathD = useMemo(() => {
+ if (nodes.length === 0) return '';
+ let d = `M ${nodes[0].x} ${nodes[0].y}`;
+ for (let i = 1; i < nodes.length; i++) {
+ const curr = nodes[i];
+ const prev = nodes[i - 1];
+ const cp1x = prev.x + (curr.x - prev.x) * 0.5;
+ const cp2x = curr.x - (curr.x - prev.x) * 0.5;
+ d += ` C ${cp1x} ${prev.y}, ${cp2x} ${curr.y}, ${curr.x} ${curr.y}`;
+ }
+ return d;
+ }, [nodes]);
+
+ // Measure path length for animation
+ const pathRef = useRef(null);
+ const [pathLength, setPathLength] = useState(0);
+
+ useEffect(() => {
+ if (pathRef.current) {
+ setPathLength(pathRef.current.getTotalLength());
+ }
+ }, [mainPathD]);
+
+ // Auto-play effect
+ useEffect(() => {
+ let interval;
+ if (isPlaying && nodes.length > 0) {
+ interval = setInterval(() => {
+ setVisibleIndex(prev => {
+ const next = prev + 1;
+ if (next >= nodes.length) {
+ setIsPlaying(false);
+ return nodes.length - 1;
+ }
+ if (scrollContainerRef.current) {
+ const container = scrollContainerRef.current;
+ const targetX = nodes[next].x - container.offsetWidth / 2 + STEP_X / 2;
+ container.scrollTo({ left: targetX, behavior: 'smooth' });
+ }
+ return next;
+ });
+ }, 1200);
+ }
+ return () => clearInterval(interval);
+ }, [isPlaying, nodes]);
+
+ // Start auto-play after mount
+ useEffect(() => {
+ if (nodes.length > 0) {
+ const timer = setTimeout(() => setIsPlaying(true), 800);
+ return () => clearTimeout(timer);
+ }
+ }, [nodes.length]);
+
+ const handlePlayPause = () => {
+ if (visibleIndex >= nodes.length - 1) {
+ setVisibleIndex(0);
+ if (scrollContainerRef.current) {
+ scrollContainerRef.current.scrollTo({ left: 0, behavior: 'smooth' });
+ }
+ }
+ setIsPlaying(!isPlaying);
+ };
+
+ const handlePlayAudio = async (filePath) => {
+ if (!filePath) return;
+
+ try {
+ // If clicking the same audio that's already playing, toggle it
+ if (audioSource && isAudioPlaying) {
+ audioRef.current.pause();
+ setIsAudioPlaying(false);
+ return;
+ }
+
+ const dataUrl = await window.electronAPI.asrGetAudioDataUrl(filePath);
+ if (dataUrl) {
+ setAudioSource(dataUrl);
+ setIsAudioPlaying(true);
+ // The actual play will be triggered by useEffect on audioSource change
+ }
+ } catch (error) {
+ console.error('Failed to play audio:', error);
+ }
+ };
+
+ useEffect(() => {
+ if (audioSource && audioRef.current) {
+ audioRef.current.play().catch(err => {
+ console.error('Audio play error:', err);
+ setIsAudioPlaying(false);
+ });
+ }
+ }, [audioSource]);
+
+ const handleDeleteAudio = async (node) => {
+ if (!node.audio_file_path || !node.audio_record_id) return;
+
+ const confirmed = window.confirm('确定要删除这段录音吗?物理文件将被移除且无法恢复。');
+ if (!confirmed) return;
+
+ try {
+ const result = await window.electronAPI.asrDeleteAudioFile({
+ recordId: node.audio_record_id,
+ filePath: node.audio_file_path
+ });
+
+ if (result.success) {
+ // Update local state to hide the button
+ // We need to find the node in the review.nodes array and update it
+ // Since review is from a hook, we might not be able to update it directly easily
+ // But StoryTreePage will re-render if we can trigger a refresh or local state update
+ // For now, let's just update the local nodes useMemo dependency if possible or use a local override state
+
+ // Simple approach: show a toast and disable the button locally for this session
+ node.audio_file_path = null;
+ if (audioSource) {
+ audioRef.current.pause();
+ setIsAudioPlaying(false);
+ }
+ alert('录音已删除');
+ } else {
+ alert('删除失败: ' + result.error);
+ }
+ } catch (error) {
+ console.error('Failed to delete audio:', error);
+ alert('删除过程中发生错误');
+ }
+ };
+
+ if (isLoading || !review) {
+ return (
+
+ );
+ }
+
+ const { summary } = review;
+ const characterName = conversation?.character_name || '...';
+ const activeNode = hoveredIndex !== null ? nodes[hoveredIndex] : nodes[visibleIndex];
+
+ const firstTimestamp = nodes[0]?.timestamp || 0;
+
+ // Format timestamp to relative e.g. T+15s
+ // Format timestamp to proper HH:mm:ss
+ const formatTime = (ts) => {
+ if (!ts) return '';
+ try {
+ return new Date(ts).toLocaleTimeString('zh-CN', {
+ hour12: false,
+ hour: '2-digit',
+ minute: '2-digit',
+ second: '2-digit'
+ });
+ } catch (e) {
+ return '';
+ }
+ };
+
+ return (
+
+
+
+ {/* Header */}
+
+
+ {/* Bento Grid */}
+
+
+ {/* Stats Card */}
+
+
+ 复盘统计
+
+
+
+ 好感变化
+ 0 ? 'text-success' : summary.total_affinity_change < 0 ? 'text-error' : 'text-text-light dark:text-text-dark'}`}>
+ {summary.total_affinity_change > 0 ? '+' : ''}{summary.total_affinity_change || 0}
+
+
+
+
+
{summary.matched_count || 0}
+
命中建议
+
+
+
{summary.custom_count || 0}
+
自定义
+
+
+
+
+
+ {/* Dynamic Insight Panel */}
+
+ {activeNode ? (
+
+
+
+
+ {formatTime(activeNode.timestamp)}
+
+ {activeNode.has_source ? (activeNode.node_title || '关键决策') : (activeNode.node_title || '转折点分析')}
+
+ {/* Source Badge */}
+ {activeNode.has_source ? (
+
+ Suggestion
+
+ ) : (
+
+ Insight
+
+ )}
+
+
+ {activeNode.choice_type === 'matched' ? '命中建议' : '自定义回复'}
+
+
+
+
+
+
用户行为
+ {activeNode.audio_file_path && (
+
+
+
+
+ )}
+
+
{activeNode.user_description}
+
+
+
决策点评
+
{activeNode.reasoning}
+
+
+
+
+ {/* Show Alternative/Ghost Options */}
+ {/* Show Alternative/Ghost Options */}
+ {activeNode.has_source && (
+
+
+ {activeNode.ghost_options?.length > 0 ? "其他可能性 / 建议选项" : "当时仅有一条建议"}
+
+ {activeNode.ghost_options?.length > 0 ? (
+
+ {activeNode.ghost_options.map((opt, idx) => (
+
+
+ {opt.content}
+
+ ))}
+
+ ) : (
+
+ 无其他候选建议
+
+ )}
+
+ )}
+
+ ) : (
+
+ 点击或悬停节点查看详细分析
+
+ )}
+
+
+ {/* Plot Tree */}
+
+
+ {/* Tree Header */}
+
+
+
+ 互动剧情树 · {characterName}
+
+
+
+
+
+ 你的选择
+
+
+ 命中建议
+
+
+ 自定义
+
+
+ 错失机会
+
+
+
+
+ {/* Background Labels */}
+
+
+ 情感 / 共鸣
+
+
+ 理性 / 逻辑
+
+
+
+ {/* Scrollable SVG Container */}
+
+
+
+
+ {/* Removed Bottom Text Labels for Ghost Options as they are now inline */}
+
+ {/* Bottom Label Removed */}
+
+
+
+
+
+ {/* Summary Card */}
+ {summary.conversation_summary && (
+
+
+ 对话总结
+
+
+ {summary.conversation_summary || summary.chat_overview}
+
+
+ )}
+
+
+
+
+ {/* Custom CSS for animations */}
+
+
+ );
+}
diff --git a/desktop/src/renderer/pages/asrSettingsUtils.js b/desktop/src/renderer/pages/asrSettingsUtils.js
new file mode 100644
index 0000000..95f7754
--- /dev/null
+++ b/desktop/src/renderer/pages/asrSettingsUtils.js
@@ -0,0 +1,48 @@
+export const SIZE_UNITS = ['B', 'KB', 'MB', 'GB', 'TB'];
+
+export function formatBytes(bytes) {
+ if (!bytes || bytes <= 0) return '0 B';
+ const exponent = Math.min(Math.floor(Math.log(bytes) / Math.log(1024)), SIZE_UNITS.length - 1);
+ const value = bytes / (1024 ** exponent);
+ return `${value.toFixed(value >= 10 || exponent === 0 ? 0 : 1)} ${SIZE_UNITS[exponent]}`;
+}
+
+export function formatSpeed(bytesPerSecond) {
+ if (!bytesPerSecond || bytesPerSecond <= 0) return '—';
+ return `${formatBytes(bytesPerSecond)}/s`;
+}
+
+export function buildStatusMap(statusList = []) {
+ return statusList.reduce((acc, status) => {
+ if (!status?.modelId) return acc;
+ acc[status.modelId] = {
+ bytesPerSecond: 0,
+ ...status,
+ };
+ return acc;
+ }, {});
+}
+
+export function calculateProgress(downloadedBytes, totalBytes) {
+ if (!totalBytes || totalBytes <= 0) return 0;
+ return Math.min(100, Math.round((downloadedBytes / totalBytes) * 100));
+}
+
+export function isPresetActive(preset, activeModelId) {
+ if (!activeModelId) return false;
+ return activeModelId === preset.id || activeModelId === preset.repoId;
+}
+
+export const engineNames = {
+ funasr: 'FunASR',
+ siliconflow: 'SiliconFlow(云端)',
+};
+
+export const languageOptions = [
+ { value: 'zh', label: '中文' },
+ { value: 'en', label: '英文' },
+ { value: 'ja', label: '日文' },
+ { value: 'auto', label: '自动检测' },
+];
+
+
diff --git a/desktop/src/renderer/utils/audioUtils.js b/desktop/src/renderer/utils/audioUtils.js
new file mode 100644
index 0000000..5fedb19
--- /dev/null
+++ b/desktop/src/renderer/utils/audioUtils.js
@@ -0,0 +1,115 @@
+/**
+ * 音频处理工具函数
+ */
+
+let audioContextStateLogRef = { mic: null, system: null };
+
+/**
+ * 记录AudioContext的详细信息
+ * @param {AudioContext} context - AudioContext实例
+ * @param {string} label - 标签(mic/system)
+ */
+export const logAudioContextDetails = (context, label) => {
+ if (!context) {
+ console.warn(`[AudioDebug] ${label} AudioContext 不存在或已销毁`);
+ return;
+ }
+
+ const details = {
+ state: context.state,
+ sampleRate: context.sampleRate,
+ baseLatency: context.baseLatency ?? 'n/a',
+ outputLatency: context.outputLatency ?? 'n/a',
+ currentTime: Number(context.currentTime.toFixed(3))
+ };
+
+ console.log(`[AudioDebug] ${label} AudioContext 详情:`, details);
+};
+
+/**
+ * 为AudioContext附加调试处理器
+ * @param {AudioContext} context - AudioContext实例
+ * @param {string} label - 标签(mic/system)
+ */
+export const attachAudioContextDebugHandlers = (context, label) => {
+ if (!context) return;
+
+ const handler = () => {
+ const prevState = audioContextStateLogRef.current?.[label];
+ if (prevState !== context.state) {
+ console.log(`[AudioDebug] ${label} AudioContext 状态: ${context.state}`);
+ audioContextStateLogRef.current = audioContextStateLogRef.current || {};
+ audioContextStateLogRef.current[label] = context.state;
+ }
+
+ if (context.state === 'suspended') {
+ console.warn(`[AudioDebug] ${label} AudioContext 已暂停,尝试恢复...`);
+ } else if (context.state === 'closed') {
+ console.warn(`[AudioDebug] ${label} AudioContext 已关闭`);
+ }
+ };
+
+ context.onstatechange = handler;
+ logAudioContextDetails(context, label);
+};
+
+/**
+ * 创建全局错误处理器用于AudioContext错误
+ * @param {Function} logAudioContextDetails - 日志函数引用
+ * @returns {Function} 清理函数
+ */
+export const createAudioErrorHandler = (logAudioContextDetails, micRef, sysRef) => {
+ const handleWindowError = (event) => {
+ if (event?.message?.includes('AudioContext')) {
+ console.error('[AudioDebug] 捕获到全局 AudioContext 错误:', event.message, event.error);
+
+ if (typeof logAudioContextDetails === 'function') {
+ logAudioContextDetails(micRef?.current, '麦克风');
+ logAudioContextDetails(sysRef?.current, '系统音频');
+ }
+ }
+ };
+
+ window.addEventListener('error', handleWindowError);
+ return () => window.removeEventListener('error', handleWindowError);
+};
+
+/**
+ * 分析音频音量
+ * @param {AnalyserNode} analyser - 分析器节点
+ * @param {Uint8Array} dataArray - 数据数组
+ * @returns {number} 音量百分比 (0-100)
+ */
+export const analyzeAudioVolume = (analyser, dataArray) => {
+ if (!analyser || !dataArray) return 0;
+
+ try {
+ analyser.getByteFrequencyData(dataArray);
+ let sum = 0;
+ for (let i = 0; i < dataArray.length; i++) {
+ sum += dataArray[i];
+ }
+ const average = sum / dataArray.length;
+ return Math.min(100, (average / 255) * 100);
+ } catch (e) {
+ console.warn('[AudioDebug] 分析音频音量时出错:', e);
+ return 0;
+ }
+};
+
+/**
+ * 关闭AudioContext
+ * @param {AudioContext} context - AudioContext实例
+ */
+export const closeAudioContext = async (context) => {
+ if (!context) return;
+
+ context.onstatechange = null;
+ try {
+ if (context.state !== 'closed') {
+ await context.close();
+ }
+ } catch (e) {
+ console.warn('关闭 AudioContext 时出错:', e);
+ }
+};
\ No newline at end of file
diff --git a/desktop/src/renderer/utils/validation.js b/desktop/src/renderer/utils/validation.js
new file mode 100644
index 0000000..3ba9933
--- /dev/null
+++ b/desktop/src/renderer/utils/validation.js
@@ -0,0 +1,67 @@
+/**
+ * 数值验证工具函数
+ */
+
+/**
+ * 将值强制转换为数字,如果无效则返回默认值
+ * @param {*} value - 要转换的值
+ * @param {number} fallback - 默认值
+ * @returns {number}
+ */
+export const coerceNumberValue = (value, fallback) => {
+ const parsed = Number(value);
+ return Number.isFinite(parsed) ? parsed : fallback;
+};
+
+/**
+ * 验证非空字符串
+ * @param {string} str - 要验证的字符串
+ * @returns {boolean}
+ */
+export const isNonEmptyString = (str) => {
+ return typeof str === 'string' && str.trim().length > 0;
+};
+
+/**
+ * 验证API密钥格式
+ * @param {string} apiKey - API密钥
+ * @returns {boolean}
+ */
+export const isValidApiKey = (apiKey) => {
+ return isNonEmptyString(apiKey) && apiKey.length > 10;
+};
+
+/**
+ * 验证Base URL格式
+ * @param {string} url - Base URL
+ * @returns {boolean}
+ */
+export const isValidBaseUrl = (url) => {
+ if (!url) return true; // 可选字段
+ try {
+ const parsed = new URL(url);
+ return parsed.protocol === 'http:' || parsed.protocol === 'https:';
+ } catch {
+ return false;
+ }
+};
+
+/**
+ * 验证模型名称
+ * @param {string} modelName - 模型名称
+ * @returns {boolean}
+ */
+export const isValidModelName = (modelName) => {
+ return isNonEmptyString(modelName) && modelName.trim().length > 0;
+};
+
+/**
+ * 验证超时时间(毫秒)
+ * @param {number|string|null|undefined} value - 超时时间
+ * @returns {boolean}
+ */
+export const isValidTimeoutMs = (value) => {
+ if (value === '' || value === null || value === undefined) return true;
+ const num = Number(value);
+ return Number.isFinite(num) && num > 0;
+};
diff --git a/desktop/src/shared/asr-models.js b/desktop/src/shared/asr-models.js
index 054498b..d9f1996 100644
--- a/desktop/src/shared/asr-models.js
+++ b/desktop/src/shared/asr-models.js
@@ -1,113 +1,75 @@
export const ASR_MODEL_PRESETS = [
- // FunASR 模型(默认)- 使用 ModelScope 下载,国内访问更稳定
- // FunASR 通过 name_maps_ms 将简称映射到 ModelScope 仓库:
- // paraformer-zh-streaming -> iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online
- // paraformer-zh -> iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
- // ct-punc -> iic/punc_ct-transformer_cn-en-common-vocab471067-large
- // fa-zh -> iic/speech_timestamp_prediction-v1-16k-offline
+ // SiliconFlow 云端模型(默认)
+ {
+ id: 'siliconflow-cloud',
+ label: 'SiliconFlow Cloud (推荐)',
+ description: '远程 API 模式,无需本地下载模型,轻量级,但需要联网。',
+ engine: 'siliconflow',
+ sizeBytes: 0,
+ recommendedSpec: '任意配置',
+ speedHint: '网络延迟',
+ language: 'zh',
+ isDefault: true,
+ isRemote: true,
+ },
+ // 百度实时 ASR (Demo)
+ {
+ id: 'baidu-cloud',
+ label: 'Baidu Cloud (Demo)',
+ description: '百度语音实时识别 API,低延迟,高精度,需联网。',
+ engine: 'baidu',
+ sizeBytes: 0,
+ recommendedSpec: '任意配置',
+ speedHint: '网络延迟',
+ language: 'zh',
+ isRemote: true,
+ },
+ // FunASR ONNX 模型
+ // 2-Pass 架构: VAD + 流式ASR + 离线ASR + 标点
{
id: 'funasr-paraformer',
label: 'FunASR ParaFormer',
- description: 'FunASR 流式识别,专为中文优化,标点准确,速度快',
- engine: 'funasr', // 指定使用 FunASR 引擎
- // FunASR 内部使用简称 "paraformer-zh-streaming",自动从 ModelScope 下载
- repoId: 'paraformer-zh-streaming',
- // ModelScope 实际仓库 ID
+ description: 'FunASR 流式识别,INT8 量化版,体积更小、速度更快,精度略低',
+ engine: 'funasr',
+ // ONNX 模型配置 (用于 2-Pass 架构)
+ onnxModels: {
+ vad: 'damo/speech_fsmn_vad_zh-cn-16k-common-onnx',
+ online: 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online-onnx',
+ offline: 'damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-onnx',
+ punc: 'damo/punc_ct-transformer_zh-cn-common-vocab272727-onnx',
+ },
+ // 用于缓存路径检测 (兼容 model-manager.js)
+ repoId: 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online-onnx',
modelScopeRepoId: 'iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online',
- sizeBytes: 300 * 1024 * 1024,
+ // 本地统计(ModelScope 缓存): online quant ~240MB + offline quant ~247MB + punc ~274MB + VAD ~1MB ≈ 760MB
+ sizeBytes: 760 * 1024 * 1024, // 约 0.76GB(INT8 量化包体,含 VAD/流式/离线/标点)
recommendedSpec: '≥4 核 CPU / ≥4GB 内存',
speedHint: '实时 2x-3x',
language: 'zh',
- isDefault: true,
},
{
id: 'funasr-paraformer-large',
label: 'FunASR ParaFormer Large',
- description: 'FunASR 大模型,更高准确率,专为中文设计',
+ description: 'FunASR 非量化 FP32 版,精度最高但体积更大、占用更高',
engine: 'funasr',
- // FunASR 内部使用简称 "paraformer-zh",自动从 ModelScope 下载
- repoId: 'paraformer-zh',
- // ModelScope 实际仓库 ID
+ // ONNX 模型配置 - 使用更大的离线模型
+ onnxModels: {
+ vad: 'damo/speech_fsmn_vad_zh-cn-16k-common-onnx',
+ online: 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online-onnx',
+      // Large 版本复用同一离线 ONNX 仓库,但通过下方 quantize: false 加载非量化(FP32)权重,精度更高
+ offline: 'damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-onnx',
+ punc: 'damo/punc_ct-transformer_zh-cn-common-vocab272727-onnx',
+ },
+ quantize: false, // Large 版本不使用量化,精度更高
+ // 用于缓存路径检测 (兼容 model-manager.js)
+ repoId: 'damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-onnx',
modelScopeRepoId: 'iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
- sizeBytes: 500 * 1024 * 1024,
- recommendedSpec: '≥8 核 CPU / ≥8GB 内存',
- speedHint: '接近实时',
+ // 估算:INT8 → FP32 约 4x 体积,结合 punc/VAD 实测,整包约 2.1GB
+ sizeBytes: 2100 * 1024 * 1024, // 约 2.1GB(FP32 未量化)
+ recommendedSpec: '≥8 核 CPU / ≥8GB 内存(建议 12GB+ 更流畅)',
+ speedHint: '接近实时 / 精度更高',
language: 'zh',
},
-
- // Faster-Whisper 模型
- {
- id: 'tiny',
- label: 'Whisper Tiny',
- description: '最快速,约 75MB,适合体验或低端设备,准确率较低',
- engine: 'faster-whisper', // 指定使用 Faster-Whisper 引擎
- repoId: 'Systran/faster-whisper-tiny',
- modelScopeRepoId: 'gpustack/faster-whisper-tiny',
- sizeBytes: 76 * 1024 * 1024,
- recommendedSpec: '≥2 核 CPU / ≥2GB 内存',
- speedHint: '实时 4x+',
- language: 'multilingual',
- },
- {
- id: 'base',
- label: 'Whisper Base',
- description: '速度与准确率均衡,约 150MB,适合多语言场景',
- engine: 'faster-whisper',
- repoId: 'Systran/faster-whisper-base',
- modelScopeRepoId: 'gpustack/faster-whisper-base',
- sizeBytes: 152 * 1024 * 1024,
- recommendedSpec: '≥4 核 CPU / ≥4GB 内存',
- speedHint: '实时 2x-3x',
- language: 'multilingual',
- },
- {
- id: 'small',
- label: 'Whisper Small',
- description: '准确率更高,约 466MB,需要更高算力',
- engine: 'faster-whisper',
- repoId: 'Systran/faster-whisper-small',
- modelScopeRepoId: 'gpustack/faster-whisper-small',
- sizeBytes: 466 * 1024 * 1024,
- recommendedSpec: '≥6 核 CPU / ≥6GB 内存',
- speedHint: '实时 1x-2x',
- language: 'multilingual',
- },
- {
- id: 'medium',
- label: 'Whisper Medium',
- description: '旗舰模型,约 1.4GB,中文表现较好但资源占用大',
- engine: 'faster-whisper',
- repoId: 'Systran/faster-whisper-medium',
- modelScopeRepoId: 'gpustack/faster-whisper-medium',
- sizeBytes: 1500 * 1024 * 1024,
- recommendedSpec: '≥8 核 CPU / ≥8GB 内存',
- speedHint: '接近实时',
- language: 'multilingual',
- },
- {
- id: 'large-v2',
- label: 'Whisper Large v2',
- description: '多语言最高精度,约 2.9GB,运行成本高',
- engine: 'faster-whisper',
- repoId: 'Systran/faster-whisper-large-v2',
- modelScopeRepoId: 'gpustack/faster-whisper-large-v2',
- sizeBytes: 2950 * 1024 * 1024,
- recommendedSpec: '≥12 核 CPU / ≥12GB 内存',
- speedHint: '离线或高端主机',
- language: 'multilingual',
- },
- {
- id: 'large-v3',
- label: 'Whisper Large v3',
- description: '最新大模型,约 3.1GB,更好上下文理解',
- engine: 'faster-whisper',
- repoId: 'Systran/faster-whisper-large-v3',
- modelScopeRepoId: 'gpustack/faster-whisper-large-v3',
- sizeBytes: 3100 * 1024 * 1024,
- recommendedSpec: '≥16 核 CPU / ≥16GB 内存',
- speedHint: '离线或服务器',
- language: 'multilingual',
- },
];
export function getAsrModelPreset(modelId) {
@@ -115,3 +77,9 @@ export function getAsrModelPreset(modelId) {
}
+
+
+
+
+
+
diff --git a/desktop/vite.config.js b/desktop/vite.config.js
index cbc8bd5..9332270 100644
--- a/desktop/vite.config.js
+++ b/desktop/vite.config.js
@@ -1,9 +1,29 @@
import { defineConfig } from 'vite';
import react from '@vitejs/plugin-react';
import path from 'path';
+import { performance } from 'node:perf_hooks';
+
+// 开发服务器耗时日志
+const devPerfPlugin = () => {
+ const start = performance.now();
+ return {
+ name: 'dev-perf-logger',
+ apply: 'serve',
+ configResolved() {
+ const cost = (performance.now() - start).toFixed(1);
+ console.log(`[VitePerf] configResolved: ${cost}ms`);
+ },
+ configureServer(server) {
+ server.httpServer?.once('listening', () => {
+ const cost = (performance.now() - start).toFixed(1);
+ console.log(`[VitePerf] dev server listening: ${cost}ms`);
+ });
+ }
+ };
+};
export default defineConfig({
- plugins: [react()],
+ plugins: [react(), devPerfPlugin()],
base: './',
// 指定根目录为 src/renderer,这样 Vite 就能找到 index.html 和 hud.html
root: path.resolve(__dirname, 'src/renderer'),
diff --git a/docs/20251225-004407.gif b/docs/20251225-004407.gif
new file mode 100644
index 0000000..ec0ecdc
Binary files /dev/null and b/docs/20251225-004407.gif differ
diff --git a/docs/ci-debugging-guide.md b/docs/ci-debugging-guide.md
new file mode 100644
index 0000000..a04b789
--- /dev/null
+++ b/docs/ci-debugging-guide.md
@@ -0,0 +1,198 @@
+# GitHub Actions 自动化排查与调试指南
+
+本文档总结了利用 MCP 浏览器工具与 Cursor 协作排查 GitHub Actions 问题的完整工作流。这种方法能够高效定位 CI/CD 构建失败的根因,特别是针对复杂的跨平台编译和依赖问题。
+
+## 核心思路
+
+传统的 CI 排查通常依赖于阅读冗长的文本日志或盲目推测。本方案的核心在于:
+**像用户一样使用浏览器,直观地定位错误现场,结合源码分析,精准修复。**
+
+## 推荐排查工具
+
+1. **MCP Browser Tools** (Cursor 内置):
+ - `browser_navigate`: 访问 GitHub Actions 页面。
+ - `browser_snapshot`: 获取页面结构快照,理解页面内容。
+ - `browser_click`: 模拟点击,深入查看特定 Job 或 Step 的日志。
+ - `browser_wait_for`: 等待动态内容加载。
+
+2. **Codebase Tools**:
+ - `read_file`: 读取工作流配置文件 (`.yml`) 和构建脚本。
+ - `grep` / `search`: 搜索报错关键词。
+
+## CI 验证全流程
+
+当你修改了代码(例如修复了构建逻辑)并希望验证 CI 是否正常时,请遵循以下步骤:
+
+### 1. 提交代码触发 Action
+首先,确保你的修改已提交并推送到远程分支。
+```bash
+git add .
+git commit -m "fix: CI build logic"
+git push
+```
+GitHub Actions 会根据 `.github/workflows/*.yml` 中的 `on: push` 规则自动触发构建。
+
+### 2. 访问 Action 列表页
+推送后,立即通过浏览器工具访问 Actions 页面,确认 Action 是否已被触发。
+
+```javascript
+// 示例工具调用
+browser_navigate({ url: "https://github.com/JStone2934/LiveGalGame/actions" })
+```
+- **目的**:确认最新的 commit 确实触发了一个新的 Workflow Run。
+- **预期**:你应该能看到一个状态为 "in progress"(黄色旋转图标)或 "queued" 的新记录,标题应包含你刚才的 commit message。
+
+### 3. 实时监控构建状态
+使用快照工具周期性查看页面,或点击进入详情页等待。
+
+```javascript
+// 示例工具调用
+browser_snapshot()
+// 如果看到运行记录,点击进入详情
+browser_click({ element: "in progress: Run ...", ref: "e211" })
+```
+- **目的**:监控构建进度。
+- **技巧**:
+ - CI 构建通常需要几分钟到几十分钟。
+ - 你可以使用 `browser_wait_for` 或间隔性调用 `browser_snapshot` 来观察状态变化。
+ - 关注 "Jobs" 列表,看各个平台的构建任务(如 `Desktop mac (arm64)`)是否开始运行。
+
+### 4. 快速定位失败(如果有)
+如果构建变成红色(Failure),立即按照以下步骤定位:
+
+1. **定位失败的 Run**:
+ ```javascript
+ browser_snapshot()
+ // 点击红色的失败记录
+ browser_click({ element: "failed: Run ...", ref: "e211" })
+ ```
+2. **深入 Job 详情**:
+ 进入详情页后,找到失败的 Job(例如 `Desktop mac (arm64)`),点击进入。
+ ```javascript
+ browser_wait_for({ time: 2 }) // 等待加载
+ browser_click({ element: "failed: Desktop mac (arm64)", ref: "..." })
+ ```
+3. **获取详细报错日志**:
+ 查看具体的 Step 日志,复制报错信息进行分析。
+ (参考下文 "排查实战案例")
+
+### 5. 验证成功
+如果构建变成绿色(Success),说明修复有效。
+- **检查 Artifacts**:在 Run 详情页底部,确认是否生成了预期的构建产物(如 `.dmg` 或 `.exe` 文件)。
+- **下载验证**(可选):如果需要回归测试,可下载 Artifact 进行本地运行(参考 `docs/asr-ci-regression.md`)。
+
+---
+
+## 排查工作流详解(当构建失败时)
+
+### 4. 获取详细报错日志
+点击失败的 Job,查看具体的 Step 日志。
+
+**排查实战案例(ASR 依赖编译失败)**:
+1. **现象**:Job `Desktop mac (arm64)` 失败。
+2. **操作**:点击 Job 链接。
+3. **发现**:在 "Install ASR dependencies" 步骤中,pip 报错:
+ ```
+ Package libavformat was not found in the pkg-config search path.
+ ERROR: Failed to build 'av' when getting requirements to build wheel
+ ```
+4. **分析**:
+ - 错误表明 `av` 包试图从源码编译,但系统缺少 `ffmpeg` (libavformat) 库。
+ - 检查 `.github/workflows/desktop-build.yml`,发现是在 `Setup Python` 后直接运行 `pip install -r requirements.txt`。
+ - CI 环境(GitHub Hosted Runner)默认没有安装 FFmpeg 开发库。
+
+### 5. 源码比对与根因分析
+回到代码库,检查相关配置。
+
+- **检查 `requirements.txt`**:确认 `av==10.0.0` 是否存在。
+- **检查构建脚本**:查看 `scripts/prepare-python-env.js`。
+- **发现矛盾**:
+ - 我们已经在 `prepare-python-env.js` 中实现了使用 `conda` 安装预编译二进制包(不需要本地编译)。
+ - 但 CI Workflow 中却保留了一个旧的步骤 `pip install -r requirements.txt`,它运行在 `prepare-python-env.js` **之前**。
+ - **结论**:CI 的冗余步骤抢先执行了 pip 安装,导致编译失败。
+
+### 6. 修复方案
+**移除 CI 中的冗余步骤,统一构建逻辑。**
+
+将环境准备工作完全收敛到 `prepare-python-env.js` 中,利用 Conda 的二进制分发能力解决编译依赖问题。
+
+```yaml
+# 修改前
+- name: Setup Python ...
+- name: Install ASR dependencies ... # ❌ 这里直接 pip install 导致编译失败
+ run: pip install -r requirements.txt
+
+# 修改后
+# ✅ 移除上述步骤,直接进入 Build 阶段,由脚本统一处理
+- name: Build desktop package
+ run: pnpm run build:mac # 内部调用 prepare-python-env.js
+```
+
+## 实操示例:从最新失败的 Action 中提取详细错误(mac 构建)
+
+以下示例基于 `feat/desktop-branch` 最新一次失败的 Run(#31,commit `e2684c9`),演示如何迅速获取最关键的错误日志:
+
+1) 打开 Action 列表
+ - 访问 `https://github.com/JStone2934/LiveGalGame/actions`,找到最新一条失败的记录(红色 failed)。
+
+2) 进入对应 Run
+ - 点击失败记录,进入详情页后在 “Jobs” 中选择 `Desktop mac (arm64)`(或 x64)。
+
+3) 若网页日志加载不便,可用 GitHub API 直接抓取日志并定位错误
+ - 下载日志(需要 GH_TOKEN,参考 `docs/asr-ci-regression.md`):
+ ```bash
+   cd "$(git rev-parse --show-toplevel)"  # 进入本地仓库根目录
+ curl -L -H "Authorization: Bearer $GH_TOKEN" \
+ -o /tmp/run-19983014012.log \
+ https://api.github.com/repos/JStone2934/LiveGalGame/actions/runs/19983014012/logs
+ cd /tmp && unzip -o run-19983014012.log -d run-19983014012-logs
+ tail -n 120 "run-19983014012-logs/0_Desktop mac (arm64).txt"
+ ```
+ - 关键报错(旧记录,faster-whisper 已移除,仅保留参考):
+ ```
+ error libmamba Could not solve for environment specs
+ └─ faster-whisper =0.10 * does not exist (perhaps a typo or a missing channel).
+ Error: Command failed: .../mamba install -y -p ... ffmpeg av=10.* faster-whisper=0.10.*
+ ```
+ 现已去除 faster-whisper 依赖,无需处理该问题。
+
+4) 修复方向(供后续操作参考)
+ - FunASR 构建:保持 ffmpeg/av 与 funasr_onnx 兼容即可。
+
+### 最新经验(2025-12-06)
+- 当前已去除 faster-whisper,仅需确保 funasr_onnx 及其依赖正常安装。
+- 日志/模型下载前务必先开启代理:在 Runner 或本地执行 `dl1`(本地代理启用别名,请按各自环境替换为对应的代理命令)后再跑 curl/pip/HF 下载,否则可能超时或因缺失 SOCKS 依赖导致模型注册失败。
+- 若 GitHub 日志直链下载报 HTTP2 framing 错误,可尝试加 `--http1.1`:`curl --http1.1 -L -o run.log https://api.github.com/.../logs`。
+
+## 持续迭代的循环操作指南
+
+当你需要多轮修复 / 回归时,按以下闭环重复:
+
+1) 做出最小必要改动并提交
+ - 只提交与修复相关的文件,避免把大体积缓存目录(如 `python-bootstrap/`、`__pycache__`)加入版本库。
+ - 推送到对应分支,触发 Actions。
+
+2) 监控最新一次 Run
+ - Actions 列表页找到最新记录(状态可能是 queued/in progress/failed)。
+ - 若失败,点击进入对应 Run,锁定失败的 macOS Job。
+
+3) 获取并阅读日志
+ - 优先看网页日志;若加载慢,用 API 下载压缩日志并解压。
+ - 关注最后的错误段落,通常在尾部能看到最直接的报错。
+
+4) 分析并收敛修复
+ - 判断是依赖解算、编译缺库、脚本逻辑或环境版本问题。
+ - 在源码/脚本里做最小修正,再次提交、推送。
+
+5) 循环直到绿色
+ - 每轮只解决当前最致命的失败点。
+ - 构建转绿后,检查 Artifacts,必要时下载做回归运行(参考 `docs/asr-ci-regression.md`)。
+
+## 总结
+
+通过 **Browser 浏览日志 -> Codebase 定位配置 -> 逻辑一致性分析** 的闭环流程,我们能够快速识别出 CI 配置与实际构建脚本之间的冲突。
+
+这种方法的优势在于:
+1. **所见即所得**:直接看到 CI 运行时的真实报错,而非本地猜测。
+2. **上下文完整**:能同时看到报错发生的 Step 和其前后的环境设置。
+3. **精准打击**:直接定位到配置文件的具体行数进行修复。