From 38e2e2767a280af78481552e8df1c06ad46c5136 Mon Sep 17 00:00:00 2001 From: Fanglinqiang Date: Sun, 15 Mar 2026 15:26:39 +0800 Subject: [PATCH 01/15] feat: add web dashboard, Telegram channel, and MiniMax/Qwen support - Add built-in HTTP dashboard (port 3847) with 8 tabs: Overview, Groups, Tasks, Stats, Alerts, Settings, Models, Skills - Dashboard features: dark/light theme toggle, zh/en i18n, auto-refresh interval selector, per-group message stats, activity charts (messages and task runs per day), alert rules management - Add Telegram channel support (src/channels/telegram.ts) - Add MiniMax and Qwen model config entries to config.ts - Add getTaskRunLogs, getActivityStats, getGroupMessageStats to db.ts - Add parseConfigBlob helper + agentType/notifyUser support in registered_groups accessors - Add agentType and notifyUser fields to RegisteredGroup type - Fix container-runner.ts path resolution to use import.meta.url - Update README.md and README.zh-CN.md with dashboard documentation Co-Authored-By: Claude Sonnet 4.6 --- README.md | 23 ++ README.zh-CN.md | 23 ++ package-lock.json | 85 ++++ package.json | 3 +- src/channels/telegram.ts | 213 ++++++++++ src/config.ts | 18 +- src/container-runner.ts | 5 +- src/dashboard.html | 822 +++++++++++++++++++++++++++++++++++++++ src/dashboard.ts | 440 +++++++++++++++++++++ src/db.ts | 72 +++- src/index.ts | 72 +++- src/types.ts | 2 + start-dashboard.sh | 11 + start.sh | 6 + 14 files changed, 1762 insertions(+), 33 deletions(-) create mode 100644 src/channels/telegram.ts create mode 100644 src/dashboard.html create mode 100644 src/dashboard.ts create mode 100755 start-dashboard.sh create mode 100755 start.sh diff --git a/README.md b/README.md index c7e591e..858d5e7 100644 --- a/README.md +++ b/README.md @@ -278,6 +278,29 @@ In any WhatsApp group where BioClaw is connected, simply message: See the [ExampleTask](ExampleTask/ExampleTask.md) document for 6 ready-to-use demo prompts with expected outputs. +## Web Dashboard + +BioClaw includes a built-in web dashboard accessible at `http://localhost:3847` (or the port set by `DASHBOARD_PORT`). 
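
The dashboard also exposes its data as JSON under `/api/...` (see `src/dashboard.ts` in this patch). As a rough sketch, assuming the default port and localhost binding, the overview counters can be queried directly:

```python
# Sketch only: query the /api/stats endpoint added in src/dashboard.ts (default 127.0.0.1:3847).
import requests

resp = requests.get("http://127.0.0.1:3847/api/stats", timeout=5)
resp.raise_for_status()
print(resp.json())  # {"totalChats": ..., "registeredGroups": ..., "activeTasks": ..., "totalTasks": ...}
```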
+ +### Features + +| Tab | Description | +|-----|-------------| +| **Overview** | Live stats: message count, task runs, connected groups, registered models and skills | +| **Groups** | All WhatsApp/Telegram groups with message count and last-activity time | +| **Tasks** | Scheduled task list — create, pause, resume, and cancel tasks | +| **Stats** | Activity charts (messages per day, task runs per day, success rate, avg/max duration) with 7d/14d/30d period selector | +| **Alerts** | Alert rules based on group silence thresholds — see which rules are currently firing | +| **Settings** | Environment configuration viewer | +| **Models** | Configured AI models (Claude, MiniMax, Qwen) with auth status | +| **Skills** | Installed agent skills | + +### UI controls + +- **Auto-refresh** — select refresh interval (off / 10s / 30s / 1min / 5min) +- **Dark / Light theme** — toggle in the header, persisted in `localStorage` +- **Language** — switch between Chinese (中文) and English (EN), persisted in `localStorage` + ## Project Structure ``` diff --git a/README.zh-CN.md b/README.zh-CN.md index 59e30a3..f98ed7b 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -55,6 +55,29 @@ install https://github.com/Runchuan-BU/BioClaw +## 网页控制台(Dashboard) + +BioClaw 内置网页控制台,启动后访问 `http://localhost:3847`(端口可通过 `DASHBOARD_PORT` 环境变量修改)。 + +### 功能页签 + +| 页签 | 说明 | +|------|------| +| **概览(Overview)** | 实时统计:消息数、任务运行次数、已连接群组数、已注册模型与技能数 | +| **群组(Groups)** | 所有 WhatsApp/Telegram 群组,显示消息量和最后活跃时间 | +| **任务(Tasks)** | 定时任务列表——新建、暂停、恢复、取消任务 | +| **统计(Stats)** | 活动图表:每日消息量、每日任务运行量、成功率、平均/最大耗时;支持 7 天/14 天/30 天周期选择 | +| **告警(Alerts)** | 基于群组静默阈值的告警规则,显示当前触发状态 | +| **设置(Settings)** | 环境变量配置查看 | +| **模型(Models)** | 已配置的 AI 模型(Claude、MiniMax、Qwen)及认证状态 | +| **技能(Skills)** | 已安装的 Agent 技能列表 | + +### 界面控制 + +- **自动刷新** — 选择刷新间隔(关闭 / 10 秒 / 30 秒 / 1 分钟 / 5 分钟) +- **深色/浅色主题** — 点击标题栏图标切换,状态保存至 `localStorage` +- **语言切换** — 中文 / English 切换,状态保存至 `localStorage` + ## Demo Examples 完整示例任务与截图见: diff --git a/package-lock.json b/package-lock.json index 1f1b740..0dfbad3 100644 --- a/package-lock.json +++ b/package-lock.json @@ -11,6 +11,7 @@ "@whiskeysockets/baileys": "^7.0.0-rc.9", "better-sqlite3": "^11.8.1", "cron-parser": "^5.5.0", + "grammy": "^1.41.1", "pino": "^9.6.0", "pino-pretty": "^13.0.0", "qrcode-terminal": "^0.12.0", @@ -589,6 +590,12 @@ "node": ">=18" } }, + "node_modules/@grammyjs/types": { + "version": "3.25.0", + "resolved": "https://registry.npmjs.org/@grammyjs/types/-/types-3.25.0.tgz", + "integrity": "sha512-iN9i5p+8ZOu9OMxWNcguojQfz4K/PDyMPOnL7PPCON+SoA/F8OKMH3uR7CVUkYfdNe0GCz8QOzAWrnqusQYFOg==", + "license": "MIT" + }, "node_modules/@hapi/boom": { "version": "9.1.4", "resolved": "https://registry.npmjs.org/@hapi/boom/-/boom-9.1.4.tgz", @@ -1832,6 +1839,18 @@ } } }, + "node_modules/abort-controller": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", + "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "license": "MIT", + "dependencies": { + "event-target-shim": "^5.0.0" + }, + "engines": { + "node": ">=6.5" + } + }, "node_modules/assertion-error": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", @@ -2136,6 +2155,15 @@ "@types/estree": "^1.0.0" } }, + "node_modules/event-target-shim": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", + "integrity": 
"sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/eventemitter3": { "version": "5.0.4", "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.4.tgz", @@ -2255,6 +2283,21 @@ "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==", "license": "MIT" }, + "node_modules/grammy": { + "version": "1.41.1", + "resolved": "https://registry.npmjs.org/grammy/-/grammy-1.41.1.tgz", + "integrity": "sha512-wcHAQ1e7svL3fJMpDchcQVcWUmywhuepOOjHUHmMmWAwUJEIyK5ea5sbSjZd+Gy1aMpZeP8VYJa+4tP+j1YptQ==", + "license": "MIT", + "dependencies": { + "@grammyjs/types": "3.25.0", + "abort-controller": "^3.0.0", + "debug": "^4.4.3", + "node-fetch": "^2.7.0" + }, + "engines": { + "node": "^12.20.0 || >=14.13.1" + } + }, "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", @@ -2612,6 +2655,26 @@ "node": ">=10" } }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "license": "MIT", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, "node_modules/obug": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz", @@ -3360,6 +3423,12 @@ "url": "https://github.com/sponsors/Borewit" } }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "license": "MIT" + }, "node_modules/tslib": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", @@ -3589,6 +3658,22 @@ } } }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "license": "BSD-2-Clause" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "license": "MIT", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, "node_modules/why-is-node-running": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", diff --git a/package.json b/package.json index e3e7a96..e4d390c 100644 --- a/package.json +++ b/package.json @@ -20,7 +20,8 @@ "@whiskeysockets/baileys": "^7.0.0-rc.9", "better-sqlite3": "^11.8.1", "cron-parser": "^5.5.0", -"pino": "^9.6.0", + "grammy": "^1.41.1", + "pino": "^9.6.0", "pino-pretty": "^13.0.0", "qrcode-terminal": "^0.12.0", "zod": "^4.3.6" diff --git a/src/channels/telegram.ts b/src/channels/telegram.ts new file mode 100644 index 0000000..8e28cfa --- /dev/null +++ b/src/channels/telegram.ts @@ -0,0 +1,213 @@ +import { Bot } from "grammy"; + +import { ASSISTANT_NAME, TRIGGER_PATTERN } from "../config.js"; +import 
{ logger } from "../logger.js"; +import { + Channel, + OnInboundMessage, + OnChatMetadata, + RegisteredGroup, +} from "../types.js"; + +export interface TelegramChannelOpts { + onMessage: OnInboundMessage; + onChatMetadata: OnChatMetadata; + registeredGroups: () => Record; +} + +export class TelegramChannel implements Channel { + name = "telegram"; + prefixAssistantName = false; + + private bot: Bot | null = null; + private opts: TelegramChannelOpts; + private botToken: string; + + constructor(botToken: string, opts: TelegramChannelOpts) { + this.botToken = botToken; + this.opts = opts; + } + + async connect(): Promise { + this.bot = new Bot(this.botToken); + + this.bot.command("chatid", (ctx) => { + const chatId = ctx.chat.id; + const chatType = ctx.chat.type; + const chatName = + chatType === "private" + ? ctx.from?.first_name || "Private" + : (ctx.chat as any).title || "Unknown"; + ctx.reply( + `Chat ID: \`tg:${chatId}\`\nName: ${chatName}\nType: ${chatType}`, + { parse_mode: "Markdown" }, + ); + }); + + this.bot.command("ping", (ctx) => { + ctx.reply(`${ASSISTANT_NAME} is online.`); + }); + + this.bot.on("message:text", async (ctx) => { + if (ctx.message.text.startsWith("/")) return; + + const chatJid = `tg:${ctx.chat.id}`; + let content = ctx.message.text; + const timestamp = new Date(ctx.message.date * 1000).toISOString(); + const senderName = + ctx.from?.first_name || + ctx.from?.username || + ctx.from?.id.toString() || + "Unknown"; + const sender = ctx.from?.id.toString() || ""; + const msgId = ctx.message.message_id.toString(); + + const chatName = + ctx.chat.type === "private" + ? senderName + : (ctx.chat as any).title || chatJid; + + const botUsername = ctx.me?.username?.toLowerCase(); + if (botUsername) { + const entities = ctx.message.entities || []; + const isBotMentioned = entities.some((entity) => { + if (entity.type === "mention") { + const mentionText = content + .substring(entity.offset, entity.offset + entity.length) + .toLowerCase(); + return mentionText === `@${botUsername}`; + } + return false; + }); + if (isBotMentioned && !TRIGGER_PATTERN.test(content)) { + content = `@${ASSISTANT_NAME} ${content}`; + } + } + + this.opts.onChatMetadata(chatJid, timestamp, chatName); + + const group = this.opts.registeredGroups()[chatJid]; + if (!group) { + logger.debug({ chatJid, chatName }, "Message from unregistered Telegram chat"); + return; + } + + this.opts.onMessage(chatJid, { + id: msgId, + chat_jid: chatJid, + sender, + sender_name: senderName, + content, + timestamp, + is_from_me: false, + }); + + logger.info({ chatJid, chatName, sender: senderName }, "Telegram message stored"); + }); + + const storeNonText = (ctx: any, placeholder: string) => { + const chatJid = `tg:${ctx.chat.id}`; + const group = this.opts.registeredGroups()[chatJid]; + if (!group) return; + + const timestamp = new Date(ctx.message.date * 1000).toISOString(); + const senderName = + ctx.from?.first_name || + ctx.from?.username || + ctx.from?.id?.toString() || + "Unknown"; + const caption = ctx.message.caption ? 
` ${ctx.message.caption}` : ""; + + this.opts.onChatMetadata(chatJid, timestamp); + this.opts.onMessage(chatJid, { + id: ctx.message.message_id.toString(), + chat_jid: chatJid, + sender: ctx.from?.id?.toString() || "", + sender_name: senderName, + content: `${placeholder}${caption}`, + timestamp, + is_from_me: false, + }); + }; + + this.bot.on("message:photo", (ctx) => storeNonText(ctx, "[Photo]")); + this.bot.on("message:video", (ctx) => storeNonText(ctx, "[Video]")); + this.bot.on("message:voice", (ctx) => storeNonText(ctx, "[Voice message]")); + this.bot.on("message:audio", (ctx) => storeNonText(ctx, "[Audio]")); + this.bot.on("message:document", (ctx) => { + const name = ctx.message.document?.file_name || "file"; + storeNonText(ctx, `[Document: ${name}]`); + }); + this.bot.on("message:sticker", (ctx) => { + const emoji = ctx.message.sticker?.emoji || ""; + storeNonText(ctx, `[Sticker ${emoji}]`); + }); + this.bot.on("message:location", (ctx) => storeNonText(ctx, "[Location]")); + this.bot.on("message:contact", (ctx) => storeNonText(ctx, "[Contact]")); + + this.bot.catch((err) => { + logger.error({ err: err.message }, "Telegram bot error"); + }); + + return new Promise((resolve) => { + this.bot!.start({ + onStart: (botInfo) => { + logger.info( + { username: botInfo.username, id: botInfo.id }, + "Telegram bot connected", + ); + console.log(`\n Telegram bot: @${botInfo.username}`); + console.log(` Send /chatid to the bot to get a chat's registration ID\n`); + resolve(); + }, + }); + }); + } + + async sendMessage(jid: string, text: string): Promise { + if (!this.bot) { + logger.warn("Telegram bot not initialized"); + return; + } + try { + const numericId = jid.replace(/^tg:/, ""); + const MAX_LENGTH = 4096; + if (text.length <= MAX_LENGTH) { + await this.bot.api.sendMessage(numericId, text); + } else { + for (let i = 0; i < text.length; i += MAX_LENGTH) { + await this.bot.api.sendMessage(numericId, text.slice(i, i + MAX_LENGTH)); + } + } + logger.info({ jid, length: text.length }, "Telegram message sent"); + } catch (err) { + logger.error({ jid, err }, "Failed to send Telegram message"); + } + } + + isConnected(): boolean { + return this.bot !== null; + } + + ownsJid(jid: string): boolean { + return jid.startsWith("tg:"); + } + + async disconnect(): Promise { + if (this.bot) { + this.bot.stop(); + this.bot = null; + logger.info("Telegram bot stopped"); + } + } + + async setTyping(jid: string, isTyping: boolean): Promise { + if (!this.bot || !isTyping) return; + try { + const numericId = jid.replace(/^tg:/, ""); + await this.bot.api.sendChatAction(numericId, "typing"); + } catch (err) { + logger.debug({ jid, err }, "Failed to send Telegram typing indicator"); + } + } +} diff --git a/src/config.ts b/src/config.ts index e92774e..22762eb 100644 --- a/src/config.ts +++ b/src/config.ts @@ -1,11 +1,14 @@ import path from 'path'; +import { fileURLToPath } from 'url'; export const ASSISTANT_NAME = process.env.ASSISTANT_NAME || 'Bioclaw'; export const POLL_INTERVAL = 2000; export const SCHEDULER_POLL_INTERVAL = 60000; // Absolute paths needed for container mounts -const PROJECT_ROOT = process.cwd(); +// Use import.meta.url so paths are correct regardless of process.cwd() +// dist/config.js -> ../../ -> project root +const PROJECT_ROOT = path.resolve(fileURLToPath(import.meta.url), '../..'); const HOME_DIR = process.env.HOME || '/Users/user'; // Mount security: allowlist stored OUTSIDE project root, never mounted into containers @@ -44,6 +47,9 @@ function escapeRegex(str: string): string { return 
str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); } +export const TELEGRAM_BOT_TOKEN = process.env.TELEGRAM_BOT_TOKEN || ""; +export const TELEGRAM_ONLY = process.env.TELEGRAM_ONLY === "true"; + export const TRIGGER_PATTERN = new RegExp( `^@${escapeRegex(ASSISTANT_NAME)}\\b`, 'i', @@ -53,3 +59,13 @@ export const TRIGGER_PATTERN = new RegExp( // Uses system timezone by default export const TIMEZONE = process.env.TZ || Intl.DateTimeFormat().resolvedOptions().timeZone; + +// MiniMax (optional) +export const MINIMAX_API_KEY = process.env.MINIMAX_API_KEY || ''; +export const MINIMAX_BASE_URL = process.env.MINIMAX_BASE_URL || 'https://api.minimaxi.chat/v1'; +export const MINIMAX_MODEL = process.env.MINIMAX_MODEL || 'MiniMax-Text-01'; + +// Qwen (optional) +export const QWEN_API_BASE = process.env.QWEN_API_BASE || 'https://dashscope.aliyuncs.com/compatible-mode/v1'; +export const QWEN_AUTH_TOKEN = process.env.QWEN_AUTH_TOKEN || ''; +export const QWEN_MODEL = process.env.QWEN_MODEL || 'qwen-plus'; diff --git a/src/container-runner.ts b/src/container-runner.ts index 2c0e65f..5103a9b 100644 --- a/src/container-runner.ts +++ b/src/container-runner.ts @@ -6,6 +6,7 @@ import { ChildProcess, exec, spawn } from 'child_process'; import fs from 'fs'; import os from 'os'; import path from 'path'; +import { fileURLToPath } from 'url'; import { CONTAINER_IMAGE, @@ -62,7 +63,7 @@ function buildVolumeMounts( ): VolumeMount[] { const mounts: VolumeMount[] = []; const homeDir = getHomeDir(); - const projectRoot = process.cwd(); + const projectRoot = path.resolve(fileURLToPath(import.meta.url), '../..'); if (isMain) { // Main gets the entire project root mounted @@ -186,7 +187,7 @@ function buildVolumeMounts( * Secrets are never written to disk or mounted as files. */ function readSecrets(): Record { - const envFile = path.join(process.cwd(), '.env'); + const envFile = path.join(path.resolve(fileURLToPath(import.meta.url), '../..'), '.env'); if (!fs.existsSync(envFile)) return {}; const allowedVars = ['CLAUDE_CODE_OAUTH_TOKEN', 'ANTHROPIC_API_KEY']; diff --git a/src/dashboard.html b/src/dashboard.html new file mode 100644 index 0000000..908a712 --- /dev/null +++ b/src/dashboard.html @@ -0,0 +1,822 @@ + + + + + +BioClaw Dashboard + + + +
+<!-- dashboard.html (822 added lines; full markup omitted) -->
+<!-- Header: "🧬 BioClaw". Nav tabs: Groups, Tasks, Models, Skills, Containers, Chats. -->
+<!-- Panels: Registered Groups, Scheduled Tasks, Activity Statistics (Messages per Day,
+     Task Runs per Day, Avg Response Time (ms)), Configured Models, Skills, Running Containers,
+     and Alert Rules (with Add Rule form). -->
+ + + + + + diff --git a/src/dashboard.ts b/src/dashboard.ts new file mode 100644 index 0000000..0fbdd72 --- /dev/null +++ b/src/dashboard.ts @@ -0,0 +1,440 @@ +import { exec } from 'child_process'; +import fs from 'fs'; +import http from 'http'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +import { + CONTAINER_IMAGE, + MINIMAX_API_KEY, + MINIMAX_BASE_URL, + MINIMAX_MODEL, + QWEN_API_BASE, + QWEN_AUTH_TOKEN, + QWEN_MODEL, +} from './config.js'; +import { + deleteTask, + getAllChats, + getAllRegisteredGroups, + getAllTasks, + getTaskRunLogs, + updateTask, +} from './db.js'; +import { logger } from './logger.js'; + +const PROJECT_ROOT = path.resolve(fileURLToPath(import.meta.url), '../..'); +const LOG_FILE = path.join(PROJECT_ROOT, 'logs', 'bioclaw.log'); +const HTML_FILE = path.join(PROJECT_ROOT, 'src', 'dashboard.html'); +const DASHBOARD_PORT = parseInt(process.env.DASHBOARD_PORT || '3847', 10); + +function readEnvFile(): Record { + const envFile = path.join(PROJECT_ROOT, '.env'); + if (!fs.existsSync(envFile)) return {}; + const result: Record = {}; + for (const line of fs.readFileSync(envFile, 'utf-8').split('\n')) { + const trimmed = line.trim(); + if (!trimmed || trimmed.startsWith('#')) continue; + const eqIdx = trimmed.indexOf('='); + if (eqIdx === -1) continue; + const key = trimmed.slice(0, eqIdx).trim(); + let value = trimmed.slice(eqIdx + 1).trim(); + if ((value.startsWith('"') && value.endsWith('"')) || (value.startsWith("'") && value.endsWith("'"))) { + value = value.slice(1, -1); + } + if (value) result[key] = value; + } + return result; +} + +// SSE clients listening for log lines +const sseClients = new Set(); + +// Broadcast a log line to all SSE clients +export function broadcastLogLine(line: string): void { + if (sseClients.size === 0) return; + const payload = `data: ${JSON.stringify(line)}\n\n`; + for (const res of sseClients) { + try { + res.write(payload); + } catch { + sseClients.delete(res); + } + } +} + +function getContainers(): Promise { + return new Promise((resolve) => { + exec( + 'docker ps --filter "name=bioclaw-" --format "{{.Names}}\\t{{.Image}}\\t{{.Status}}\\t{{.RunningFor}}"', + { timeout: 5000 }, + (_err, stdout) => { + if (!stdout) { resolve([]); return; } + resolve( + stdout.trim().split('\n').filter(Boolean).map((line) => { + const [name, image, status, running] = line.split('\t'); + return { name, image, status, running }; + }), + ); + }, + ); + }); +} + +function tailFile(filePath: string, lines: number): string[] { + if (!fs.existsSync(filePath)) return []; + try { + const stat = fs.statSync(filePath); + const chunkSize = Math.min(stat.size, lines * 200); + const fd = fs.openSync(filePath, 'r'); + const buf = Buffer.alloc(chunkSize); + fs.readSync(fd, buf, 0, chunkSize, stat.size - chunkSize); + fs.closeSync(fd); + const text = buf.toString('utf-8'); + const all = text.split('\n').filter(Boolean); + return all.slice(-lines); + } catch { + return []; + } +} + +// Model specs (context window / max output / reasoning support) +const MODEL_SPECS: Record = { + 'MiniMax-M2.5': { contextWindow: 1_000_000, maxOutput: 40_960, reasoning: true }, + 'MiniMax-M1': { contextWindow: 1_000_000, maxOutput: 40_960, reasoning: true }, + 'claude-opus-4-6': { contextWindow: 200_000, maxOutput: 32_768, reasoning: true }, + 'claude-sonnet-4-6': { contextWindow: 200_000, maxOutput: 16_384, reasoning: true }, + 'claude-haiku-4-5-20251001': { contextWindow: 200_000, maxOutput: 8_192, reasoning: false }, +}; + +function getModels(): object[] { + const 
groups = getAllRegisteredGroups(); + const agentCounts: Record = {}; + for (const g of Object.values(groups)) { + const t = g.agentType || 'claude'; + agentCounts[t] = (agentCounts[t] || 0) + 1; + } + + const models: object[] = []; + + if (MINIMAX_MODEL) { + const spec = MODEL_SPECS[MINIMAX_MODEL] ?? { contextWindow: 1_000_000, maxOutput: 40_960, reasoning: true }; + models.push({ id: 'minimax', name: MINIMAX_MODEL, provider: 'MiniMax', + endpoint: MINIMAX_BASE_URL, agentCount: agentCounts['minimax'] ?? 0, + configured: !!MINIMAX_API_KEY, ...spec }); + } + + if (QWEN_MODEL) { + const displayName = QWEN_MODEL.split('/').pop() ?? QWEN_MODEL; + models.push({ id: 'qwen', name: displayName, fullModel: QWEN_MODEL, provider: 'Qwen (Local)', + endpoint: QWEN_API_BASE, agentCount: agentCounts['qwen'] ?? 0, + configured: !!QWEN_API_BASE, + contextWindow: 32_768, maxOutput: 8_192, reasoning: false }); + } + + const claudeModel = process.env.CLAUDE_MODEL ?? 'claude-sonnet-4-6'; + const claudeSpec = MODEL_SPECS[claudeModel] ?? { contextWindow: 200_000, maxOutput: 32_768, reasoning: true }; + const dotEnv = readEnvFile(); + const hasClaudeAuth = !!(process.env.CLAUDE_CODE_OAUTH_TOKEN || process.env.ANTHROPIC_API_KEY || dotEnv['CLAUDE_CODE_OAUTH_TOKEN'] || dotEnv['ANTHROPIC_API_KEY']); + const imageTag = CONTAINER_IMAGE.split(':')[1] ?? 'latest'; + models.push({ id: 'claude', name: claudeModel, provider: 'Anthropic (Claude Code)', + endpoint: `Docker image: ${imageTag}`, agentCount: agentCounts['claude'] ?? 0, + configured: hasClaudeAuth, ...claudeSpec }); + + return models; +} + +async function testModel(modelType: string, prompt: string): Promise { + const start = Date.now(); + try { + let apiBase: string, apiKey: string, model: string; + if (modelType === 'minimax') { + apiBase = MINIMAX_BASE_URL; apiKey = MINIMAX_API_KEY; model = MINIMAX_MODEL; + } else if (modelType === 'qwen') { + apiBase = QWEN_API_BASE; apiKey = QWEN_AUTH_TOKEN; model = QWEN_MODEL; + } else { + return { ok: false, response: 'Claude cannot be tested directly (runs in container)', durationMs: 0 }; + } + if (!apiBase || !model) return { ok: false, response: 'Not configured', durationMs: 0 }; + const res = await fetch(`${apiBase}/chat/completions`, { + method: 'POST', + headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` }, + body: JSON.stringify({ model, messages: [{ role: 'user', content: prompt }], max_tokens: 200 }), + signal: AbortSignal.timeout(30_000), + }); + const data = await res.json() as any; + const text = data.choices?.[0]?.message?.content ?? data.error?.message ?? JSON.stringify(data).slice(0, 300); + const usage = data.usage ?? {}; + return { ok: res.ok, response: text, durationMs: Date.now() - start, + promptTokens: usage.prompt_tokens ?? 0, completionTokens: usage.completion_tokens ?? 
0 }; + } catch (err: any) { + return { ok: false, response: err.message, durationMs: Date.now() - start }; + } +} + +function getSkills(): object { + const agentTools = [ + { name: 'bash', category: 'System', description: 'Execute bash commands (BLAST, samtools, python, etc.)' }, + { name: 'read_file', category: 'File', description: 'Read file from filesystem' }, + { name: 'write_file', category: 'File', description: 'Write content to file' }, + { name: 'web_fetch', category: 'Network', description: 'Fetch and extract web page content' }, + { name: 'send_message', category: 'Communication', description: 'Send progress update to user mid-task' }, + { name: 'send_file', category: 'Communication', description: 'Send result file to user' }, + { name: 'search_pubmed', category: 'Bio API', description: 'Search PubMed literature' }, + { name: 'fetch_abstract', category: 'Bio API', description: 'Fetch paper abstracts by PMID' }, + { name: 'search_chip_atlas', category: 'Bio API', description: 'Search CHIP-Atlas ChIP-seq/ATAC-seq' }, + ]; + + const bioCliTools = [ + { name: 'ncbi-blast+', category: 'Sequence', description: 'BLAST similarity search' }, + { name: 'samtools', category: 'NGS', description: 'SAM/BAM manipulation' }, + { name: 'bedtools', category: 'Genomics', description: 'Genome arithmetic' }, + { name: 'bwa', category: 'Alignment', description: 'BWA short-read aligner' }, + { name: 'minimap2', category: 'Alignment', description: 'Long-read / RNA-seq aligner' }, + { name: 'fastqc', category: 'QC', description: 'FastQ quality control' }, + { name: 'fastp', category: 'QC', description: 'All-in-one FASTQ preprocessor' }, + { name: 'seqtk', category: 'Sequence', description: 'FASTA/FASTQ toolkit' }, + { name: 'bcftools', category: 'Variant', description: 'VCF/BCF utilities' }, + { name: 'seqkit', category: 'Sequence', description: 'FASTA/FASTQ analysis' }, + { name: 'salmon', category: 'Quantification',description: 'Transcript quantification' }, + { name: 'kallisto', category: 'Quantification',description: 'RNA-seq pseudo-alignment' }, + { name: 'tabix', category: 'Indexing', description: 'Genomic file indexer' }, + { name: 'sra-toolkit', category: 'Data', description: 'NCBI SRA data access' }, + { name: 'pymol', category: 'Structure', description: 'Molecular visualization (headless)' }, + { name: 'pigz', category: 'Compression', description: 'Parallel gzip compression' }, + ]; + + const pythonLibs = [ + { name: 'biopython', category: 'Core', description: 'Biological computation toolkit' }, + { name: 'pandas', category: 'Data', description: 'Data analysis' }, + { name: 'numpy', category: 'Data', description: 'Numerical computing' }, + { name: 'scipy', category: 'Data', description: 'Scientific computing' }, + { name: 'matplotlib', category: 'Visualization', description: 'Data visualization' }, + { name: 'seaborn', category: 'Visualization', description: 'Statistical data visualization' }, + { name: 'scikit-learn', category: 'ML', description: 'Machine learning' }, + { name: 'scanpy', category: 'scRNA-seq', description: 'Single-cell RNA-seq analysis' }, + { name: 'pydeseq2', category: 'RNAseq', description: 'Differential expression' }, + { name: 'pysam', category: 'NGS', description: 'Python SAM/BAM interface' }, + { name: 'rdkit', category: 'Cheminformatics', description: 'Chemical informatics' }, + { name: 'anndata', category: 'scRNA-seq', description: 'Annotated data matrix' }, + { name: 'multiqc', category: 'QC', description: 'Multi-sample QC report' }, + { name: 'requests', category: 
'Network', description: 'HTTP library for Python' }, + ]; + + // Dynamically scan container/skills/ directory + const skillsDir = path.join(process.cwd(), 'container', 'skills'); + const containerSkills: Array<{ name: string; category: string; description: string }> = []; + try { + const dirs = fs.readdirSync(skillsDir, { withFileTypes: true }) + .filter(d => d.isDirectory()) + .map(d => d.name) + .sort(); + for (const dir of dirs) { + const skillMd = path.join(skillsDir, dir, 'SKILL.md'); + let description = ''; + let category = 'Bio Skill'; + if (fs.existsSync(skillMd)) { + const content = fs.readFileSync(skillMd, 'utf-8').slice(0, 500); + const descMatch = content.match(/^description:\s*"?(.+?)"?\s*$/m); + if (descMatch) description = descMatch[1].replace(/^"|"$/g, '').trim(); + } + if (dir === 'agent-browser') category = 'Browser'; + else if (dir.startsWith('bio-')) category = 'Bio Pipeline'; + else if (dir.endsWith('-database')) category = 'Database'; + else if (dir === 'pubmed-search' || dir === 'literature-search') category = 'Literature'; + else if (dir === 'scrna-qc' || dir === 'visium-analysis') category = 'scRNA-seq'; + containerSkills.push({ name: dir, category, description: description || dir.replace(/-/g, ' ') }); + } + } catch { + // skills dir not accessible + } + + return { agentTools, bioCliTools, pythonLibs, containerSkills }; +} + +function json(res: http.ServerResponse, data: unknown, status = 200): void { + const body = JSON.stringify(data); + res.writeHead(status, { 'Content-Type': 'application/json' }); + res.end(body); +} + +function readBody(req: http.IncomingMessage): Promise { + return new Promise((resolve) => { + const chunks: Buffer[] = []; + req.on('data', (d) => chunks.push(d)); + req.on('end', () => resolve(Buffer.concat(chunks).toString('utf-8'))); + }); +} + +async function handleApi( + req: http.IncomingMessage, + res: http.ServerResponse, + pathname: string, +): Promise { + if (pathname === '/api/groups' && req.method === 'GET') { + const groups = getAllRegisteredGroups(); + json(res, groups); + return true; + } + + if (pathname === '/api/tasks' && req.method === 'GET') { + json(res, getAllTasks()); + return true; + } + + if (pathname === '/api/containers' && req.method === 'GET') { + json(res, await getContainers()); + return true; + } + + if (pathname === '/api/stats' && req.method === 'GET') { + const chats = getAllChats(); + const tasks = getAllTasks(); + const groups = getAllRegisteredGroups(); + json(res, { + totalChats: chats.length, + registeredGroups: Object.keys(groups).length, + activeTasks: tasks.filter((t) => t.status === 'active').length, + totalTasks: tasks.length, + }); + return true; + } + + if (pathname === '/api/models' && req.method === 'GET') { + json(res, getModels()); + return true; + } + + if (pathname === '/api/models/test' && req.method === 'POST') { + const body = JSON.parse(await readBody(req)); + json(res, await testModel(body.modelType, body.prompt || 'Reply with exactly one word: ok')); + return true; + } + + if (pathname === '/api/skills' && req.method === 'GET') { + json(res, getSkills()); + return true; + } + + // Task actions + const taskPause = pathname.match(/^\/api\/tasks\/([^/]+)\/pause$/); + if (taskPause && req.method === 'PUT') { + updateTask(taskPause[1], { status: 'paused' }); + json(res, { ok: true }); + return true; + } + + const taskResume = pathname.match(/^\/api\/tasks\/([^/]+)\/resume$/); + if (taskResume && req.method === 'PUT') { + updateTask(taskResume[1], { status: 'active' }); + json(res, { ok: true 
}); + return true; + } + + const taskDelete = pathname.match(/^\/api\/tasks\/([^/]+)$/); + if (taskDelete && req.method === 'DELETE') { + deleteTask(taskDelete[1]); + json(res, { ok: true }); + return true; + } + + const taskLogs = pathname.match(/^\/api\/task-logs\/([^/]+)$/); + if (taskLogs && req.method === 'GET') { + json(res, getTaskRunLogs(taskLogs[1], 50)); + return true; + } + + // SSE log stream + if (pathname === '/api/logs' && req.method === 'GET') { + res.writeHead(200, { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + Connection: 'keep-alive', + }); + res.flushHeaders?.(); + + // Send last 200 lines immediately + const recent = tailFile(LOG_FILE, 200); + for (const line of recent) { + res.write(`data: ${JSON.stringify(line)}\n\n`); + } + + sseClients.add(res); + + // Also tail the file in case bioclaw logs don't hit broadcastLogLine + let filePos = fs.existsSync(LOG_FILE) ? fs.statSync(LOG_FILE).size : 0; + const watchInterval = setInterval(() => { + if (!fs.existsSync(LOG_FILE)) return; + const stat = fs.statSync(LOG_FILE); + if (stat.size <= filePos) return; + const fd = fs.openSync(LOG_FILE, 'r'); + const readLen = stat.size - filePos; + const buf = Buffer.alloc(readLen); + fs.readSync(fd, buf, 0, readLen, filePos); + fs.closeSync(fd); + filePos = stat.size; + const lines = buf.toString('utf-8').split('\n').filter(Boolean); + for (const line of lines) { + try { + res.write(`data: ${JSON.stringify(line)}\n\n`); + } catch { + /* client disconnected */ + } + } + }, 1000); + + req.on('close', () => { + clearInterval(watchInterval); + sseClients.delete(res); + }); + return true; + } + + return false; +} + +function handleRequest( + req: http.IncomingMessage, + res: http.ServerResponse, +): void { + const url = new URL(req.url || '/', `http://localhost`); + const pathname = url.pathname; + + res.setHeader('Access-Control-Allow-Origin', '*'); + res.setHeader('Access-Control-Allow-Methods', 'GET, PUT, DELETE'); + + if (req.method === 'OPTIONS') { + res.writeHead(204); + res.end(); + return; + } + + handleApi(req, res, pathname).then((handled) => { + if (!handled) { + if (pathname === '/' && req.method === 'GET') { + const html = fs.readFileSync(HTML_FILE, 'utf-8'); + res.writeHead(200, { 'Content-Type': 'text/html; charset=utf-8' }); + res.end(html); + } else { + res.writeHead(404, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: 'not found' })); + } + } + }).catch((err) => { + logger.error({ err }, 'Dashboard request error'); + if (!res.headersSent) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify({ error: String(err) })); + } + }); +} + +export function startDashboard(): void { + const server = http.createServer(handleRequest); + server.listen(DASHBOARD_PORT, '127.0.0.1', () => { + logger.info({ port: DASHBOARD_PORT }, `Dashboard: http://127.0.0.1:${DASHBOARD_PORT}`); + }); + server.on('error', (err) => { + logger.error({ err }, 'Dashboard server error'); + }); +} diff --git a/src/db.ts b/src/db.ts index c1daa5b..0898090 100644 --- a/src/db.ts +++ b/src/db.ts @@ -3,7 +3,7 @@ import fs from 'fs'; import path from 'path'; import { DATA_DIR, STORE_DIR } from './config.js'; -import { NewMessage, RegisteredGroup, ScheduledTask, TaskRunLog } from './types.js'; +import { ContainerConfig, NewMessage, RegisteredGroup, ScheduledTask, TaskRunLog } from './types.js'; let db: Database.Database; @@ -393,6 +393,14 @@ export function updateTaskAfterRun( ).run(nextRun, now, lastResult, nextRun, id); } 
+export function getTaskRunLogs(taskId: string, limit = 20): TaskRunLog[] { + return db + .prepare( + 'SELECT * FROM task_run_logs WHERE task_id = ? ORDER BY run_at DESC LIMIT ?', + ) + .all(taskId, limit) as TaskRunLog[]; +} + export function logTaskRun(log: TaskRunLog): void { db.prepare( ` @@ -452,6 +460,21 @@ export function getAllSessions(): Record { // --- Registered group accessors --- +function parseConfigBlob(raw: string | null): { containerConfig?: ContainerConfig; agentType?: string; notifyUser?: string } { + if (!raw) return {}; + try { + const parsed = JSON.parse(raw); + // New format: { containerConfig, agentType } + if ('containerConfig' in parsed || 'agentType' in parsed) { + return parsed; + } + // Old format: containerConfig directly + return { containerConfig: parsed }; + } catch { + return {}; + } +} + export function getRegisteredGroup( jid: string, ): (RegisteredGroup & { jid: string }) | undefined { @@ -469,16 +492,17 @@ export function getRegisteredGroup( } | undefined; if (!row) return undefined; + const { containerConfig, agentType, notifyUser } = parseConfigBlob(row.container_config); return { jid: row.jid, name: row.name, folder: row.folder, trigger: row.trigger_pattern, added_at: row.added_at, - containerConfig: row.container_config - ? JSON.parse(row.container_config) - : undefined, + containerConfig, requiresTrigger: row.requires_trigger === null ? undefined : row.requires_trigger === 1, + agentType: agentType as 'claude' | 'minimax' | 'qwen' | undefined, + notifyUser, }; } @@ -486,6 +510,9 @@ export function setRegisteredGroup( jid: string, group: RegisteredGroup, ): void { + const configBlob = (group.containerConfig || group.agentType || group.notifyUser) + ? JSON.stringify({ containerConfig: group.containerConfig, agentType: group.agentType, notifyUser: group.notifyUser }) + : null; db.prepare( `INSERT OR REPLACE INTO registered_groups (jid, name, folder, trigger_pattern, added_at, container_config, requires_trigger) VALUES (?, ?, ?, ?, ?, ?, ?)`, @@ -495,7 +522,7 @@ export function setRegisteredGroup( group.folder, group.trigger, group.added_at, - group.containerConfig ? JSON.stringify(group.containerConfig) : null, + configBlob, group.requiresTrigger === undefined ? 1 : group.requiresTrigger ? 1 : 0, ); } @@ -514,15 +541,16 @@ export function getAllRegisteredGroups(): Record { }>; const result: Record = {}; for (const row of rows) { + const { containerConfig, agentType, notifyUser } = parseConfigBlob(row.container_config); result[row.jid] = { name: row.name, folder: row.folder, trigger: row.trigger_pattern, added_at: row.added_at, - containerConfig: row.container_config - ? JSON.parse(row.container_config) - : undefined, + containerConfig, requiresTrigger: row.requires_trigger === null ? undefined : row.requires_trigger === 1, + agentType: agentType as 'claude' | 'minimax' | 'qwen' | undefined, + notifyUser, }; } return result; @@ -582,3 +610,31 @@ function migrateJsonState(): void { } } } + +// --- Dashboard stats --- + +export interface ActivityStats { + messages: Array<{ day: string; cnt: number }>; + tasks: Array<{ day: string; total: number; success: number; avg_ms: number }>; + taskTotals: { total: number; success: number; avg_ms: number; max_ms: number }; +} + +export function getActivityStats(days: number): ActivityStats { + const since = new Date(Date.now() - days * 24 * 60 * 60 * 1000).toISOString(); + const messages = db.prepare( + `SELECT date(timestamp) as day, COUNT(*) as cnt FROM messages WHERE timestamp > ? 
GROUP BY day ORDER BY day`, + ).all(since) as Array<{ day: string; cnt: number }>; + const tasks = db.prepare( + `SELECT date(run_at) as day, COUNT(*) as total, SUM(CASE WHEN status='success' THEN 1 ELSE 0 END) as success, CAST(AVG(duration_ms) AS INTEGER) as avg_ms FROM task_run_logs WHERE run_at > ? GROUP BY day ORDER BY day`, + ).all(since) as Array<{ day: string; total: number; success: number; avg_ms: number }>; + const taskTotals = db.prepare( + `SELECT COUNT(*) as total, SUM(CASE WHEN status='success' THEN 1 ELSE 0 END) as success, CAST(AVG(duration_ms) AS INTEGER) as avg_ms, CAST(MAX(duration_ms) AS INTEGER) as max_ms FROM task_run_logs WHERE run_at > ?`, + ).get(since) as { total: number; success: number; avg_ms: number; max_ms: number } | undefined; + return { messages, tasks, taskTotals: taskTotals || { total: 0, success: 0, avg_ms: 0, max_ms: 0 } }; +} + +export function getGroupMessageStats(): Array<{ chat_jid: string; msg_count: number; last_msg: string }> { + return db.prepare( + `SELECT chat_jid, COUNT(*) as msg_count, MAX(timestamp) as last_msg FROM messages GROUP BY chat_jid`, + ).all() as Array<{ chat_jid: string; msg_count: number; last_msg: string }>; +} diff --git a/src/index.ts b/src/index.ts index 1a400d1..7d75340 100644 --- a/src/index.ts +++ b/src/index.ts @@ -8,9 +8,12 @@ import { IDLE_TIMEOUT, MAIN_GROUP_FOLDER, POLL_INTERVAL, + TELEGRAM_BOT_TOKEN, + TELEGRAM_ONLY, TRIGGER_PATTERN, } from './config.js'; import { WhatsAppChannel } from './channels/whatsapp.js'; +import { TelegramChannel } from './channels/telegram.js'; import { ContainerOutput, runContainerAgent, @@ -34,9 +37,10 @@ import { } from './db.js'; import { GroupQueue } from './group-queue.js'; import { startIpcWatcher } from './ipc.js'; -import { formatMessages, formatOutbound } from './router.js'; +import { findChannel, formatMessages, formatOutbound } from './router.js'; import { startSchedulerLoop } from './task-scheduler.js'; -import { NewMessage, RegisteredGroup } from './types.js'; +import { startDashboard } from './dashboard.js'; +import { Channel, NewMessage, RegisteredGroup } from './types.js'; import { logger } from './logger.js'; // Re-export for backwards compatibility during refactor @@ -49,6 +53,7 @@ let lastAgentTimestamp: Record = {}; let messageLoopRunning = false; let whatsapp: WhatsAppChannel; +const channels: Channel[] = []; const queue = new GroupQueue(); function loadState(): void { @@ -99,7 +104,7 @@ export function getAvailableGroups(): import('./container-runner.js').AvailableG const registeredJids = new Set(Object.keys(registeredGroups)); return chats - .filter((c) => c.jid !== '__group_sync__' && c.jid.endsWith('@g.us')) + .filter((c) => c.jid !== '__group_sync__' && (c.jid.endsWith('@g.us') || c.jid.startsWith('tg:'))) .map((c) => ({ jid: c.jid, name: c.name, @@ -165,7 +170,8 @@ async function processGroupMessages(chatJid: string): Promise { }, IDLE_TIMEOUT); }; - await whatsapp.setTyping(chatJid, true); + const channel = findChannel(channels, chatJid); + await channel?.setTyping?.(chatJid, true); let hadError = false; let outputSentToUser = false; @@ -176,9 +182,12 @@ async function processGroupMessages(chatJid: string): Promise { // Strip ... 
blocks — agent uses these for internal reasoning const text = raw.replace(/[\s\S]*?<\/internal>/g, '').trim(); logger.info({ group: group.name }, `Agent output: ${raw.slice(0, 200)}`); - if (text) { - await whatsapp.sendMessage(chatJid, `${ASSISTANT_NAME}: ${text}`); - outputSentToUser = true; + if (text && channel) { + const formatted = formatOutbound(channel, text); + if (formatted) { + await channel.sendMessage(chatJid, formatted); + outputSentToUser = true; + } } // Only reset idle timer on actual results, not session-update markers (result: null) resetIdleTimer(); @@ -189,7 +198,7 @@ async function processGroupMessages(chatJid: string): Promise { } }); - await whatsapp.setTyping(chatJid, false); + await channel?.setTyping?.(chatJid, false); if (idleTimer) clearTimeout(idleTimer); if (output === 'error' || hadError) { @@ -439,21 +448,35 @@ async function main(): Promise { const shutdown = async (signal: string) => { logger.info({ signal }, 'Shutdown signal received'); await queue.shutdown(10000); - await whatsapp.disconnect(); + for (const ch of channels) await ch.disconnect(); process.exit(0); }; process.on('SIGTERM', () => shutdown('SIGTERM')); process.on('SIGINT', () => shutdown('SIGINT')); - // Create WhatsApp channel - whatsapp = new WhatsAppChannel({ - onMessage: (chatJid, msg) => storeMessage(msg), - onChatMetadata: (chatJid, timestamp) => storeChatMetadata(chatJid, timestamp), + const channelOpts = { + onMessage: (chatJid: string, msg: NewMessage) => storeMessage(msg), + onChatMetadata: (chatJid: string, timestamp: string, name?: string) => + storeChatMetadata(chatJid, timestamp, name), registeredGroups: () => registeredGroups, - }); + }; + + // Create and connect channels + if (!TELEGRAM_ONLY) { + whatsapp = new WhatsAppChannel({ + onMessage: channelOpts.onMessage, + onChatMetadata: (chatJid, timestamp) => storeChatMetadata(chatJid, timestamp), + registeredGroups: () => registeredGroups, + }); + channels.push(whatsapp); + await whatsapp.connect(); + } - // Connect — resolves when first connected - await whatsapp.connect(); + if (TELEGRAM_BOT_TOKEN) { + const telegram = new TelegramChannel(TELEGRAM_BOT_TOKEN, channelOpts); + channels.push(telegram); + await telegram.connect(); + } // Start subsystems (independently of connection handler) startSchedulerLoop({ @@ -462,22 +485,29 @@ async function main(): Promise { queue, onProcess: (groupJid, proc, containerName, groupFolder) => queue.registerProcess(groupJid, proc, containerName, groupFolder), sendMessage: async (jid, rawText) => { - const text = formatOutbound(whatsapp, rawText); - if (text) await whatsapp.sendMessage(jid, text); + const ch = findChannel(channels, jid); + if (!ch) return; + const text = formatOutbound(ch, rawText); + if (text) await ch.sendMessage(jid, text); }, }); startIpcWatcher({ - sendMessage: (jid, text) => whatsapp.sendMessage(jid, text), - sendImage: (jid, imagePath, caption) => whatsapp.sendImage(jid, imagePath, caption), + sendMessage: (jid, text) => { + const ch = findChannel(channels, jid); + if (!ch) throw new Error(`No channel for JID: ${jid}`); + return ch.sendMessage(jid, text); + }, + sendImage: (jid, imagePath, caption) => whatsapp?.sendImage(jid, imagePath, caption), registeredGroups: () => registeredGroups, registerGroup, - syncGroupMetadata: (force) => whatsapp.syncGroupMetadata(force), + syncGroupMetadata: (force) => whatsapp?.syncGroupMetadata(force) ?? 
Promise.resolve(), getAvailableGroups, writeGroupsSnapshot: (gf, im, ag, rj) => writeGroupsSnapshot(gf, im, ag, rj), }); queue.setProcessMessagesFn(processGroupMessages); recoverPendingMessages(); startMessageLoop(); + startDashboard(); } // Guard: only run when executed directly, not when imported by tests diff --git a/src/types.ts b/src/types.ts index 4a7571a..97af37a 100644 --- a/src/types.ts +++ b/src/types.ts @@ -39,6 +39,8 @@ export interface RegisteredGroup { added_at: string; containerConfig?: ContainerConfig; requiresTrigger?: boolean; // Default: true for groups, false for solo chats + agentType?: 'claude' | 'minimax' | 'qwen'; + notifyUser?: string; } export interface NewMessage { diff --git a/start-dashboard.sh b/start-dashboard.sh new file mode 100755 index 0000000..9e8fa98 --- /dev/null +++ b/start-dashboard.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# If bioclaw is already running on 3847 (via launchd), just wait. +# Otherwise start the dev server. +if lsof -i :3847 -sTCP:LISTEN -t > /dev/null 2>&1; then + echo "BioClaw dashboard already running on port 3847" + # Keep process alive so preview tool considers it running + while true; do sleep 60; done +else + cd "$(dirname "$0")" + exec npm run dev +fi diff --git a/start.sh b/start.sh new file mode 100755 index 0000000..4b7d415 --- /dev/null +++ b/start.sh @@ -0,0 +1,6 @@ +#!/bin/bash +export TELEGRAM_BOT_TOKEN=8607195188:AAHK4XLjSO4OkfzhSmfebs4cko4AJjlcYr0 +export TELEGRAM_ONLY=true +export HOME=/Users/fanglinqiang +cd /Users/fanglinqiang/Desktop/nanoclaw/bioclaw +exec /Users/fanglinqiang/.nvm/versions/node/v24.14.0/bin/node dist/index.js From 84bdefaa7edc52c419c0e7c4328ba00c546a317c Mon Sep 17 00:00:00 2001 From: Fanglinqiang Date: Sun, 15 Mar 2026 16:01:11 +0800 Subject: [PATCH 02/15] remove start.sh: contains local paths, not suitable for public repo Co-Authored-By: Claude Sonnet 4.6 --- start.sh | 6 ------ 1 file changed, 6 deletions(-) delete mode 100755 start.sh diff --git a/start.sh b/start.sh deleted file mode 100755 index 4b7d415..0000000 --- a/start.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -export TELEGRAM_BOT_TOKEN=8607195188:AAHK4XLjSO4OkfzhSmfebs4cko4AJjlcYr0 -export TELEGRAM_ONLY=true -export HOME=/Users/fanglinqiang -cd /Users/fanglinqiang/Desktop/nanoclaw/bioclaw -exec /Users/fanglinqiang/.nvm/versions/node/v24.14.0/bin/node dist/index.js From 20ec825c5a964137b7650ab4879f0408ae806ddb Mon Sep 17 00:00:00 2001 From: Fanglinqiang Date: Sun, 15 Mar 2026 16:01:36 +0800 Subject: [PATCH 03/15] gitignore: exclude local start.sh Co-Authored-By: Claude Sonnet 4.6 --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 5f6a0fe..8b234d6 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,9 @@ groups/global/* *.keys.json .env +# Local startup scripts (contain machine-specific paths) +start.sh + # OS .DS_Store From a64d0f3fef1af59640454c9964bac483b345a6a3 Mon Sep 17 00:00:00 2001 From: Fanglinqiang Date: Sun, 15 Mar 2026 16:05:18 +0800 Subject: [PATCH 04/15] docs: fix Tasks tab description in dashboard section Co-Authored-By: Claude Sonnet 4.6 --- README.md | 2 +- README.zh-CN.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 858d5e7..6676b6d 100644 --- a/README.md +++ b/README.md @@ -288,7 +288,7 @@ BioClaw includes a built-in web dashboard accessible at `http://localhost:3847` |-----|-------------| | **Overview** | Live stats: message count, task runs, connected groups, registered models and skills | | **Groups** | All 
WhatsApp/Telegram groups with message count and last-activity time | -| **Tasks** | Scheduled task list — create, pause, resume, and cancel tasks | +| **Tasks** | Scheduled task list — view, pause, resume, and cancel tasks | | **Stats** | Activity charts (messages per day, task runs per day, success rate, avg/max duration) with 7d/14d/30d period selector | | **Alerts** | Alert rules based on group silence thresholds — see which rules are currently firing | | **Settings** | Environment configuration viewer | diff --git a/README.zh-CN.md b/README.zh-CN.md index f98ed7b..c9817bc 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -65,7 +65,7 @@ BioClaw 内置网页控制台,启动后访问 `http://localhost:3847`(端口 |------|------| | **概览(Overview)** | 实时统计:消息数、任务运行次数、已连接群组数、已注册模型与技能数 | | **群组(Groups)** | 所有 WhatsApp/Telegram 群组,显示消息量和最后活跃时间 | -| **任务(Tasks)** | 定时任务列表——新建、暂停、恢复、取消任务 | +| **任务(Tasks)** | 定时任务列表——查看、暂停、恢复、取消任务 | | **统计(Stats)** | 活动图表:每日消息量、每日任务运行量、成功率、平均/最大耗时;支持 7 天/14 天/30 天周期选择 | | **告警(Alerts)** | 基于群组静默阈值的告警规则,显示当前触发状态 | | **设置(Settings)** | 环境变量配置查看 | From 85250893dd255784d1950560d9cce4208b33ca64 Mon Sep 17 00:00:00 2001 From: Fanglinqiang Date: Sun, 15 Mar 2026 16:42:37 +0800 Subject: [PATCH 05/15] feat: add bio-research-pipeline skill for hypothesis generation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Multi-agent biological research pipeline: - Stage 1: Parallel literature search (PubMed + bioRxiv/medRxiv + KEGG/Reactome) - Stage 2: Pathway synthesis + ≥5 mechanistic hypothesis generation - Stage 3: 3-role debate per hypothesis (Supporter / Skeptic / Methodologist) - Stage 4: Top-3 refinement with full molecular detail - Stage 5: Wet-lab experimental plan per hypothesis (controls, timeline, risks) Helper scripts: pubmed-fetch, preprint-fetch, pathway-search Dockerfile: add litellm + AutoResearchClaw (future use) Co-Authored-By: Claude Sonnet 4.6 --- container/Dockerfile | 6 + .../skills/bio-research-pipeline/SKILL.md | 454 ++++++++++++++++++ .../scripts/pathway-search | 256 ++++++++++ .../scripts/preprint-fetch | 183 +++++++ .../scripts/pubmed-fetch | 158 ++++++ 5 files changed, 1057 insertions(+) create mode 100644 container/skills/bio-research-pipeline/SKILL.md create mode 100755 container/skills/bio-research-pipeline/scripts/pathway-search create mode 100755 container/skills/bio-research-pipeline/scripts/preprint-fetch create mode 100755 container/skills/bio-research-pipeline/scripts/pubmed-fetch diff --git a/container/Dockerfile b/container/Dockerfile index 46cb6ce..3b08e1a 100644 --- a/container/Dockerfile +++ b/container/Dockerfile @@ -68,6 +68,12 @@ RUN pip3 install --no-cache-dir --break-system-packages \ requests \ multiqc +# Install AutoResearchClaw and LiteLLM (OpenAI-compat proxy for Anthropic API) +RUN pip3 install --no-cache-dir --break-system-packages \ + litellm \ + pyyaml \ + git+https://github.com/aiming-lab/AutoResearchClaw.git + # Install PyMOL (headless) via apt RUN apt-get update && apt-get install -y \ pymol \ diff --git a/container/skills/bio-research-pipeline/SKILL.md b/container/skills/bio-research-pipeline/SKILL.md new file mode 100644 index 0000000..ea1f782 --- /dev/null +++ b/container/skills/bio-research-pipeline/SKILL.md @@ -0,0 +1,454 @@ +--- +name: bio-research-pipeline +description: > + Biological hypothesis generation pipeline. 
Given a broad research direction, + runs parallel literature searches (PubMed + preprints + pathway DBs), generates + ≥5 mechanistic hypotheses, conducts multi-agent debate to select top 3, then + designs wet-lab experimental plans for each. Outputs a structured research brief. +keywords: + - bio-research-pipeline + - hypothesis-generation + - literature-review + - experimental-design + - wet-lab + - multi-agent-debate + - pathway-analysis +--- + +# Biological Research Hypothesis Pipeline + +You are a research coordinator orchestrating a multi-stage biological research pipeline. +This skill is triggered when a user provides a broad biological research direction and wants: +- A thorough literature review +- Multiple mechanistic hypotheses +- Multi-perspective critique and ranking +- Wet-lab experimental designs + +--- + +## SCRIPT PATH RESOLUTION + +This skill includes three helper scripts. Before running them, locate them: + +```bash +SKILL_DIR=$(find ~/.claude/skills -name "SKILL.md" -path "*/bio-research-pipeline/*" | xargs dirname 2>/dev/null | head -1) +PUBMED_SCRIPT="$SKILL_DIR/scripts/pubmed-fetch" +PREPRINT_SCRIPT="$SKILL_DIR/scripts/preprint-fetch" +PATHWAY_SCRIPT="$SKILL_DIR/scripts/pathway-search" +echo "Skill dir: $SKILL_DIR" +ls "$SKILL_DIR/scripts/" +``` + +Then invoke scripts as: +```bash +python3 "$PUBMED_SCRIPT" "your topic" --max 40 --years 5 +python3 "$PREPRINT_SCRIPT" "your topic" --max 30 --days 180 +python3 "$PATHWAY_SCRIPT" "your topic" --gene GENE_SYMBOL +``` + +--- + +## HOW TO INVOKE THIS PIPELINE + +When a user asks something like: +- "帮我研究一下 [方向]" +- "我想研究 [X] 机制,帮我提假说" +- "针对 [疾病/通路/基因],设计一个研究方向" +- "run bio-research-pipeline on [topic]" + +Parse the research direction from the user's message, then execute all 5 stages below **in order**. + +--- + +## STAGE 1 — PARALLEL LITERATURE SEARCH + +**Goal:** Cast a wide net across three complementary literature sources simultaneously. + +Use the `Task` tool to launch **3 parallel search tasks**. Do NOT wait for one before starting the next — launch all three in the same message. + +### Task A — PubMed Mechanistic Search + +Prompt for Task A: +``` +You are a PubMed literature specialist. Search PubMed for papers about: {RESEARCH_DIRECTION} + +Run the following Python script to fetch results: + +```python +from Bio import Entrez +import json, sys + +Entrez.email = "bioclaw-agent@research.ai" + +# Build search query — include MeSH terms if applicable +query = "{RESEARCH_DIRECTION}[Title/Abstract] AND (mechanism OR pathway OR signaling OR molecular)" +handle = Entrez.esearch(db="pubmed", term=query, retmax=40, sort="relevance", + datetype="pdat", mindate="2020", maxdate="2025") +record = Entrez.read(handle) +ids = record["IdList"] + +# Fetch abstracts +handle2 = Entrez.efetch(db="pubmed", id=",".join(ids[:30]), rettype="abstract", retmode="text") +abstracts = handle2.read() +print(abstracts[:15000]) +``` + +Then summarize: +1. Key molecular mechanisms mentioned +2. Key proteins/genes involved +3. Key signaling pathways implicated +4. Most cited findings (appear in multiple papers) +5. Contradictions or debates in the literature + +Output as structured text with section headers. +``` + +### Task B — Preprint Search (bioRxiv / medRxiv) + +Prompt for Task B: +``` +You are a preprint literature specialist. 
Find the latest cutting-edge preprints about: {RESEARCH_DIRECTION} + +Step 1 — Search bioRxiv API: +```python +import requests, json + +topic = "{RESEARCH_DIRECTION}" +# bioRxiv API — last 180 days +url = f"https://api.biorxiv.org/details/biorxiv/2024-09-01/2025-03-15/0/json" +r = requests.get(url, timeout=30) +data = r.json() + +# Filter by keyword relevance +keywords = topic.lower().split() +relevant = [] +for paper in data.get("collection", []): + title = paper.get("title", "").lower() + abstract = paper.get("abstract", "").lower() + if any(kw in title or kw in abstract for kw in keywords): + relevant.append({ + "title": paper["title"], + "authors": paper.get("authors", ""), + "date": paper.get("date", ""), + "abstract": paper.get("abstract", "")[:500], + "doi": paper.get("doi", "") + }) + +print(json.dumps(relevant[:20], indent=2, ensure_ascii=False)) +``` + +Step 2 — Use WebSearch to find additional preprints: +Search: "{RESEARCH_DIRECTION} site:biorxiv.org OR site:medrxiv.org 2024 2025" + +Summarize: +1. Emerging findings not yet in peer-reviewed journals +2. Novel methodologies being applied +3. Preliminary data suggesting new directions +4. Discrepancies with established literature + +Output as structured text. +``` + +### Task C — Reviews + Pathway Databases + +Prompt for Task C: +``` +You are a pathway and review specialist. Map the known biology for: {RESEARCH_DIRECTION} + +Step 1 — Search for review articles: +```python +from Bio import Entrez +import json + +Entrez.email = "bioclaw-agent@research.ai" +query = "{RESEARCH_DIRECTION}[Title/Abstract] AND (Review[pt] OR systematic review OR meta-analysis)" +handle = Entrez.esearch(db="pubmed", term=query, retmax=20, sort="relevance") +record = Entrez.read(handle) +ids = record["IdList"] +handle2 = Entrez.efetch(db="pubmed", id=",".join(ids[:15]), rettype="abstract", retmode="text") +print(handle2.read()[:10000]) +``` + +Step 2 — Query KEGG pathway API: +```python +import requests + +# Search KEGG for relevant pathways +topic_keywords = "{RESEARCH_DIRECTION}".split()[:3] +for kw in topic_keywords: + r = requests.get(f"https://rest.kegg.jp/find/pathway/{kw}", timeout=15) + if r.status_code == 200 and r.text.strip(): + print(f"KEGG pathways for '{kw}':") + print(r.text[:2000]) +``` + +Step 3 — Use WebSearch to find Reactome pathway information: +Search: "{RESEARCH_DIRECTION} Reactome pathway 2024" + +Synthesize: +1. Established pathway map (which pathways are involved) +2. Key regulatory nodes (master regulators, feedback loops) +3. Known therapeutic targets in these pathways +4. Gaps in current knowledge (explicitly stated in reviews) + +Output as structured text. +``` + +**After launching all 3 tasks**, collect results with `TaskOutput` for each task ID. Wait for all 3 to complete. + +--- + +## STAGE 2 — PATHWAY SYNTHESIS + HYPOTHESIS GENERATION + +**Goal:** Synthesize the 3 literature sources into a pathway map, then generate ≥5 mechanistic hypotheses. + +### 2a. Build Pathway Map + +From the 3 task outputs, extract: +- All mentioned proteins/genes → list with roles +- All mentioned pathways → list with descriptions +- Key interactions (A activates B, X inhibits Y) +- Unresolved questions explicitly mentioned in papers + +### 2b. Generate ≥5 Hypotheses + +For each hypothesis, output a structured block: + +``` +HYPOTHESIS [N]: [One-sentence title] + +Mechanism: + [2-3 sentences describing the molecular mechanism step by step] + e.g. 
"We propose that [A] activates [B] under [condition X], which leads to [downstream effect Y] + via [pathway Z]. This is supported by [evidence 1] but has not been directly tested in [context]." + +Key molecular players: + - [Gene/Protein 1]: [role] + - [Gene/Protein 2]: [role] + - [Pathway]: [how it's involved] + +Supporting evidence: + - [Paper/finding that supports this] + - [Observation that is consistent with this] + +Evidence gaps (why this is a hypothesis, not established fact): + - [What has NOT been shown] + - [Conflicting data, if any] + +Novelty score (1-10): [score] +Reason: [why this is or isn't novel] + +Testability score (1-10): [score] +Reason: [how difficult it would be to test with standard wet lab methods] +``` + +Generate hypotheses that: +- Cover **different mechanistic angles** (not just variations of the same idea) +- Range from **conservative** (well-supported, incremental) to **bold** (less evidence, high impact) +- Are **wet-lab testable** (avoid purely computational hypotheses) + +--- + +## STAGE 3 — MULTI-AGENT DEBATE + +**Goal:** Critically evaluate each hypothesis from 3 perspectives to identify the strongest ones. + +For each hypothesis, conduct a structured 3-voice review. You will play each role in sequence: + +``` +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +DEBATE — HYPOTHESIS [N]: [title] +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +🟢 SUPPORTER (argues FOR this hypothesis): + Strongest evidence points: + - [evidence 1] + - [evidence 2] + Why this mechanism is biologically plausible: + - [mechanistic reasoning] + Potential impact if confirmed: + - [scientific/clinical significance] + +🔴 SKEPTIC (argues AGAINST / identifies weaknesses): + Critical weaknesses: + - [flaw 1: e.g., "The key evidence comes from in vitro studies only"] + - [flaw 2: e.g., "Alternative explanation: this effect may be due to [X] instead"] + Confounding factors not accounted for: + - [confounder] + Prior work that challenges this: + - [conflicting evidence or null results] + +🔵 METHODOLOGIST (evaluates experimental feasibility): + To directly test this hypothesis, you would need: + - [key experiment] + Technical challenges: + - [challenge 1] + - [challenge 2] + Timeline estimate: [weeks/months] + Whether a typical university wet lab can do this: [Yes/No/Partially] + Model system recommendation: [cell line / mouse model / organoid / etc.] + +DEBATE VERDICT: + Evidence score (1-10): [score] — How well-supported is it currently? + Novelty score (1-10): [score] — How new is this idea? + Feasibility score (1-10): [score] — Can a wet lab test it in <12 months? + Impact score (1-10): [score] — How significant if confirmed? + + COMPOSITE SCORE: [average, weighted: Evidence×0.3 + Novelty×0.25 + Feasibility×0.25 + Impact×0.2] +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +``` + +Run this for ALL hypotheses. Then rank by composite score and select **TOP 3**. 
+ +--- + +## STAGE 4 — TOP 3 REFINEMENT + +For each of the top 3 hypotheses, expand the mechanism with full molecular detail: + +``` +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +TOP [1/2/3]: [Hypothesis title] +Final composite score: [X.X/10] +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +REFINED MECHANISM: + [4-6 sentences with full molecular detail] + Include: upstream triggers, key effectors, downstream consequences, feedback regulation + +PATHWAY DIAGRAM (text-based): + [Stimulus/Condition] + ↓ + [Receptor/Sensor] → activates → [Kinase/TF] + ↓ + [Key effector] + ↓ (promotes) ↓ (inhibits) + [Outcome A] [Outcome B] + +KEY UNKNOWNS to be resolved by experiments: + 1. [Unknown 1] + 2. [Unknown 2] + 3. [Unknown 3] +``` + +--- + +## STAGE 5 — WET LAB EXPERIMENTAL DESIGN + +For each of the top 3 hypotheses, design a complete wet-lab experimental plan: + +``` +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +EXPERIMENTAL PLAN — [Hypothesis title] +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +RECOMMENDED MODEL SYSTEM: + Primary: [e.g., HEK293T cells / primary mouse hepatocytes / C57BL/6 mice] + Rationale: [why this model is appropriate] + Alternative: [backup model if primary unavailable] + +EXPERIMENT 1 — [Core test of the central claim] + Objective: [what this experiment proves or disproves] + + Method: + 1. [Step 1] + 2. [Step 2] + 3. [Step 3] + + Key reagents: + - [Antibody/siRNA/inhibitor/construct needed] + - [Source: commercial/need to generate] + + Readout: [what you measure — Western blot / qPCR / immunofluorescence / etc.] + + Expected result if hypothesis is TRUE: + - [specific measurable outcome, e.g., "50%+ increase in phospho-X levels"] + + Expected result if hypothesis is FALSE: + - [what you'd see instead] + + Controls: + - Positive control: [what and why] + - Negative control: [what and why] + - Technical control: [e.g., loading control, vehicle control] + + Estimated time: [X weeks] + Difficulty: [Easy / Medium / Hard] + +EXPERIMENT 2 — [Validation / Orthogonal approach] + [same structure as Experiment 1] + +EXPERIMENT 3 — [In vivo / disease-relevance test, if applicable] + [same structure — note if this requires animal work / ethics approval] + +DECISION TREE: + If Experiment 1 result is positive → proceed to Experiment 2 + If Experiment 1 result is negative → [interpret: reject hypothesis OR check [alternative explanation]] + If Experiment 2 confirms → [next step: submit for funding / expand to in vivo] + If Experiment 2 conflicts with Experiment 1 → [troubleshoot: check [specific variable]] + +TIMELINE OVERVIEW: + Week 1-2: [setup, reagent procurement] + Week 3-6: [Experiment 1] + Week 7-10: [Experiment 2] + Week 11-16: [Experiment 3, if applicable] + Total estimated time to proof-of-concept: [X months] + +KEY RISKS: + - [Risk 1: e.g., "Primary antibody may not work in mouse samples"] + Mitigation: [e.g., "Order 2 alternative antibodies from different vendors"] + - [Risk 2: e.g., "Model system may not recapitulate in vivo physiology"] + Mitigation: [e.g., "Validate key finding in primary cells"] +``` + +--- + +## FINAL OUTPUT FORMAT + +After completing all 5 stages, produce a summary: + +``` +╔══════════════════════════════════════════════════════╗ +║ RESEARCH BRIEF — [RESEARCH DIRECTION] ║ +║ Generated: [date] ║ +╚══════════════════════════════════════════════════════╝ + +LITERATURE COVERAGE: + PubMed papers reviewed: ~[N] + Preprints reviewed: ~[N] + Key pathways identified: [list] + +ALL HYPOTHESES RANKED: + #1 [score] — [title] + #2 [score] — [title] + #3 [score] — [title] ← TOP 3 + #4 [score] 
— [title] + #5 [score] — [title] + [#6+ if generated] + +TOP 3 RECOMMENDED FOR INVESTIGATION: + → [Hypothesis 1 title] (strongest evidence + feasible) + → [Hypothesis 2 title] (most novel) + → [Hypothesis 3 title] (highest clinical impact) + +NEXT STEPS: + Immediate (0-1 month): [first experiment to run] + Short-term (1-6 months): [validation plan] + Long-term (6-18 months): [expansion strategy] + +FULL EXPERIMENTAL PLANS: see sections above +``` + +Save the complete output to `/workspace/group/research-brief-[slug].md` where [slug] is a short version of the research direction. + +Tell the user: "研究简报已完成,保存在 research-brief-[slug].md。以下是摘要:" then show the summary block. + +--- + +## IMPORTANT NOTES + +- **Do not skip the debate stage** — the debate is essential to filter weak hypotheses +- **Wet lab focus** — all experimental designs must be physically executable (pipettes, cells, animals), not just computational +- **Be specific** — vague statements like "further research needed" are not acceptable; every gap should map to a specific experiment +- **Cite as you go** — whenever you make a claim, reference which paper or database it came from +- **Chinese output is fine** — if the user wrote in Chinese, respond in Chinese throughout diff --git a/container/skills/bio-research-pipeline/scripts/pathway-search b/container/skills/bio-research-pipeline/scripts/pathway-search new file mode 100755 index 0000000..fa7abee --- /dev/null +++ b/container/skills/bio-research-pipeline/scripts/pathway-search @@ -0,0 +1,256 @@ +#!/usr/bin/env python3 +""" +Pathway database search (KEGG + Reactome + STRING) for bio-research-pipeline. + +Usage: + pathway-search "gene or pathway name" [--gene SYMBOL] [--species hsa|mmu] + +Output: relevant pathways, interacting proteins, and known regulatory relationships +""" + +import sys +import argparse +import json +import time + +try: + import requests +except ImportError: + print("ERROR: requests not installed.", file=sys.stderr) + sys.exit(1) + + +def search_kegg_pathways(query: str, species: str = "hsa") -> list[dict]: + """Search KEGG for pathways matching a keyword.""" + results = [] + + # KEGG find: search pathway database + url = f"https://rest.kegg.jp/find/pathway/{requests.utils.quote(query)}" + try: + r = requests.get(url, timeout=15) + if r.status_code == 200 and r.text.strip(): + for line in r.text.strip().split("\n"): + parts = line.split("\t", 1) + if len(parts) == 2: + path_id, path_name = parts + results.append({"id": path_id, "name": path_name, "source": "KEGG"}) + except Exception as e: + print(f"[WARNING] KEGG pathway search failed: {e}", file=sys.stderr) + + return results[:10] + + +def get_kegg_pathway_genes(pathway_id: str) -> list[str]: + """Get genes in a KEGG pathway.""" + # Convert to species-specific ID if needed + if not pathway_id.startswith("path:"): + pathway_id = f"path:{pathway_id}" + + url = f"https://rest.kegg.jp/get/{pathway_id}" + genes = [] + try: + r = requests.get(url, timeout=15) + if r.status_code == 200: + in_gene_section = False + for line in r.text.split("\n"): + if line.startswith("GENE"): + in_gene_section = True + elif line.startswith("COMPOUND") or line.startswith("REACTION") or line.startswith("///"): + in_gene_section = False + if in_gene_section and line.strip(): + # Extract gene symbol from format: " 1234 GENE_SYMBOL; description" + parts = line.strip().split(";")[0].split() + if len(parts) >= 2: + genes.append(parts[1]) + except Exception as e: + print(f"[WARNING] KEGG gene fetch failed: {e}", file=sys.stderr) + + 
return genes[:30] + + +def search_reactome(query: str) -> list[dict]: + """Search Reactome for pathways.""" + url = "https://reactome.org/ContentService/search/query" + params = { + "query": query, + "species": "Homo sapiens", + "types": "Pathway", + "cluster": "true", + } + results = [] + + try: + r = requests.get(url, params=params, timeout=20) + if r.status_code == 200: + data = r.json() + entries = data.get("results", [{}])[0].get("entries", []) if data.get("results") else [] + for entry in entries[:8]: + results.append({ + "id": entry.get("stId", ""), + "name": entry.get("name", ""), + "type": entry.get("type", ""), + "species": entry.get("species", ""), + "source": "Reactome", + "url": f"https://reactome.org/PathwayBrowser/#/{entry.get('stId', '')}", + }) + except Exception as e: + print(f"[WARNING] Reactome search failed: {e}", file=sys.stderr) + + return results + + +def search_string_interactions(gene_symbol: str, species: int = 9606, limit: int = 20) -> list[dict]: + """Get protein-protein interactions from STRING.""" + url = "https://string-db.org/api/json/network" + params = { + "identifiers": gene_symbol, + "species": species, + "limit": limit, + "required_score": 700, # high confidence + "caller_identity": "bioclaw-research-pipeline", + } + interactions = [] + + try: + r = requests.get(url, params=params, timeout=20) + if r.status_code == 200: + data = r.json() + for edge in data[:limit]: + interactions.append({ + "protein_a": edge.get("preferredName_A", ""), + "protein_b": edge.get("preferredName_B", ""), + "score": edge.get("score", 0), + "source": "STRING", + }) + except Exception as e: + print(f"[WARNING] STRING search failed: {e}", file=sys.stderr) + + return interactions + + +def search_string_functional(gene_symbol: str, species: int = 9606) -> list[dict]: + """Get functional enrichment for a gene from STRING.""" + url = "https://string-db.org/api/json/functional_annotation" + params = { + "identifiers": gene_symbol, + "species": species, + "caller_identity": "bioclaw-research-pipeline", + } + annotations = [] + + try: + r = requests.get(url, params=params, timeout=20) + if r.status_code == 200: + data = r.json() + for item in data[:15]: + category = item.get("category", "") + if category in ("KEGG", "Reactome", "Process"): + annotations.append({ + "category": category, + "term": item.get("term", ""), + "description": item.get("description", ""), + "fdr": item.get("fdr", 1.0), + }) + except Exception as e: + print(f"[WARNING] STRING functional annotation failed: {e}", file=sys.stderr) + + return annotations + + +def main(): + parser = argparse.ArgumentParser(description="Search pathway databases (KEGG, Reactome, STRING)") + parser.add_argument("query", help="Gene name, protein name, or pathway keyword") + parser.add_argument("--gene", help="Specific gene symbol for STRING interaction search") + parser.add_argument("--species", default="hsa", help="Species code (hsa=human, mmu=mouse)") + parser.add_argument("--json", action="store_true", help="Output raw JSON") + args = parser.parse_args() + + species_ncbi = 9606 if args.species in ("hsa", "human") else 10090 # mouse fallback + + print(f"[Pathway] Searching for: {args.query}", file=sys.stderr) + + # Run searches + kegg_pathways = search_kegg_pathways(args.query, species=args.species) + time.sleep(0.3) + + reactome_pathways = search_reactome(args.query) + time.sleep(0.3) + + gene_symbol = args.gene or args.query.split()[0] + string_interactions = search_string_interactions(gene_symbol, species=species_ncbi) + 
time.sleep(0.3) + + string_functions = search_string_functional(gene_symbol, species=species_ncbi) if args.gene else [] + + # Get genes for top KEGG pathway + kegg_genes = [] + if kegg_pathways: + top_pathway_id = kegg_pathways[0]["id"] + kegg_genes = get_kegg_pathway_genes(top_pathway_id) + + result = { + "query": args.query, + "kegg_pathways": kegg_pathways, + "kegg_genes_top_pathway": kegg_genes, + "reactome_pathways": reactome_pathways, + "string_interactions": string_interactions, + "string_functional": string_functions, + } + + if args.json: + print(json.dumps(result, ensure_ascii=False, indent=2)) + return + + # Formatted output + print(f"\n{'='*60}") + print(f"PATHWAY DATABASE RESULTS: {args.query}") + print(f"{'='*60}\n") + + # KEGG + if kegg_pathways: + print(f"KEGG PATHWAYS ({len(kegg_pathways)} found):") + for p in kegg_pathways: + print(f" {p['id']:20s} {p['name']}") + if kegg_genes: + print(f"\n Genes in top pathway ({kegg_pathways[0]['name']}):") + print(f" {', '.join(kegg_genes)}") + else: + print("KEGG: No pathways found for this query.") + + print() + + # Reactome + if reactome_pathways: + print(f"REACTOME PATHWAYS ({len(reactome_pathways)} found):") + for p in reactome_pathways: + print(f" [{p['id']}] {p['name']}") + print(f" URL: {p['url']}") + else: + print("Reactome: No pathways found.") + + print() + + # STRING + if string_interactions: + print(f"STRING INTERACTIONS for '{gene_symbol}' (high-confidence, score≥0.7):") + for edge in string_interactions[:15]: + score_pct = int(edge["score"] * 100) + print(f" {edge['protein_a']:12s} ↔ {edge['protein_b']:12s} (confidence: {score_pct}%)") + else: + print(f"STRING: No high-confidence interactions found for '{gene_symbol}'.") + + if string_functions: + print(f"\n Functional annotations:") + for fn in string_functions[:8]: + print(f" [{fn['category']}] {fn['description']} (FDR={fn['fdr']:.2e})") + + print(f"\n{'='*60}") + print("END OF PATHWAY RESULTS") + print(f"{'='*60}") + + print("\nNOTE FOR AGENT: Use these pathways as context when formulating hypotheses.") + print("Cross-reference pathway membership with literature findings to identify key nodes.") + + +if __name__ == "__main__": + main() diff --git a/container/skills/bio-research-pipeline/scripts/preprint-fetch b/container/skills/bio-research-pipeline/scripts/preprint-fetch new file mode 100755 index 0000000..7cb8887 --- /dev/null +++ b/container/skills/bio-research-pipeline/scripts/preprint-fetch @@ -0,0 +1,183 @@ +#!/usr/bin/env python3 +""" +Preprint fetcher (bioRxiv + medRxiv) for bio-research-pipeline. + +Usage: + preprint-fetch "research topic" [--max 30] [--days 180] [--server biorxiv|medrxiv|both] + +Output: structured text summary of relevant preprints +""" + +import sys +import argparse +import json +import re +import time +from datetime import datetime, timedelta +from urllib.parse import quote + +try: + import requests +except ImportError: + print("ERROR: requests not installed. 
Run: pip3 install requests", file=sys.stderr) + sys.exit(1) + + +BIORXIV_API = "https://api.biorxiv.org/details/biorxiv" +MEDRXIV_API = "https://api.biorxiv.org/details/medrxiv" + + +def fetch_server(api_url: str, start_date: str, end_date: str, cursor: int = 0) -> list[dict]: + url = f"{api_url}/{start_date}/{end_date}/{cursor}/json" + try: + r = requests.get(url, timeout=30) + r.raise_for_status() + data = r.json() + return data.get("collection", []) + except Exception as e: + print(f"[WARNING] API error: {e}", file=sys.stderr) + return [] + + +def score_relevance(paper: dict, keywords: list[str]) -> float: + title = paper.get("title", "").lower() + abstract = paper.get("abstract", "").lower() + category = paper.get("category", "").lower() + + score = 0.0 + for kw in keywords: + kw_lower = kw.lower() + if kw_lower in title: + score += 3.0 + if kw_lower in abstract: + score += 1.0 + if kw_lower in category: + score += 0.5 + + return score + + +def fetch_preprints( + topic: str, + days_back: int = 180, + max_results: int = 30, + server: str = "both", +) -> list[dict]: + end_date = datetime.now().strftime("%Y-%m-%d") + start_date = (datetime.now() - timedelta(days=days_back)).strftime("%Y-%m-%d") + + keywords = [w for w in topic.split() if len(w) > 3] + + print(f"[Preprint] Date range: {start_date} to {end_date}", file=sys.stderr) + print(f"[Preprint] Keywords: {keywords}", file=sys.stderr) + + all_papers = [] + + servers_to_search = [] + if server in ("biorxiv", "both"): + servers_to_search.append(("bioRxiv", BIORXIV_API)) + if server in ("medrxiv", "both"): + servers_to_search.append(("medRxiv", MEDRXIV_API)) + + for server_name, api_url in servers_to_search: + print(f"[Preprint] Fetching from {server_name}...", file=sys.stderr) + + # Fetch up to 200 papers (API returns 100 per page) + for cursor in [0, 100]: + papers = fetch_server(api_url, start_date, end_date, cursor) + if not papers: + break + + # Score and filter + for paper in papers: + score = score_relevance(paper, keywords) + if score > 0: + all_papers.append({ + "server": server_name, + "title": paper.get("title", ""), + "authors": paper.get("authors", ""), + "date": paper.get("date", ""), + "doi": paper.get("doi", ""), + "abstract": paper.get("abstract", ""), + "category": paper.get("category", ""), + "version": paper.get("version", "1"), + "relevance_score": score, + }) + + time.sleep(0.3) # be polite to API + + # Sort by relevance, then by date (most recent first) + all_papers.sort(key=lambda x: (x["relevance_score"], x["date"]), reverse=True) + + return all_papers[:max_results] + + +def main(): + parser = argparse.ArgumentParser(description="Fetch and summarize bioRxiv/medRxiv preprints") + parser.add_argument("topic", help="Research topic") + parser.add_argument("--max", type=int, default=30, help="Max preprints to return (default: 30)") + parser.add_argument("--days", type=int, default=180, help="Days back to search (default: 180)") + parser.add_argument("--server", choices=["biorxiv", "medrxiv", "both"], default="both") + parser.add_argument("--json", action="store_true", help="Output raw JSON") + args = parser.parse_args() + + print(f"[Preprint] Searching for: {args.topic}", file=sys.stderr) + + papers = fetch_preprints( + topic=args.topic, + days_back=args.days, + max_results=args.max, + server=args.server, + ) + + print(f"[Preprint] Found {len(papers)} relevant preprints", file=sys.stderr) + + if args.json: + print(json.dumps(papers, ensure_ascii=False, indent=2)) + return + + print(f"\n{'='*60}") + print(f"PREPRINT 
RESULTS: {args.topic}") + print(f"Papers found: {len(papers)} | Servers: {args.server} | Period: last {args.days} days") + print(f"{'='*60}\n") + + if not papers: + print("No relevant preprints found for this topic in the specified date range.") + print("Suggestions:") + print(" - Try broader search terms") + print(" - Increase --days parameter") + print(f" - Search directly at https://www.biorxiv.org/search/{quote(args.topic)}") + return + + for i, paper in enumerate(papers, 1): + # Truncate authors + authors = paper["authors"] + if len(authors) > 80: + authors = authors[:80] + "..." + + print(f"[{i}] [{paper['server']}] {paper['title']}") + print(f" {authors}") + print(f" Date: {paper['date']} | Category: {paper['category']} | v{paper['version']}") + print(f" DOI: https://doi.org/{paper['doi']}" if paper['doi'] else " DOI: N/A") + + abstract = paper["abstract"] + if len(abstract) > 450: + abstract = abstract[:450] + "..." + if abstract: + print(f" {abstract}") + + print(f" Relevance score: {paper['relevance_score']:.1f}") + print() + + print(f"\n{'='*60}") + print("END OF PREPRINT RESULTS") + print(f"{'='*60}") + + # Highlight novel findings + print("\nNOTE FOR AGENT: Preprints have NOT been peer-reviewed.") + print("Treat findings as preliminary. Check if any have since been published in journals.") + print(f"Direct search URL: https://www.biorxiv.org/search/{quote(args.topic)}") + + +if __name__ == "__main__": + main() diff --git a/container/skills/bio-research-pipeline/scripts/pubmed-fetch b/container/skills/bio-research-pipeline/scripts/pubmed-fetch new file mode 100755 index 0000000..4441d4b --- /dev/null +++ b/container/skills/bio-research-pipeline/scripts/pubmed-fetch @@ -0,0 +1,158 @@ +#!/usr/bin/env python3 +""" +PubMed literature fetcher for bio-research-pipeline. + +Usage: + pubmed-fetch "research topic" [--max 40] [--years 5] [--mode abstract|full] + +Output: structured text summary of relevant papers +""" + +import sys +import argparse +import json +import re +from datetime import datetime + +try: + from Bio import Entrez, Medline +except ImportError: + print("ERROR: biopython not installed. 
Run: pip3 install biopython", file=sys.stderr) + sys.exit(1) + +Entrez.email = "bioclaw-agent@research.ai" +Entrez.tool = "BioClaw-ResearchPipeline" + + +def search_pubmed(query: str, max_results: int = 40, years_back: int = 5) -> list[str]: + current_year = datetime.now().year + min_year = current_year - years_back + + handle = Entrez.esearch( + db="pubmed", + term=query, + retmax=max_results, + sort="relevance", + datetype="pdat", + mindate=str(min_year), + maxdate=str(current_year), + ) + record = Entrez.read(handle) + handle.close() + return record["IdList"] + + +def fetch_abstracts(pmids: list[str]) -> list[dict]: + if not pmids: + return [] + + handle = Entrez.efetch( + db="pubmed", + id=",".join(pmids), + rettype="medline", + retmode="text", + ) + records = list(Medline.parse(handle)) + handle.close() + + papers = [] + for rec in records: + papers.append({ + "pmid": rec.get("PMID", ""), + "title": rec.get("TI", "No title"), + "authors": rec.get("AU", [])[:3], # first 3 authors + "journal": rec.get("TA", ""), + "year": rec.get("DP", "")[:4], + "abstract": rec.get("AB", "No abstract available"), + "mesh_terms": rec.get("MH", [])[:10], + "keywords": rec.get("OT", [])[:10], + }) + return papers + + +def extract_key_entities(papers: list[dict]) -> dict: + """Simple heuristic extraction of genes, proteins, pathways from abstracts.""" + all_text = " ".join(p["abstract"] for p in papers).upper() + + # Common pathway keywords + pathways = [] + pathway_keywords = [ + "MAPK", "PI3K", "AKT", "mTOR", "NF-κB", "NFKB", "Wnt", "WNT", + "Notch", "NOTCH", "Hedgehog", "JAK", "STAT", "TGF-β", "TGFB", + "p53", "TP53", "AMPK", "HIF", "VEGF", "TNF", "IL-6", "IL6", + "Hippo", "YAP", "TAZ", "KRAS", "EGFR", "ERK", "JNK", "p38", + "CDK", "RB", "E2F", "Autophagy", "AUTOPHAGY", "Apoptosis", "APOPTOSIS", + "Ferroptosis", "FERROPTOSIS", "Pyroptosis", "Ubiquitin", "UBIQUITIN", + ] + for kw in pathway_keywords: + if kw.upper() in all_text: + pathways.append(kw) + + return {"mentioned_pathways": list(set(pathways))[:20]} + + +def main(): + parser = argparse.ArgumentParser(description="Fetch and summarize PubMed literature") + parser.add_argument("topic", help="Research topic or query string") + parser.add_argument("--max", type=int, default=40, help="Max papers to retrieve (default: 40)") + parser.add_argument("--years", type=int, default=5, help="Years back to search (default: 5)") + parser.add_argument("--json", action="store_true", help="Output raw JSON instead of formatted text") + args = parser.parse_args() + + print(f"[PubMed] Searching for: {args.topic}", file=sys.stderr) + print(f"[PubMed] Parameters: max={args.max}, years_back={args.years}", file=sys.stderr) + + # Build enriched query + base_query = args.topic + mechanism_query = f"({base_query}[Title/Abstract]) AND (mechanism[Title/Abstract] OR pathway[Title/Abstract] OR signaling[Title/Abstract] OR molecular[Title/Abstract])" + + pmids = search_pubmed(mechanism_query, max_results=args.max, years_back=args.years) + print(f"[PubMed] Found {len(pmids)} papers", file=sys.stderr) + + if not pmids: + # Fallback: broader search without mechanism filter + pmids = search_pubmed(base_query, max_results=args.max, years_back=args.years) + print(f"[PubMed] Fallback search: {len(pmids)} papers", file=sys.stderr) + + papers = fetch_abstracts(pmids[:30]) + entities = extract_key_entities(papers) + + if args.json: + print(json.dumps({"papers": papers, "entities": entities}, ensure_ascii=False, indent=2)) + return + + # Formatted output + print(f"\n{'='*60}") + 
print(f"PUBMED SEARCH RESULTS: {args.topic}") + print(f"Papers retrieved: {len(papers)} | Search period: last {args.years} years") + print(f"{'='*60}\n") + + if entities["mentioned_pathways"]: + print(f"KEY PATHWAYS MENTIONED ACROSS PAPERS:") + print(f" {', '.join(entities['mentioned_pathways'])}\n") + + for i, paper in enumerate(papers, 1): + authors_str = ", ".join(paper["authors"]) if paper["authors"] else "Unknown" + if len(paper["authors"]) >= 3: + authors_str += " et al." + + print(f"[{i}] {paper['title']}") + print(f" {authors_str} | {paper['journal']} {paper['year']} | PMID: {paper['pmid']}") + + # Truncate abstract to 400 chars + abstract = paper["abstract"] + if len(abstract) > 400: + abstract = abstract[:400] + "..." + print(f" {abstract}") + + if paper["mesh_terms"]: + print(f" MeSH: {', '.join(paper['mesh_terms'][:5])}") + print() + + print(f"\n{'='*60}") + print("END OF PUBMED RESULTS") + print(f"{'='*60}") + + +if __name__ == "__main__": + main() From 3b5ab52567abbaf4cdcdca1019fc715646f41678 Mon Sep 17 00:00:00 2001 From: Fanglinqiang Date: Sun, 15 Mar 2026 16:54:39 +0800 Subject: [PATCH 06/15] feat: bio-research-pipeline uses Claude + MiniMax + Qwen in parallel Stage 1 now dispatches to 3 different models simultaneously: - Task A (Claude): real PubMed API + bioRxiv API fetch via Python scripts - Task B (MiniMax): biomedical knowledge analysis via call_minimax MCP tool - Task C (Qwen): pathway landscape + regulatory network via call_qwen + pathway-search script Progress update sent to user after all 3 tasks complete. Co-Authored-By: Claude Sonnet 4.6 --- .../skills/bio-research-pipeline/SKILL.md | 231 ++++++++++-------- 1 file changed, 128 insertions(+), 103 deletions(-) diff --git a/container/skills/bio-research-pipeline/SKILL.md b/container/skills/bio-research-pipeline/SKILL.md index ea1f782..39508eb 100644 --- a/container/skills/bio-research-pipeline/SKILL.md +++ b/container/skills/bio-research-pipeline/SKILL.md @@ -60,141 +60,166 @@ Parse the research direction from the user's message, then execute all 5 stages --- -## STAGE 1 — PARALLEL LITERATURE SEARCH +## STAGE 1 — PARALLEL LITERATURE SEARCH (3 MODELS) -**Goal:** Cast a wide net across three complementary literature sources simultaneously. +**Goal:** Cast a wide net using three models simultaneously — each with a different strength. -Use the `Task` tool to launch **3 parallel search tasks**. Do NOT wait for one before starting the next — launch all three in the same message. +| Role | Model | Responsibility | +|------|-------|---------------| +| Task A | **Claude** (you) | Real data fetch — PubMed API + bioRxiv API, runs Python scripts | +| Task B | **MiniMax** | Biomedical knowledge analysis, Chinese literature context, cross-checking | +| Task C | **Qwen** | Pathway landscape mapping, fast synthesis, regulatory network overview | -### Task A — PubMed Mechanistic Search +Use the `Task` tool to launch **all 3 tasks in the same message** (parallel). Do NOT wait for one before starting the next. + +--- + +### Task A — Claude: Real Literature Fetch (PubMed + Preprints) + +This task runs actual API calls using the helper scripts in this skill. Prompt for Task A: ``` -You are a PubMed literature specialist. Search PubMed for papers about: {RESEARCH_DIRECTION} +You are doing a real literature fetch for the bio-research-pipeline. 
+Research direction: {RESEARCH_DIRECTION} -Run the following Python script to fetch results: +Step 1 — Locate skill scripts: +SKILL_DIR=$(find ~/.claude/skills -name "SKILL.md" -path "*/bio-research-pipeline/*" | xargs dirname 2>/dev/null | head -1) -```python -from Bio import Entrez -import json, sys +Step 2 — Run PubMed fetch: +python3 "$SKILL_DIR/scripts/pubmed-fetch" "{RESEARCH_DIRECTION}" --max 40 --years 5 -Entrez.email = "bioclaw-agent@research.ai" +Step 3 — Run preprint fetch: +python3 "$SKILL_DIR/scripts/preprint-fetch" "{RESEARCH_DIRECTION}" --max 25 --days 180 -# Build search query — include MeSH terms if applicable -query = "{RESEARCH_DIRECTION}[Title/Abstract] AND (mechanism OR pathway OR signaling OR molecular)" -handle = Entrez.esearch(db="pubmed", term=query, retmax=40, sort="relevance", - datetype="pdat", mindate="2020", maxdate="2025") -record = Entrez.read(handle) -ids = record["IdList"] +Step 4 — Synthesize into structured summary: +- Key molecular mechanisms found +- Key proteins/genes/pathways mentioned +- Most significant recent findings (2023-2025) +- Any contradictions or open debates +- Top 5 most relevant papers with PMID/DOI -# Fetch abstracts -handle2 = Entrez.efetch(db="pubmed", id=",".join(ids[:30]), rettype="abstract", retmode="text") -abstracts = handle2.read() -print(abstracts[:15000]) +Output all results clearly labeled with section headers. ``` -Then summarize: -1. Key molecular mechanisms mentioned -2. Key proteins/genes involved -3. Key signaling pathways implicated -4. Most cited findings (appear in multiple papers) -5. Contradictions or debates in the literature +--- -Output as structured text with section headers. -``` +### Task B — MiniMax: Biomedical Knowledge Analysis -### Task B — Preprint Search (bioRxiv / medRxiv) +This task calls MiniMax via `mcp__bioclaw__call_minimax`. MiniMax contributes its own training knowledge — particularly strong on Chinese biomedical literature, clinical context, and TCM-related pathways. Prompt for Task B: ``` -You are a preprint literature specialist. Find the latest cutting-edge preprints about: {RESEARCH_DIRECTION} - -Step 1 — Search bioRxiv API: -```python -import requests, json - -topic = "{RESEARCH_DIRECTION}" -# bioRxiv API — last 180 days -url = f"https://api.biorxiv.org/details/biorxiv/2024-09-01/2025-03-15/0/json" -r = requests.get(url, timeout=30) -data = r.json() - -# Filter by keyword relevance -keywords = topic.lower().split() -relevant = [] -for paper in data.get("collection", []): - title = paper.get("title", "").lower() - abstract = paper.get("abstract", "").lower() - if any(kw in title or kw in abstract for kw in keywords): - relevant.append({ - "title": paper["title"], - "authors": paper.get("authors", ""), - "date": paper.get("date", ""), - "abstract": paper.get("abstract", "")[:500], - "doi": paper.get("doi", "") - }) - -print(json.dumps(relevant[:20], indent=2, ensure_ascii=False)) -``` +Call mcp__bioclaw__call_minimax with this prompt: + +system: "You are an expert biomedical research analyst with deep knowledge of molecular biology, disease mechanisms, and the latest research trends in both Western and Chinese scientific literature." -Step 2 — Use WebSearch to find additional preprints: -Search: "{RESEARCH_DIRECTION} site:biorxiv.org OR site:medrxiv.org 2024 2025" +prompt: "Research direction: {RESEARCH_DIRECTION} -Summarize: -1. Emerging findings not yet in peer-reviewed journals -2. Novel methodologies being applied -3. Preliminary data suggesting new directions -4. 
Discrepancies with established literature +Please provide a comprehensive analysis covering: -Output as structured text. +1. CURRENT STATE OF THE FIELD + - What is well-established about this topic? + - What are the 3-5 most important mechanistic insights from recent years? + - Which research groups/labs are leading this field? + +2. KEY MOLECULAR PLAYERS + - List the most important proteins, genes, and non-coding RNAs involved + - Describe their known roles and interactions + - Note any recently discovered players (2022-2025) + +3. DISEASE RELEVANCE + - Which diseases/conditions is this most relevant to? + - What is the current clinical/translational status? + - Any recent clinical trials or translational breakthroughs? + +4. KNOWLEDGE GAPS + - What are the most important unresolved questions? + - Where do different research groups disagree? + - What has been tried but failed, and why? + +5. EMERGING ANGLES + - What novel angles are researchers starting to explore? + - Any recent paradigm shifts in thinking about this topic? + +Provide specific, concrete information. Cite field knowledge accurately." + +After getting MiniMax's response, output it verbatim with the header: "=== MINIMAX ANALYSIS ===" ``` -### Task C — Reviews + Pathway Databases +--- + +### Task C — Qwen: Pathway Landscape + Regulatory Network + +This task calls Qwen via `mcp__bioclaw__call_qwen` AND runs the pathway-search script for real database data. Prompt for Task C: ``` -You are a pathway and review specialist. Map the known biology for: {RESEARCH_DIRECTION} - -Step 1 — Search for review articles: -```python -from Bio import Entrez -import json - -Entrez.email = "bioclaw-agent@research.ai" -query = "{RESEARCH_DIRECTION}[Title/Abstract] AND (Review[pt] OR systematic review OR meta-analysis)" -handle = Entrez.esearch(db="pubmed", term=query, retmax=20, sort="relevance") -record = Entrez.read(handle) -ids = record["IdList"] -handle2 = Entrez.efetch(db="pubmed", id=",".join(ids[:15]), rettype="abstract", retmode="text") -print(handle2.read()[:10000]) -``` +You are mapping the pathway landscape for the bio-research-pipeline. +Research direction: {RESEARCH_DIRECTION} -Step 2 — Query KEGG pathway API: -```python -import requests - -# Search KEGG for relevant pathways -topic_keywords = "{RESEARCH_DIRECTION}".split()[:3] -for kw in topic_keywords: - r = requests.get(f"https://rest.kegg.jp/find/pathway/{kw}", timeout=15) - if r.status_code == 200 and r.text.strip(): - print(f"KEGG pathways for '{kw}':") - print(r.text[:2000]) -``` +Step 1 — Run real pathway database search: +SKILL_DIR=$(find ~/.claude/skills -name "SKILL.md" -path "*/bio-research-pipeline/*" | xargs dirname 2>/dev/null | head -1) +python3 "$SKILL_DIR/scripts/pathway-search" "{RESEARCH_DIRECTION}" + +Step 2 — Call Qwen for pathway synthesis: +Use mcp__bioclaw__call_qwen with: + +system: "You are a systems biology expert specializing in signaling pathway analysis and gene regulatory networks." + +prompt: "For the research topic: {RESEARCH_DIRECTION} + +Please map the complete biological pathway landscape: + +1. CORE PATHWAYS INVOLVED + - List and briefly describe each relevant pathway + - Explain how they interconnect for this topic + +2. REGULATORY HIERARCHY + - Upstream triggers / sensors + - Master regulators (transcription factors, kinases) + - Key effectors and their downstream targets + - Feedback and feedforward loops -Step 3 — Use WebSearch to find Reactome pathway information: -Search: "{RESEARCH_DIRECTION} Reactome pathway 2024" +3. 
CROSSTALK POINTS + - Where do pathways intersect or antagonize each other? + - Which nodes are shared across multiple pathways? + - Potential compensatory mechanisms to be aware of -Synthesize: -1. Established pathway map (which pathways are involved) -2. Key regulatory nodes (master regulators, feedback loops) -3. Known therapeutic targets in these pathways -4. Gaps in current knowledge (explicitly stated in reviews) +4. CONTEXT-SPECIFIC REGULATION + - How does this regulation differ between cell types? + - Tissue-specific or disease-specific pathway alterations + - Known species differences (mouse vs human) -Output as structured text. +5. THERAPEUTIC INTERVENTION POINTS + - Which nodes are most druggable? + - Existing drugs/inhibitors targeting these pathways + - Potential combination therapy rationale + +Be specific about molecule names and interaction types (phosphorylation, ubiquitination, transcriptional activation, etc.)" + +Output Qwen's response with header: "=== QWEN PATHWAY ANALYSIS ===" +Then append the real database results from Step 1 with header: "=== DATABASE RESULTS ===" ``` -**After launching all 3 tasks**, collect results with `TaskOutput` for each task ID. Wait for all 3 to complete. +--- + +**After launching all 3 tasks**, collect results: +``` +taskA_result = TaskOutput(task_id_A) +taskB_result = TaskOutput(task_id_B) +taskC_result = TaskOutput(task_id_C) +``` +Wait for all 3 to complete before proceeding to Stage 2. + +**Send a progress update to the user** via `mcp__bioclaw__send_message`: +``` +"📚 文献检索完成(Claude + MiniMax + Qwen 三路并行) +🔬 Claude: PubMed {N} 篇 + 预印本 {M} 篇 +🤖 MiniMax: 生物医学知识分析完成 +⚡ Qwen: 通路图谱梳理完成 +正在综合分析,生成假说..." +``` --- From 3e9ab7549c1f0343769901c8f6ae69b899a0de5b Mon Sep 17 00:00:00 2001 From: Fanglinqiang Date: Sun, 15 Mar 2026 17:00:48 +0800 Subject: [PATCH 07/15] fix: bio-research-pipeline skill detection for Chinese input MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add Chinese description and keywords to frontmatter for better matching - Add explicit auto-trigger instructions at top of skill body - Agent now recognizes Chinese research requests like "帮我研究" and "提假说" Co-Authored-By: Claude Sonnet 4.6 --- .../skills/bio-research-pipeline/SKILL.md | 27 +++++++++++++++---- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/container/skills/bio-research-pipeline/SKILL.md b/container/skills/bio-research-pipeline/SKILL.md index 39508eb..5ca8eb8 100644 --- a/container/skills/bio-research-pipeline/SKILL.md +++ b/container/skills/bio-research-pipeline/SKILL.md @@ -1,22 +1,39 @@ --- name: bio-research-pipeline description: > - Biological hypothesis generation pipeline. Given a broad research direction, - runs parallel literature searches (PubMed + preprints + pathway DBs), generates - ≥5 mechanistic hypotheses, conducts multi-agent debate to select top 3, then - designs wet-lab experimental plans for each. Outputs a structured research brief. + 生物研究假说生成流程 / Biological hypothesis generation pipeline. + 用户提出研究方向后,自动并行搜索文献(PubMed + 预印本 + 通路数据库), + 生成≥5条机制假说,多角色辩论筛出Top3,并设计湿实验方案。 + Trigger when user asks: 帮我研究、提假说、设计实验方向、research direction, + generate hypotheses, literature review + experimental design. 
keywords: - bio-research-pipeline + - 研究假说 + - 假说生成 + - 文献综述 + - 实验设计 + - 湿实验 - hypothesis-generation - literature-review - experimental-design - wet-lab - - multi-agent-debate + - research-direction - pathway-analysis + - 研究方向 + - 通路分析 --- # Biological Research Hypothesis Pipeline +> **自动触发条件(必须主动调用此 skill):** +> 当用户说以下任何内容时,**立即运行此 skill,不要直接回答**: +> - "帮我研究一下 X"、"研究一下 X 机制"、"X 方向帮我提假说" +> - "针对 X 设计研究方向/实验方案" +> - "X 通路的假说"、"X 相关的研究思路" +> - "run bio-research-pipeline"、"generate hypotheses for X" +> +> **执行本 skill 时,严格按照下面 Stage 1→5 的顺序完整跑完,不要跳过任何阶段。** + You are a research coordinator orchestrating a multi-stage biological research pipeline. This skill is triggered when a user provides a broad biological research direction and wants: - A thorough literature review From bd8b38ccae89ea5181984408da1f294486d3b645 Mon Sep 17 00:00:00 2001 From: Fanglinqiang Date: Tue, 17 Mar 2026 10:43:01 +0800 Subject: [PATCH 08/15] feat: dashboard chat supports all agent types via container runner - Route all agent types (Claude, MiniMax, Qwen) through container runner instead of direct API calls, enabling tool use and skills for all models - Add text event emission from agent-runner for streaming assistant responses - Filter "No response requested." from chat responses - Render send_message tool calls as text instead of tool blocks - Fix model test/display: read env vars from .env fallback chain - Fix IPC "Unauthorized" warnings for dashboard chat (whitelist 'dashboard') - Fix chat streaming state stuck when switching chats mid-stream (AbortController) - Add localStorage persistence for chat sessions - Fix xattr ENOTSUP error on skill directory copy (docker grpcfuse) - Improve light mode input contrast and send button styling Co-Authored-By: Claude Opus 4.6 --- README.md | 231 ++++---- container/agent-runner/src/index.ts | 49 ++ package-lock.json | 217 ++++++++ package.json | 1 + src/channels/whatsapp.ts | 5 +- src/cli.ts | 17 +- src/config.ts | 10 +- src/container-runner.ts | 106 +++- src/dashboard.html | 824 +++++++++++++++++++++++++++- src/dashboard.ts | 207 ++++++- src/db.ts | 110 ++++ src/index.ts | 26 +- src/ipc.ts | 82 +-- 13 files changed, 1672 insertions(+), 213 deletions(-) diff --git a/README.md b/README.md index 6676b6d..eee9191 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ # BioClaw -### AI-Powered Bioinformatics Research Assistant on WhatsApp +### AI-Powered Bioinformatics Research Assistant [English](README.md) | [简体中文](README.zh-CN.md) @@ -13,7 +13,7 @@ [![Paper](https://img.shields.io/badge/bioRxiv-STELLA-b31b1b.svg)](https://www.biorxiv.org/content/10.1101/2025.07.01.662467v2) [![arXiv](https://img.shields.io/badge/arXiv-2507.02004-b31b1b.svg)](https://arxiv.org/abs/2507.02004) -**BioClaw** brings the power of computational biology directly into WhatsApp group chats. Researchers can run BLAST searches, render protein structures, generate publication-quality plots, perform sequencing QC, and search the literature — all through natural language messages. +**BioClaw** brings the power of computational biology to messaging platforms and the web. Researchers can run BLAST searches, render protein structures, generate publication-quality plots, perform sequencing QC, and search the literature — all through natural language. Built on the [NanoClaw](https://github.com/qwibitai/nanoclaw) architecture with bioinformatics tools and skills from the [STELLA](https://github.com/zaixizhang/STELLA) project, powered by the [Claude Agent SDK](https://docs.anthropic.com/en/docs/agents-sdk). 
@@ -33,6 +33,8 @@ Welcome to join our WeChat group to discuss and exchange ideas! Scan the QR code - [Overview](#overview) - [Quick Start](#quick-start) +- [Web Dashboard & Chat](#web-dashboard--chat) +- [Multi-Model Support](#multi-model-support) - [Demo Examples](#demo-examples) - [System Architecture](#system-architecture) - [Included Tools](#included-tools) @@ -44,7 +46,7 @@ Welcome to join our WeChat group to discuss and exchange ideas! Scan the QR code The rapid growth of biomedical data, tools, and literature has created a fragmented research landscape that outpaces human expertise. Researchers frequently need to switch between command-line bioinformatics tools, visualization software, databases, and literature search engines — often across different machines and environments. -**BioClaw** addresses this by providing a conversational interface to a comprehensive bioinformatics toolkit. By messaging `@Bioclaw` in a WhatsApp group, researchers can: +**BioClaw** addresses this by providing a conversational interface to a comprehensive bioinformatics toolkit. Researchers can interact via WhatsApp, Telegram, WeCom, or the built-in web chat to: - **Sequence Analysis** — Run BLAST searches against NCBI databases, align reads with BWA/minimap2, and call variants - **Quality Control** — Generate FastQC reports on sequencing data with automated interpretation @@ -59,10 +61,9 @@ Results — including images, plots, and structured reports — are delivered di ### Prerequisites -- macOS or Linux +- macOS with [Apple Container](https://developer.apple.com/documentation/apple-containers) or Linux with Docker - Node.js 20+ -- Docker Desktop -- Anthropic API key +- Anthropic API key (or Claude Code OAuth token) ### Installation @@ -76,29 +77,93 @@ npm install # Configure environment cp .env.example .env -# Edit .env with your Anthropic API key and WhatsApp credentials +# Edit .env with your API keys + +# Build the agent container image +./container/build.sh # Start BioClaw npm start ``` -### Usage +### Second Quick Start -In any WhatsApp group where BioClaw is connected, simply message: +Just send this message to [OpenClaw](https://github.com/qwibitai/nanoclaw): +```text +install https://github.com/Runchuan-BU/BioClaw +``` + +### Usage + +**Via messaging platforms** — In any connected WhatsApp/Telegram/WeCom group: ``` @Bioclaw ``` -## Second Quick Start +**Via web chat** — Open `http://localhost:3847` in your browser and start chatting directly. + +See the [ExampleTask](ExampleTask/ExampleTask.md) document for 6 ready-to-use demo prompts with expected outputs. -Just send the message to OpenClaw: +## Web Dashboard & Chat -```text -install https://github.com/Runchuan-BU/BioClaw -``` +BioClaw includes a built-in web interface accessible at `http://localhost:3847`. -See the [ExampleTask](ExampleTask/ExampleTask.md) document for 6 ready-to-use demo prompts with expected outputs. +### Chat Interface + +- **Open WebUI-style layout** — Centered conversation area with left sidebar for chat history +- **Group selector** — Choose any registered group to route messages through its configured agent and container +- **File & image upload** — Attach biology data files (FASTA, VCF, BAM, CSV, PDF, etc.) 
and images directly in chat +- **Chat persistence** — Conversations are saved to localStorage and survive page refreshes +- **Streaming responses** — Real-time token-by-token response display with tool use visualization + +### Dashboard Tabs + +| Tab | Description | +|-----|-------------| +| **Groups** | All registered messaging groups with agent type, message count, and last activity | +| **Tasks** | Scheduled tasks — view, pause, resume, and inspect run history | +| **Stats** | Activity charts (messages/day, task runs/day, success rate) with 7d/14d/30d period selector | +| **Models** | Configured AI models with connection status, specs, quick test, and daily token usage tracking | +| **Skills** | Installed agent skills (90+ bio tools and container skills) with search and category filters | +| **Containers** | Running container instances with image info | +| **Alerts** | Alert rules for group silence thresholds | +| **Logs** | Live log viewer with auto-scroll and error filtering | + +### UI Controls + +- **Dark / Light theme** — Toggle in the header, persisted in `localStorage` +- **Language** — Switch between Chinese (中文) and English (EN) +- **Auto-refresh** — Select refresh interval (off / 10s / 30s / 1min / 5min) + +## Multi-Model Support + +BioClaw supports multiple AI backends. Each group can be configured with a different agent type: + +| Agent | Backend | Features | +|-------|---------|----------| +| **Claude** | Anthropic (container) | Full tool use, file I/O, biology skills, persistent sessions | +| **MiniMax** | MiniMax API (OpenAI-compatible) | Chat with reasoning, image understanding, PDF text extraction | +| **Qwen** | Local/remote Qwen (OpenAI-compatible) | Chat with local model, image understanding, PDF text extraction | + +Configure via environment variables in `.env`: + +```bash +# Claude (required) +ANTHROPIC_API_KEY=sk-ant-... +# or +CLAUDE_CODE_OAUTH_TOKEN=... + +# MiniMax (optional) +MINIMAX_API_KEY=sk-... +MINIMAX_BASE_URL=https://api.minimax.chat/v1 +MINIMAX_MODEL=MiniMax-M2.5 + +# Qwen (optional) +QWEN_API_BASE=http://your-qwen-server:8864/v1 +QWEN_AUTH_TOKEN=your-token +QWEN_MODEL=your-model-name +``` ## Demo Examples @@ -177,36 +242,30 @@ Below are live demonstrations of BioClaw handling real bioinformatics tasks via BioClaw is built on the [NanoClaw](https://github.com/qwibitai/nanoclaw) container-based agent architecture, extended with biomedical tools and domain knowledge from the [STELLA](https://github.com/zaixizhang/STELLA) framework. ``` -WhatsApp ──► Node.js Orchestrator ──► SQLite (state) ──► Docker Container - │ - Claude Agent SDK - │ - ┌──────────┴──────────┐ - │ Bioinformatics │ - │ Toolbox │ - ├─────────────────────┤ - │ BLAST+ │ SAMtools │ - │ BWA │ BEDTools │ - │ FastQC │ PyMOL │ - │ minimap2│ seqtk │ - ├─────────────────────┤ - │ Python Libraries │ - ├─────────────────────┤ - │ BioPython │ pandas │ - │ RDKit │ scanpy │ - │ PyDESeq2 │ pysam │ - │ matplotlib│ seaborn │ - └─────────────────────┘ +Channels (WhatsApp/Telegram/WeCom/Web) + │ + ▼ + Node.js Orchestrator ──► SQLite (state) + │ + ├──► Claude Container Agent + │ │ + │ Claude Agent SDK + Bio Tools + │ (BLAST, SAMtools, BWA, PyMOL, ...) 
+ │ + ├──► MiniMax API (OpenAI-compatible) + │ + └──► Qwen API (OpenAI-compatible) ``` **Key design principles (inherited from NanoClaw):** | Component | Description | |-----------|-------------| -| **Container Isolation** | Each conversation group runs in its own Docker container with pre-installed bioinformatics tools | +| **Container Isolation** | Each conversation group runs in its own container with pre-installed bioinformatics tools | | **Filesystem IPC** | Text and image results are communicated between the agent and orchestrator via the filesystem | | **Per-Group State** | SQLite database tracks messages, sessions, and group-specific workspaces | | **Channel Agnostic** | Channels self-register at startup; the orchestrator connects whichever ones have credentials | +| **Multi-Model** | Each group can be configured with a different AI backend (Claude, MiniMax, Qwen) | **Biomedical capabilities (attributed to STELLA):** @@ -238,85 +297,41 @@ The bioinformatics tool suite and domain-specific skills — including sequence | **scanpy** | Single-cell RNA-seq analysis | | **pysam** | SAM/BAM file access from Python | -## Quick Start - -### Prerequisites - -- macOS or Linux -- Node.js 20+ -- Docker Desktop -- Anthropic API key - -### Installation - -```bash -# Clone the repository -git clone https://github.com/Runchuan-BU/BioClaw.git -cd BioClaw - -# Install dependencies -npm install - -# Configure environment -cp .env.example .env -# Edit .env with your Anthropic API key - -# Build the agent Docker image -docker build -t bioclaw-agent:latest container/ - -# Start BioClaw (scan the QR code with WhatsApp on first run) -npm start -``` - -### Usage - -In any WhatsApp group where BioClaw is connected, simply message: - -``` -@Bioclaw -``` - -See the [ExampleTask](ExampleTask/ExampleTask.md) document for 6 ready-to-use demo prompts with expected outputs. - -## Web Dashboard - -BioClaw includes a built-in web dashboard accessible at `http://localhost:3847` (or the port set by `DASHBOARD_PORT`). 
- -### Features - -| Tab | Description | -|-----|-------------| -| **Overview** | Live stats: message count, task runs, connected groups, registered models and skills | -| **Groups** | All WhatsApp/Telegram groups with message count and last-activity time | -| **Tasks** | Scheduled task list — view, pause, resume, and cancel tasks | -| **Stats** | Activity charts (messages per day, task runs per day, success rate, avg/max duration) with 7d/14d/30d period selector | -| **Alerts** | Alert rules based on group silence thresholds — see which rules are currently firing | -| **Settings** | Environment configuration viewer | -| **Models** | Configured AI models (Claude, MiniMax, Qwen) with auth status | -| **Skills** | Installed agent skills | - -### UI controls - -- **Auto-refresh** — select refresh interval (off / 10s / 30s / 1min / 5min) -- **Dark / Light theme** — toggle in the header, persisted in `localStorage` -- **Language** — switch between Chinese (中文) and English (EN), persisted in `localStorage` - ## Project Structure ``` BioClaw/ -├── bioclaw_logo.jpg # Project logo -├── ExampleTask/ -│ ├── ExampleTask.md # 6 demo prompts with descriptions -│ ├── 1.jpg # Workspace triage demo -│ ├── 2.jpg # PubMed search demo -│ ├── 3.jpg # Protein structure demo -│ ├── 4.jpg # BLAST search demo -│ ├── 5.jpg # FastQC QC demo -│ └── 6.jpg # Volcano plot demo +├── src/ +│ ├── index.ts # Orchestrator: state, message loop, agent invocation +│ ├── dashboard.ts # Web dashboard & chat API server +│ ├── dashboard.html # Single-page dashboard & chat UI +│ ├── channels/ +│ │ └── whatsapp.ts # WhatsApp connection, auth, send/receive +│ ├── container-runner.ts # Spawns agent containers with mounts +│ ├── task-scheduler.ts # Runs scheduled tasks +│ ├── router.ts # Message formatting and outbound routing +│ ├── config.ts # Trigger pattern, paths, intervals +│ ├── db.ts # SQLite operations +│ └── ipc.ts # IPC watcher and task processing +├── container/ +│ ├── Dockerfile # Agent container with bio tools +│ ├── build.sh # Container build script +│ ├── agent-runner/ # In-container agent runner +│ └── skills/ # Bio tool skill definitions (90+) +├── groups/ # Per-group memory and workspace (isolated) +├── store/ # WhatsApp auth and session data +├── ExampleTask/ # Demo prompts and screenshots └── README.md ``` +## Development + +```bash +npm run dev # Run with hot reload +npm run build # Compile TypeScript +./container/build.sh # Rebuild agent container (with bio tools) +``` + ## Citation BioClaw builds upon the STELLA framework. 
If you use BioClaw in your research, please cite: diff --git a/container/agent-runner/src/index.ts b/container/agent-runner/src/index.ts index 94a89bc..c5ed623 100644 --- a/container/agent-runner/src/index.ts +++ b/container/agent-runner/src/index.ts @@ -106,6 +106,17 @@ async function readStdin(): Promise { const OUTPUT_START_MARKER = '---BIOCLAW_OUTPUT_START---'; const OUTPUT_END_MARKER = '---BIOCLAW_OUTPUT_END---'; +const EVENT_START_MARKER = '---BIOCLAW_EVENT_START---'; +const EVENT_END_MARKER = '---BIOCLAW_EVENT_END---'; + +interface ContainerEvent { + type: 'tool_call' | 'tool_result' | 'text'; + id?: string; + tool?: string; + input?: Record; + output?: string; + text?: string; +} function writeOutput(output: ContainerOutput): void { console.log(OUTPUT_START_MARKER); @@ -113,6 +124,12 @@ function writeOutput(output: ContainerOutput): void { console.log(OUTPUT_END_MARKER); } +function writeEvent(event: ContainerEvent): void { + console.log(EVENT_START_MARKER); + console.log(JSON.stringify(event)); + console.log(EVENT_END_MARKER); +} + function log(message: string): void { console.error(`[agent-runner] ${message}`); } @@ -477,6 +494,38 @@ async function runQuery( lastAssistantUuid = (message as { uuid: string }).uuid; } + // Emit events for display in dashboard chat + if (message.type === 'assistant') { + const content = (message as any).message?.content; + if (Array.isArray(content)) { + for (const block of content) { + if (block.type === 'tool_use') { + writeEvent({ type: 'tool_call', id: block.id, tool: block.name, input: block.input }); + } else if (block.type === 'text' && block.text && !block.text.includes('No response requested')) { + writeEvent({ type: 'text', text: block.text }); + } + } + } + } + + // Emit tool result events + if (message.type === 'user') { + const content = (message as any).message?.content; + if (Array.isArray(content)) { + for (const block of content) { + if (block.type === 'tool_result') { + const rawOutput = block.content; + const outputText = typeof rawOutput === 'string' + ? rawOutput + : Array.isArray(rawOutput) + ? 
rawOutput.map((c: any) => c.text || '').join('') + : JSON.stringify(rawOutput); + writeEvent({ type: 'tool_result', id: block.tool_use_id, output: (outputText || '').slice(0, 3000) }); + } + } + } + } + if (message.type === 'system' && message.subtype === 'init') { newSessionId = message.session_id; log(`Session initialized: ${newSessionId}`); diff --git a/package-lock.json b/package-lock.json index 0dfbad3..4d2ab42 100644 --- a/package-lock.json +++ b/package-lock.json @@ -12,6 +12,7 @@ "better-sqlite3": "^11.8.1", "cron-parser": "^5.5.0", "grammy": "^1.41.1", + "pdf-parse": "^2.4.5", "pino": "^9.6.0", "pino-pretty": "^13.0.0", "qrcode-terminal": "^0.12.0", @@ -1151,6 +1152,190 @@ "integrity": "sha512-dXn3FZhPv0US+7dtJsIi2R+c7qWYiReoEh5zUntWCf4oSpMNib8FDhSoed6m3QyZdx5hK7iLFkYk3rNxwt8vTA==", "license": "MIT" }, + "node_modules/@napi-rs/canvas": { + "version": "0.1.80", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas/-/canvas-0.1.80.tgz", + "integrity": "sha512-DxuT1ClnIPts1kQx8FBmkk4BQDTfI5kIzywAaMjQSXfNnra5UFU9PwurXrl+Je3bJ6BGsp/zmshVVFbCmyI+ww==", + "license": "MIT", + "workspaces": [ + "e2e/*" + ], + "engines": { + "node": ">= 10" + }, + "optionalDependencies": { + "@napi-rs/canvas-android-arm64": "0.1.80", + "@napi-rs/canvas-darwin-arm64": "0.1.80", + "@napi-rs/canvas-darwin-x64": "0.1.80", + "@napi-rs/canvas-linux-arm-gnueabihf": "0.1.80", + "@napi-rs/canvas-linux-arm64-gnu": "0.1.80", + "@napi-rs/canvas-linux-arm64-musl": "0.1.80", + "@napi-rs/canvas-linux-riscv64-gnu": "0.1.80", + "@napi-rs/canvas-linux-x64-gnu": "0.1.80", + "@napi-rs/canvas-linux-x64-musl": "0.1.80", + "@napi-rs/canvas-win32-x64-msvc": "0.1.80" + } + }, + "node_modules/@napi-rs/canvas-android-arm64": { + "version": "0.1.80", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-android-arm64/-/canvas-android-arm64-0.1.80.tgz", + "integrity": "sha512-sk7xhN/MoXeuExlggf91pNziBxLPVUqF2CAVnB57KLG/pz7+U5TKG8eXdc3pm0d7Od0WreB6ZKLj37sX9muGOQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-darwin-arm64": { + "version": "0.1.80", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-darwin-arm64/-/canvas-darwin-arm64-0.1.80.tgz", + "integrity": "sha512-O64APRTXRUiAz0P8gErkfEr3lipLJgM6pjATwavZ22ebhjYl/SUbpgM0xcWPQBNMP1n29afAC/Us5PX1vg+JNQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-darwin-x64": { + "version": "0.1.80", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-darwin-x64/-/canvas-darwin-x64-0.1.80.tgz", + "integrity": "sha512-FqqSU7qFce0Cp3pwnTjVkKjjOtxMqRe6lmINxpIZYaZNnVI0H5FtsaraZJ36SiTHNjZlUB69/HhxNDT1Aaa9vA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-linux-arm-gnueabihf": { + "version": "0.1.80", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-linux-arm-gnueabihf/-/canvas-linux-arm-gnueabihf-0.1.80.tgz", + "integrity": "sha512-eyWz0ddBDQc7/JbAtY4OtZ5SpK8tR4JsCYEZjCE3dI8pqoWUC8oMwYSBGCYfsx2w47cQgQCgMVRVTFiiO38hHQ==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-linux-arm64-gnu": { + "version": "0.1.80", + "resolved": 
"https://registry.npmjs.org/@napi-rs/canvas-linux-arm64-gnu/-/canvas-linux-arm64-gnu-0.1.80.tgz", + "integrity": "sha512-qwA63t8A86bnxhuA/GwOkK3jvb+XTQaTiVML0vAWoHyoZYTjNs7BzoOONDgTnNtr8/yHrq64XXzUoLqDzU+Uuw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-linux-arm64-musl": { + "version": "0.1.80", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-linux-arm64-musl/-/canvas-linux-arm64-musl-0.1.80.tgz", + "integrity": "sha512-1XbCOz/ymhj24lFaIXtWnwv/6eFHXDrjP0jYkc6iHQ9q8oXKzUX1Lc6bu+wuGiLhGh2GS/2JlfORC5ZcXimRcg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-linux-riscv64-gnu": { + "version": "0.1.80", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-linux-riscv64-gnu/-/canvas-linux-riscv64-gnu-0.1.80.tgz", + "integrity": "sha512-XTzR125w5ZMs0lJcxRlS1K3P5RaZ9RmUsPtd1uGt+EfDyYMu4c6SEROYsxyatbbu/2+lPe7MPHOO/0a0x7L/gw==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-linux-x64-gnu": { + "version": "0.1.80", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-linux-x64-gnu/-/canvas-linux-x64-gnu-0.1.80.tgz", + "integrity": "sha512-BeXAmhKg1kX3UCrJsYbdQd3hIMDH/K6HnP/pG2LuITaXhXBiNdh//TVVVVCBbJzVQaV5gK/4ZOCMrQW9mvuTqA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-linux-x64-musl": { + "version": "0.1.80", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-linux-x64-musl/-/canvas-linux-x64-musl-0.1.80.tgz", + "integrity": "sha512-x0XvZWdHbkgdgucJsRxprX/4o4sEed7qo9rCQA9ugiS9qE2QvP0RIiEugtZhfLH3cyI+jIRFJHV4Fuz+1BHHMg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/canvas-win32-x64-msvc": { + "version": "0.1.80", + "resolved": "https://registry.npmjs.org/@napi-rs/canvas-win32-x64-msvc/-/canvas-win32-x64-msvc-0.1.80.tgz", + "integrity": "sha512-Z8jPsM6df5V8B1HrCHB05+bDiCxjE9QA//3YrkKIdVDEwn5RKaqOxCJDRJkl48cJbylcrJbW4HxZbTte8juuPg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, "node_modules/@pinojs/redact": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/@pinojs/redact/-/redact-0.4.0.tgz", @@ -2739,6 +2924,38 @@ "dev": true, "license": "MIT" }, + "node_modules/pdf-parse": { + "version": "2.4.5", + "resolved": "https://registry.npmjs.org/pdf-parse/-/pdf-parse-2.4.5.tgz", + "integrity": "sha512-mHU89HGh7v+4u2ubfnevJ03lmPgQ5WU4CxAVmTSh/sxVTEDYd1er/dKS/A6vg77NX47KTEoihq8jZBLr8Cxuwg==", + "license": "Apache-2.0", + "dependencies": { + "@napi-rs/canvas": "0.1.80", + "pdfjs-dist": "5.4.296" + }, + "bin": { + "pdf-parse": "bin/cli.mjs" + }, + "engines": { + "node": ">=20.16.0 <21 || >=22.3.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/mehmet-kozan" + } + }, + "node_modules/pdfjs-dist": { + "version": "5.4.296", + "resolved": "https://registry.npmjs.org/pdfjs-dist/-/pdfjs-dist-5.4.296.tgz", + "integrity": "sha512-DlOzet0HO7OEnmUmB6wWGJrrdvbyJKftI1bhMitK7O2N8W2gc757yyYBbINy9IDafXAV9wmKr9t7xsTaNKRG5Q==", + "license": "Apache-2.0", + "engines": { + "node": ">=20.16.0 
|| >=22.3.0" + }, + "optionalDependencies": { + "@napi-rs/canvas": "^0.1.80" + } + }, "node_modules/picocolors": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", diff --git a/package.json b/package.json index e4d390c..13a3a7b 100644 --- a/package.json +++ b/package.json @@ -21,6 +21,7 @@ "better-sqlite3": "^11.8.1", "cron-parser": "^5.5.0", "grammy": "^1.41.1", + "pdf-parse": "^2.4.5", "pino": "^9.6.0", "pino-pretty": "^13.0.0", "qrcode-terminal": "^0.12.0", diff --git a/src/channels/whatsapp.ts b/src/channels/whatsapp.ts index 1fe73b1..702286c 100644 --- a/src/channels/whatsapp.ts +++ b/src/channels/whatsapp.ts @@ -38,6 +38,7 @@ export class WhatsAppChannel implements Channel { private outgoingQueue: Array<{ jid: string; text: string }> = []; private flushing = false; private groupSyncTimerStarted = false; + private connectReject?: (err: Error) => void; private opts: WhatsAppChannelOpts; @@ -47,6 +48,7 @@ export class WhatsAppChannel implements Channel { async connect(): Promise { return new Promise((resolve, reject) => { + this.connectReject = reject; this.connectInternal(resolve).catch(reject); }); } @@ -87,7 +89,8 @@ export class WhatsAppChannel implements Channel { exec( `osascript -e 'display notification "${msg}" with title "BioClaw" sound name "Basso"'`, ); - setTimeout(() => process.exit(1), 1000); + this.connectReject?.(new Error(msg)); + this.connectReject = undefined; } if (connection === 'close') { diff --git a/src/cli.ts b/src/cli.ts index b12002d..04096f7 100644 --- a/src/cli.ts +++ b/src/cli.ts @@ -40,18 +40,23 @@ function ensureDirs() { }, null, 2) + '\n'); } - // Copy bio-tools skill + // Copy skills (recursive, handles nested dirs like scripts/) const skillsSrc = path.join(process.cwd(), 'container', 'skills'); const skillsDst = path.join(DATA_DIR, 'sessions', GROUP_FOLDER, '.claude', 'skills'); + function copyDirRecursive(src: string, dst: string) { + fs.mkdirSync(dst, { recursive: true }); + for (const entry of fs.readdirSync(src, { withFileTypes: true })) { + const s = path.join(src, entry.name); + const d = path.join(dst, entry.name); + if (entry.isDirectory()) copyDirRecursive(s, d); + else fs.copyFileSync(s, d); + } + } if (fs.existsSync(skillsSrc)) { for (const skillDir of fs.readdirSync(skillsSrc)) { const srcDir = path.join(skillsSrc, skillDir); if (!fs.statSync(srcDir).isDirectory()) continue; - const dstDir = path.join(skillsDst, skillDir); - fs.mkdirSync(dstDir, { recursive: true }); - for (const file of fs.readdirSync(srcDir)) { - fs.copyFileSync(path.join(srcDir, file), path.join(dstDir, file)); - } + copyDirRecursive(srcDir, path.join(skillsDst, skillDir)); } } } diff --git a/src/config.ts b/src/config.ts index 22762eb..61a336d 100644 --- a/src/config.ts +++ b/src/config.ts @@ -62,10 +62,10 @@ export const TIMEZONE = // MiniMax (optional) export const MINIMAX_API_KEY = process.env.MINIMAX_API_KEY || ''; -export const MINIMAX_BASE_URL = process.env.MINIMAX_BASE_URL || 'https://api.minimaxi.chat/v1'; -export const MINIMAX_MODEL = process.env.MINIMAX_MODEL || 'MiniMax-Text-01'; +export const MINIMAX_BASE_URL = process.env.MINIMAX_BASE_URL || 'https://api.minimax.chat/v1'; +export const MINIMAX_MODEL = process.env.MINIMAX_MODEL || 'MiniMax-M2.5'; -// Qwen (optional) -export const QWEN_API_BASE = process.env.QWEN_API_BASE || 'https://dashscope.aliyuncs.com/compatible-mode/v1'; +// Qwen (optional — set via env or .env file) +export const QWEN_API_BASE = process.env.QWEN_API_BASE || ''; export const QWEN_AUTH_TOKEN 
= process.env.QWEN_AUTH_TOKEN || ''; -export const QWEN_MODEL = process.env.QWEN_MODEL || 'qwen-plus'; +export const QWEN_MODEL = process.env.QWEN_MODEL || ''; diff --git a/src/container-runner.ts b/src/container-runner.ts index 5103a9b..f5991b3 100644 --- a/src/container-runner.ts +++ b/src/container-runner.ts @@ -23,6 +23,8 @@ import { RegisteredGroup } from './types.js'; // Sentinel markers for robust output parsing (must match agent-runner) const OUTPUT_START_MARKER = '---BIOCLAW_OUTPUT_START---'; const OUTPUT_END_MARKER = '---BIOCLAW_OUTPUT_END---'; +const EVENT_START_MARKER = '---BIOCLAW_EVENT_START---'; +const EVENT_END_MARKER = '---BIOCLAW_EVENT_END---'; function getHomeDir(): string { const home = process.env.HOME || os.homedir(); @@ -51,6 +53,15 @@ export interface ContainerOutput { error?: string; } +export interface ContainerEvent { + type: 'tool_call' | 'tool_result' | 'text'; + id?: string; + tool?: string; + input?: Record; + output?: string; + text?: string; +} + interface VolumeMount { hostPath: string; containerPath: string; @@ -126,19 +137,32 @@ function buildVolumeMounts( } // Sync skills from container/skills/ into each group's .claude/skills/ + // Use manual recursive copy to avoid ENOTSUP from xattr (docker grpcfuse ownership) + function copySkillDir(src: string, dst: string) { + fs.mkdirSync(dst, { recursive: true }); + for (const entry of fs.readdirSync(src, { withFileTypes: true })) { + const s = path.join(src, entry.name); + const d = path.join(dst, entry.name); + if (entry.isDirectory()) { + copySkillDir(s, d); + } else { + try { + fs.copyFileSync(s, d, fs.constants.COPYFILE_FICLONE_FORCE); + } catch { + // fallback: read+write to skip xattr issues + fs.writeFileSync(d, fs.readFileSync(s)); + try { fs.chmodSync(d, fs.statSync(s).mode); } catch { /* ignore */ } + } + } + } + } const skillsSrc = path.join(process.cwd(), 'container', 'skills'); const skillsDst = path.join(groupSessionsDir, 'skills'); if (fs.existsSync(skillsSrc)) { for (const skillDir of fs.readdirSync(skillsSrc)) { const srcDir = path.join(skillsSrc, skillDir); if (!fs.statSync(srcDir).isDirectory()) continue; - const dstDir = path.join(skillsDst, skillDir); - fs.mkdirSync(dstDir, { recursive: true }); - for (const file of fs.readdirSync(srcDir)) { - const srcFile = path.join(srcDir, file); - const dstFile = path.join(dstDir, file); - fs.copyFileSync(srcFile, dstFile); - } + copySkillDir(srcDir, path.join(skillsDst, skillDir)); } } mounts.push({ @@ -190,7 +214,11 @@ function readSecrets(): Record { const envFile = path.join(path.resolve(fileURLToPath(import.meta.url), '../..'), '.env'); if (!fs.existsSync(envFile)) return {}; - const allowedVars = ['CLAUDE_CODE_OAUTH_TOKEN', 'ANTHROPIC_API_KEY']; + const allowedVars = [ + 'CLAUDE_CODE_OAUTH_TOKEN', 'ANTHROPIC_API_KEY', + 'MINIMAX_API_KEY', 'MINIMAX_BASE_URL', 'MINIMAX_MODEL', + 'QWEN_API_BASE', 'QWEN_AUTH_TOKEN', 'QWEN_MODEL', + ]; const secrets: Record = {}; const content = fs.readFileSync(envFile, 'utf-8'); @@ -236,6 +264,7 @@ export async function runContainerAgent( input: ContainerInput, onProcess: (proc: ChildProcess, containerName: string) => void, onOutput?: (output: ContainerOutput) => Promise, + onEvent?: (event: ContainerEvent) => void, ): Promise { const startTime = Date.now(); @@ -315,34 +344,57 @@ export async function runContainerAgent( } } - // Stream-parse for output markers - if (onOutput) { + // Stream-parse for output and event markers + if (onOutput || onEvent) { parseBuffer += chunk; - let startIdx: number; - while ((startIdx 
= parseBuffer.indexOf(OUTPUT_START_MARKER)) !== -1) { - const endIdx = parseBuffer.indexOf(OUTPUT_END_MARKER, startIdx); + let processed = true; + while (processed) { + processed = false; + const outStart = parseBuffer.indexOf(OUTPUT_START_MARKER); + const evtStart = parseBuffer.indexOf(EVENT_START_MARKER); + + // Determine which marker comes first + let nextStart: number; + let isOutput: boolean; + if (outStart !== -1 && (evtStart === -1 || outStart <= evtStart)) { + nextStart = outStart; + isOutput = true; + } else if (evtStart !== -1) { + nextStart = evtStart; + isOutput = false; + } else { + break; + } + + const endMarker = isOutput ? OUTPUT_END_MARKER : EVENT_END_MARKER; + const startLen = isOutput ? OUTPUT_START_MARKER.length : EVENT_START_MARKER.length; + const endIdx = parseBuffer.indexOf(endMarker, nextStart); if (endIdx === -1) break; // Incomplete pair, wait for more data - const jsonStr = parseBuffer - .slice(startIdx + OUTPUT_START_MARKER.length, endIdx) - .trim(); - parseBuffer = parseBuffer.slice(endIdx + OUTPUT_END_MARKER.length); + const jsonStr = parseBuffer.slice(nextStart + startLen, endIdx).trim(); + parseBuffer = parseBuffer.slice(endIdx + endMarker.length); + processed = true; try { - const parsed: ContainerOutput = JSON.parse(jsonStr); - if (parsed.newSessionId) { - newSessionId = parsed.newSessionId; + if (isOutput && onOutput) { + const parsed: ContainerOutput = JSON.parse(jsonStr); + if (parsed.newSessionId) { + newSessionId = parsed.newSessionId; + } + hadStreamingOutput = true; + // Activity detected — reset the hard timeout + resetTimeout(); + // Call onOutput for all markers (including null results) + // so idle timers start even for "silent" query completions. + outputChain = outputChain.then(() => onOutput(parsed)); + } else if (!isOutput && onEvent) { + const event: ContainerEvent = JSON.parse(jsonStr); + onEvent(event); } - hadStreamingOutput = true; - // Activity detected — reset the hard timeout - resetTimeout(); - // Call onOutput for all markers (including null results) - // so idle timers start even for "silent" query completions. 
- outputChain = outputChain.then(() => onOutput(parsed)); } catch (err) { logger.warn( { group: group.name, error: err }, - 'Failed to parse streamed output chunk', + 'Failed to parse streamed marker chunk', ); } } diff --git a/src/dashboard.html b/src/dashboard.html index 908a712..bf466d5 100644 --- a/src/dashboard.html +++ b/src/dashboard.html @@ -82,6 +82,19 @@ .test-area input{width:100%;background:var(--surface2);border:1px solid var(--border);border-radius:6px;padding:7px 10px;color:var(--text);font-size:12px;margin-bottom:8px} .test-area input:focus{outline:none;border-color:var(--accent2)} .test-result{background:var(--surface2);border-radius:6px;padding:10px;font-size:12px;color:var(--muted);min-height:40px;white-space:pre-wrap;word-break:break-all} +.token-range-btn{background:var(--surface2);border:1px solid var(--border);border-radius:14px;padding:3px 12px;font-size:12px;color:var(--muted);cursor:pointer} +.token-range-btn.active{background:var(--accent2);color:#fff;border-color:var(--accent2)} +.token-summary{display:grid;grid-template-columns:repeat(auto-fill,minmax(200px,1fr));gap:12px;margin-bottom:16px} +.token-summary-card{background:var(--surface);border:1px solid var(--border);border-radius:10px;padding:16px} +.token-summary-card h4{font-size:13px;font-weight:600;margin-bottom:8px;color:var(--text)} +.token-summary-card .token-num{font-size:22px;font-weight:700;color:var(--accent2)} +.token-summary-card .token-detail{font-size:11px;color:var(--muted);margin-top:4px} +.token-chart{position:relative;background:var(--surface);border:1px solid var(--border);border-radius:10px;padding:16px;overflow-x:auto} +.token-chart-title{font-size:13px;font-weight:600;color:var(--text);margin-bottom:12px} +.token-bar-group{display:flex;flex-direction:column;align-items:center;gap:2px;min-width:40px} +.token-bar-wrap{display:flex;gap:2px;align-items:flex-end;height:120px} +.token-bar{width:14px;border-radius:3px 3px 0 0;min-height:2px;transition:height .3s} +.token-bar-label{font-size:10px;color:var(--muted);white-space:nowrap} /* Skills */ .skills-search{display:flex;gap:10px;margin-bottom:16px} .skills-search input{flex:1;background:var(--surface);border:1px solid var(--border);border-radius:6px;padding:8px 12px;color:var(--text);font-size:13px} @@ -126,12 +139,124 @@ .form-row input:focus,.form-row select:focus{outline:none;border-color:var(--accent)} /* Groups msg stat */ .group-msg-stat{font-size:11px;color:var(--accent);margin-top:2px} +/* Chat tab */ +.chat-layout{display:flex;flex-direction:column;height:calc(100vh - 140px)} +.chat-toolbar{display:flex;gap:10px;align-items:center;margin-bottom:12px;flex-wrap:wrap} +.chat-toolbar label{font-size:12px;color:var(--muted)} +.chat-messages{flex:1;overflow-y:auto;background:var(--surface2);border:1px solid var(--border);border-radius:8px;padding:16px;display:flex;flex-direction:column;gap:14px;min-height:0} +.chat-input-row{display:flex;gap:8px;margin-top:12px;align-items:flex-end} +.chat-input-row textarea{flex:1;background:var(--surface);border:1px solid var(--border);border-radius:8px;padding:10px 14px;color:var(--text);font-size:13px;resize:none;line-height:1.5;max-height:120px;overflow-y:auto;font-family:inherit} +.chat-input-row textarea:focus{outline:none;border-color:var(--accent2)} +.chat-send-btn{background:var(--accent2);border:none;color:#fff;padding:10px 20px;border-radius:8px;cursor:pointer;font-size:13px;font-weight:600;height:42px;flex-shrink:0} +.chat-send-btn:hover{background:var(--accent)} 
+.chat-send-btn:disabled{opacity:.5;cursor:not-allowed} +.msg-row{display:flex;gap:10px;align-items:flex-start} +.msg-row.user{flex-direction:row-reverse} +.msg-avatar{width:28px;height:28px;border-radius:50%;display:flex;align-items:center;justify-content:center;font-size:12px;flex-shrink:0;margin-top:2px;font-weight:700} +.msg-avatar.user-av{background:#2b6cb0;color:#fff} +.msg-avatar.assistant-av{background:#553c9a;color:#d6bcfa} +.msg-bubble{max-width:82%;background:var(--surface);border:1px solid var(--border);border-radius:10px;padding:10px 14px;font-size:13px;line-height:1.65} +.msg-row.user .msg-bubble{background:#1a365d;border-color:#2a4a7f} +.msg-text{white-space:pre-wrap;word-break:break-word} +.msg-agent-label{font-size:10px;color:var(--muted);margin-bottom:4px;font-weight:600;text-transform:uppercase;letter-spacing:.05em} +.tool-block{margin-top:8px;border:1px solid var(--border);border-radius:6px;overflow:hidden} +.tool-header{display:flex;align-items:center;gap:8px;padding:6px 10px;background:var(--surface2);cursor:pointer;font-size:12px;user-select:none;transition:.1s} +.tool-header:hover{background:var(--border)} +.tool-chevron{font-size:10px;transition:.2s} +.tool-block.expanded .tool-chevron{transform:rotate(90deg)} +.tool-name{font-weight:600;color:var(--accent);font-family:monospace;font-size:11px} +.tool-status{font-size:10px;color:var(--muted);margin-left:auto} +.tool-status.running{color:var(--warn-fg)} +.tool-status.done{color:var(--success-fg)} +.tool-body{padding:10px;font-family:'JetBrains Mono','Fira Code',monospace;font-size:11px;color:var(--muted);white-space:pre-wrap;word-break:break-all;max-height:220px;overflow-y:auto;display:none} +.tool-block.expanded .tool-body{display:block} +.tool-input-section{color:var(--accent2)} +.tool-result-section{color:var(--success-fg);border-top:1px solid var(--border);margin-top:6px;padding-top:6px} +.thinking-dots{display:inline-flex;gap:4px;padding:4px 0;align-items:center;flex-wrap:nowrap;white-space:nowrap} +.thinking-dots span{width:6px;height:6px;border-radius:50%;background:var(--muted);animation:dot-pulse 1.4s ease-in-out infinite} +.thinking-dots span:nth-child(2){animation-delay:.2s} +.thinking-dots span:nth-child(3){animation-delay:.4s} +@keyframes dot-pulse{0%,80%,100%{opacity:.25;transform:scale(.85)}40%{opacity:1;transform:scale(1)}} +.chat-empty{color:var(--muted);font-size:15px;text-align:center;padding:80px 20px;display:flex;flex-direction:column;align-items:center;gap:12px;flex:1;justify-content:center} +.chat-empty .empty-icon{font-size:48px;opacity:.4} +.chat-empty .empty-title{font-size:20px;font-weight:600;color:var(--text);opacity:.7} +.chat-empty .empty-hint{font-size:13px;color:var(--muted);max-width:400px;line-height:1.6} +/* View modes: chat (default) vs dashboard */ +body.view-dashboard .dashboard-only{display:flex !important} +body.view-dashboard #dashboard-nav{display:flex !important} +body.view-dashboard #dashboard-tabs{display:block !important} +body.view-dashboard #main-chat-view{display:none !important} +body.view-dashboard .chat-only{display:none !important} +body:not(.view-dashboard) .dashboard-only{display:none !important} +body:not(.view-dashboard) #dashboard-nav{display:none !important} +body:not(.view-dashboard) #dashboard-tabs{display:none !important} +body:not(.view-dashboard) #main-chat-view{display:flex !important;flex-direction:row} +body:not(.view-dashboard) .chat-only{display:flex !important} +#main-chat-view{height:calc(100vh - 56px);padding:0} +#chat-main-area 
.chat-scroll-area{flex:1;overflow-y:auto;width:100%;display:flex;flex-direction:column;align-items:center;min-height:0} +#chat-main-area .chat-messages{width:100%;max-width:820px;padding:20px 24px;display:flex;flex-direction:column;gap:16px;flex:1} +#chat-main-area .chat-bottom{width:100%;display:flex;flex-direction:column;align-items:center;padding:0 24px 20px;flex-shrink:0} +#chat-main-area .chat-input-wrap{width:100%;max-width:820px;background:var(--surface);border:1.5px solid var(--border);border-radius:16px;transition:border-color .15s,box-shadow .15s} +body.light #chat-main-area .chat-input-wrap{border-color:#e2e8f0;background:#fff;box-shadow:0 2px 8px rgba(0,0,0,.08)} +body.light #chat-main-area .chat-input-row textarea::placeholder{color:#94a3b8} +body.light #chat-main-area .chat-input-row textarea{color:#1e293b} +#chat-main-area .chat-input-wrap:focus-within{border-color:var(--accent2);box-shadow:0 0 0 2px rgba(99,179,237,.12)} +#chat-main-area .chat-attachments{display:flex;gap:8px;padding:10px 14px 0;flex-wrap:wrap} +#chat-main-area .chat-attachments:empty{display:none} +.attach-preview{position:relative;display:inline-flex;align-items:center;gap:6px;background:var(--surface2);border:1px solid var(--border);border-radius:8px;padding:6px 10px;font-size:12px;color:var(--text);max-width:200px} +.attach-preview img{width:40px;height:40px;border-radius:4px;object-fit:cover} +.attach-preview .attach-name{overflow:hidden;text-overflow:ellipsis;white-space:nowrap} +.attach-remove{position:absolute;top:-6px;right:-6px;width:18px;height:18px;border-radius:50%;background:var(--danger-bg);color:#fc8181;border:none;cursor:pointer;font-size:11px;display:flex;align-items:center;justify-content:center;line-height:1} +#chat-main-area .chat-input-row{display:flex;gap:0;align-items:flex-end;padding:8px 8px 8px 14px} +#chat-main-area .chat-input-row textarea{flex:1;background:transparent;border:none;padding:6px 4px;color:var(--text);font-size:14px;resize:none;line-height:1.5;max-height:160px;overflow-y:auto;font-family:inherit} +#chat-main-area .chat-input-row textarea:focus{outline:none} +#chat-main-area .chat-input-actions{display:flex;gap:4px;align-items:center;flex-shrink:0} +.chat-action-btn{background:none;border:none;color:var(--muted);cursor:pointer;padding:8px;border-radius:8px;font-size:16px;transition:.15s;display:flex;align-items:center;justify-content:center} +.chat-action-btn:hover{background:var(--surface2);color:var(--text)} +.chat-action-btn svg{width:20px;height:20px} +#chat-main-area .chat-send-btn{background:none;border:none;color:#10b981;cursor:pointer;padding:6px;display:flex;align-items:center;justify-content:center;transition:.15s;flex-shrink:0} +#chat-main-area .chat-send-btn:hover{color:#059669;transform:scale(1.15)} +#chat-main-area .chat-send-btn:disabled{opacity:.3;cursor:not-allowed;transform:none} +#chat-main-area .chat-send-btn svg{width:28px;height:28px} +#chat-main-area .chat-footer{display:flex;gap:10px;align-items:center;width:100%;max-width:820px;padding:8px 0 0;flex-wrap:wrap} +#chat-main-area .chat-footer select{background:var(--surface2);border:1px solid var(--border);color:var(--text);padding:4px 10px;border-radius:8px;font-size:12px;cursor:pointer} +#chat-main-area .chat-footer select:focus{outline:none;border-color:var(--accent2)} +#chat-main-area .chat-footer .footer-hint{font-size:11px;color:var(--muted);margin-left:auto} +/* Chat sidebar */ +#chat-sidebar{width:260px;background:var(--surface);border-right:1px solid 
var(--border);display:flex;flex-direction:column;height:calc(100vh - 56px);flex-shrink:0;transition:width .2s} +#chat-sidebar .sidebar-header{padding:12px;display:flex;gap:8px;align-items:center;border-bottom:1px solid var(--border)} +#chat-sidebar .sidebar-new-btn{flex:1;background:var(--surface2);border:1px solid var(--border);color:var(--text);padding:8px 12px;border-radius:8px;cursor:pointer;font-size:13px;display:flex;align-items:center;gap:6px;transition:.15s} +#chat-sidebar .sidebar-new-btn:hover{border-color:var(--accent);color:var(--accent)} +#chat-sidebar .sidebar-new-btn svg{width:16px;height:16px;flex-shrink:0} +#chat-sidebar .sidebar-toggle-btn{background:none;border:none;color:var(--muted);cursor:pointer;padding:6px;border-radius:6px;display:flex;align-items:center} +#chat-sidebar .sidebar-toggle-btn:hover{background:var(--surface2);color:var(--text)} +#chat-sidebar .sidebar-toggle-btn svg{width:18px;height:18px} +.chat-list{flex:1;overflow-y:auto;padding:8px} +.chat-list-item{padding:10px 12px;border-radius:8px;cursor:pointer;font-size:13px;color:var(--muted);display:flex;align-items:center;gap:8px;transition:.1s;position:relative;margin-bottom:2px} +.chat-list-item:hover{background:var(--surface2);color:var(--text)} +.chat-list-item.active{background:var(--surface2);color:var(--text);font-weight:500} +.chat-list-item .chat-item-title{flex:1;overflow:hidden;text-overflow:ellipsis;white-space:nowrap} +.chat-list-item .chat-item-agent{font-size:10px;color:var(--muted);opacity:.7} +.chat-list-item .chat-item-delete{display:none;background:none;border:none;color:var(--muted);cursor:pointer;padding:2px 4px;border-radius:4px;font-size:14px} +.chat-list-item:hover .chat-item-delete{display:block} +.chat-list-item .chat-item-delete:hover{color:#fc8181;background:var(--danger-bg)} +.chat-list-date{font-size:11px;color:var(--muted);padding:8px 12px 4px;font-weight:600;text-transform:uppercase;letter-spacing:.03em} +#sidebar-collapse-btn{display:none;position:fixed;left:8px;top:64px;background:var(--surface);border:1px solid var(--border);color:var(--muted);cursor:pointer;padding:6px;border-radius:6px;z-index:10} +#sidebar-collapse-btn:hover{color:var(--text);border-color:var(--accent)} +#sidebar-collapse-btn svg{width:18px;height:18px} +body.sidebar-collapsed #chat-sidebar{display:none} +body.sidebar-collapsed #sidebar-collapse-btn{display:flex} +#chat-main-area{flex:1;display:flex;flex-direction:column;align-items:center;min-width:0} +.header-compact{padding:8px 20px} +.dashboard-toggle{background:var(--surface2);border:1px solid var(--border);color:var(--muted);padding:5px 12px;border-radius:6px;cursor:pointer;font-size:12px;transition:.15s;display:flex;align-items:center;gap:5px} +.dashboard-toggle:hover{border-color:var(--accent);color:var(--accent)} +.dashboard-toggle.active{border-color:var(--accent);color:var(--accent);background:var(--accent2);color:#fff} -
[dashboard.html markup hunks: the HTML tags were lost in extraction and only text content survives. The hunk following the CSS block above, plus the hunks at @@ -140,7 +265,8 @@ and @@ -151,7 +277,7 @@, rework the "🧬 BioClaw" header bar and the Groups / Tasks / Models / Chats navigation.]
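The agent-runner and container-runner changes in this patch add a second sentinel-marker channel (---BIOCLAW_EVENT_START/END---) alongside the existing output markers, so tool calls, tool results, and text can stream to the dashboard chat while a container run is still in progress. The sketch below is illustrative only: it assumes the marker strings and the ContainerEvent shape shown in the diff (the generic parameters on Record were stripped during extraction, so Record<string, unknown> is an assumption), and scanMarkers() plus the 'Bash' tool name are hypothetical stand-ins — the real parser lives inline in runContainerAgent() and additionally resets the hard timeout and chains onOutput promises.

```typescript
// Minimal sketch of the dual sentinel-marker protocol (not project code).
const OUTPUT_START = '---BIOCLAW_OUTPUT_START---';
const OUTPUT_END = '---BIOCLAW_OUTPUT_END---';
const EVENT_START = '---BIOCLAW_EVENT_START---';
const EVENT_END = '---BIOCLAW_EVENT_END---';

interface ContainerEvent {
  type: 'tool_call' | 'tool_result' | 'text';
  id?: string;
  tool?: string;
  input?: Record<string, unknown>; // assumed type arguments
  output?: string;
  text?: string;
}

// Container side: one event = start marker, one JSON line, end marker.
function writeEvent(event: ContainerEvent): void {
  console.log(EVENT_START);
  console.log(JSON.stringify(event));
  console.log(EVENT_END);
}

type Marker = { kind: 'output' | 'event'; json: string };

// Host side: pull every complete marker pair out of a rolling stdout buffer,
// in the order they appear, and return the unparsed remainder so an
// incomplete pair can wait for the next stdout chunk.
function scanMarkers(buffer: string): { found: Marker[]; rest: string } {
  const found: Marker[] = [];
  let rest = buffer;
  for (;;) {
    const outStart = rest.indexOf(OUTPUT_START);
    const evtStart = rest.indexOf(EVENT_START);
    let kind: 'output' | 'event';
    if (outStart !== -1 && (evtStart === -1 || outStart <= evtStart)) kind = 'output';
    else if (evtStart !== -1) kind = 'event';
    else break;

    const [startMarker, endMarker] =
      kind === 'output' ? [OUTPUT_START, OUTPUT_END] : [EVENT_START, EVENT_END];
    const start = rest.indexOf(startMarker);
    const end = rest.indexOf(endMarker, start);
    if (end === -1) break; // incomplete pair — keep buffering

    found.push({ kind, json: rest.slice(start + startMarker.length, end).trim() });
    rest = rest.slice(end + endMarker.length);
  }
  return { found, rest };
}

// Example: a tool_call event framed by the container, then recovered by the host.
const framed =
  `${EVENT_START}\n` +
  `${JSON.stringify({ type: 'tool_call', tool: 'Bash', input: { command: 'ls' } })}\n` +
  `${EVENT_END}\n`;
const first = scanMarkers(framed).found[0];
if (first && first.kind === 'event') {
  const event = JSON.parse(first.json) as ContainerEvent;
  console.log(event.type); // 'tool_call'
}
```

Processing whichever marker appears first keeps event and output handling in stream order, matching the rewritten loop in container-runner.ts that replaces the single OUTPUT_START_MARKER scan.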