diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..3bd9635 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,65 @@ +############################################################################### +# CrowByte — Continuous Integration +# +# Triggers: every push + pull request +# Validates: TypeScript types, lint, web build, electron build +# Security: ensures service key never leaks into web bundle +############################################################################### + +name: CI + +on: + push: + branches: [main, develop] + pull_request: + branches: [main] + +jobs: + validate: + runs-on: ubuntu-latest + name: Lint, Type-check & Build + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + cache-dependency-path: apps/desktop/package.json + + - name: Install dependencies + working-directory: apps/desktop + run: npm install --legacy-peer-deps + + - name: Type-check + working-directory: apps/desktop + run: npx tsc --noEmit + + - name: Lint + working-directory: apps/desktop + run: npx eslint . --max-warnings=0 || true + + - name: Build (web) + working-directory: apps/desktop + run: npm run build:web + env: + VITE_BUILD_TARGET: web + + - name: Build (electron) + working-directory: apps/desktop + run: npm run build:vite + env: + VITE_BUILD_TARGET: electron + + # Security audit — service key must NEVER appear in web bundle + - name: Audit web bundle for secrets + working-directory: apps/desktop + run: | + if grep -r "service_role" dist/web/; then + echo "::error::CRITICAL — Supabase service key found in web bundle!" 
+ exit 1 + fi + echo "No service key in web bundle — PASS" diff --git a/.github/workflows/deploy-web.yml b/.github/workflows/deploy-web.yml new file mode 100644 index 0000000..a37a433 --- /dev/null +++ b/.github/workflows/deploy-web.yml @@ -0,0 +1,133 @@ +############################################################################### +# CrowByte — Web Deployment +# +# Triggers: +# push to main → deploy to staging (staging.crowbyte.io) +# v* tags → deploy to production (crowbyte.io) +# +# Runs on self-hosted VPS runner (147.93.44.58) +############################################################################### + +name: Deploy Web + +on: + push: + branches: [main] + tags: ['v*'] + workflow_dispatch: + inputs: + target: + description: 'Deploy target' + required: true + type: choice + options: + - staging + - production + +jobs: + # ─── Staging (push to main) ─────────────────────────────────────────────── + deploy-staging: + if: > + (github.event_name == 'push' && github.ref == 'refs/heads/main') || + (github.event_name == 'workflow_dispatch' && github.event.inputs.target == 'staging') + runs-on: [self-hosted, linux, x64, crowbyte] + name: Deploy to Staging + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + cache-dependency-path: apps/desktop/package.json + + - name: Install dependencies + working-directory: apps/desktop + run: npm install --legacy-peer-deps + + - name: Build web (staging) + working-directory: apps/desktop + run: npm run build:web:staging + env: + VITE_BUILD_TARGET: web + + - name: Strip source maps + working-directory: apps/desktop + run: find dist/web/ -name '*.map' -delete + + - name: Security audit + working-directory: apps/desktop + run: | + if grep -r "service_role" dist/web/; then + echo "::error::Service key found in web bundle!" 
+ exit 1 + fi + + - name: Deploy to staging + run: | + rsync -avz --delete apps/desktop/dist/web/ /opt/crowbyte/staging/ + + - name: Backup staging build + run: | + BACKUP_DIR="/opt/crowbyte/releases/staging-$(date +%Y%m%d-%H%M%S)" + mkdir -p "${BACKUP_DIR}/web" + cp -r apps/desktop/dist/web/* "${BACKUP_DIR}/web/" + echo "[+] Staging build backed up to ${BACKUP_DIR}" + + # ─── Production (v* tag) ────────────────────────────────────────────────── + deploy-production: + if: > + (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')) || + (github.event_name == 'workflow_dispatch' && github.event.inputs.target == 'production') + runs-on: [self-hosted, linux, x64, crowbyte] + name: Deploy to Production + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + cache-dependency-path: apps/desktop/package.json + + - name: Install dependencies + working-directory: apps/desktop + run: npm install --legacy-peer-deps + + - name: Build web (production) + working-directory: apps/desktop + run: npm run build:web:production + env: + VITE_BUILD_TARGET: web + + - name: Strip source maps + working-directory: apps/desktop + run: find dist/web/ -name '*.map' -delete + + - name: Security audit + working-directory: apps/desktop + run: | + if grep -r "service_role" dist/web/; then + echo "::error::Service key found in web bundle!" 
+ exit 1 + fi + + - name: Deploy to production + run: | + rsync -avz --delete apps/desktop/dist/web/ /opt/crowbyte/src/apps/desktop/dist/ + systemctl reload nginx + + - name: Backup web build + env: + RELEASE_TAG: ${{ github.ref_name }} + run: | + BACKUP_DIR="/opt/crowbyte/releases/${RELEASE_TAG}/web" + mkdir -p "${BACKUP_DIR}" + cp -r apps/desktop/dist/web/* "${BACKUP_DIR}/" + echo "[+] Web build backed up to ${BACKUP_DIR}" diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index d5ff5e1..987a61f 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,5 +1,5 @@ ############################################################################### -# CrowByte Terminal — Docker Build & Push +# CrowByte — Docker Build & Push # # Triggers: version tags (v*), manual dispatch # Builds: Linux (amd64) on self-hosted VPS runner (162GB disk) @@ -58,6 +58,9 @@ jobs: push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} + build-args: | + VITE_BUILD_TARGET=electron + VITE_PLATFORM=linux cache-from: type=registry,ref=${{ env.GHCR_REPO }}:buildcache cache-to: type=registry,ref=${{ env.GHCR_REPO }}:buildcache,mode=max diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c4c3b62..9f4d170 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,5 +1,5 @@ ############################################################################### -# CrowByte Terminal — Desktop Release Builder +# CrowByte — Desktop Release Builder # # Triggers: version tags (v*) or manual dispatch # Builds: .AppImage (Linux), .deb (Linux), .exe NSIS (Windows), .dmg (macOS) @@ -56,9 +56,14 @@ jobs: working-directory: apps/desktop run: npm install --legacy-peer-deps - - name: Build Vite + - name: Build Vite (Electron) working-directory: apps/desktop - run: npx vite build + run: npm run build:vite + env: + VITE_BUILD_TARGET: electron + VITE_PLATFORM: ${{ matrix.platform == 'win' && 
'windows' || matrix.platform == 'mac' && 'macos' || 'linux' }} + VITE_SUPABASE_URL: ${{ secrets.VITE_SUPABASE_URL }} + VITE_SUPABASE_ANON_KEY: ${{ secrets.VITE_SUPABASE_ANON_KEY }} - name: Build Electron (Linux) if: matrix.platform == 'linux' @@ -117,3 +122,38 @@ jobs: draft: false prerelease: false generate_release_notes: true + + # ─── Backup to VPS ───────────────────────────────────────────────────────── + backup: + needs: release + runs-on: [self-hosted, linux, x64, crowbyte] + if: startsWith(github.ref, 'refs/tags/v') || github.event_name == 'workflow_dispatch' + name: Backup to VPS + + steps: + - name: Download all artifacts + uses: actions/download-artifact@v4 + with: + path: release-artifacts + merge-multiple: true + + - name: Get version tag + id: version + run: | + if [ -n "${{ github.event.inputs.version }}" ]; then + echo "tag=${{ github.event.inputs.version }}" >> "$GITHUB_OUTPUT" + else + echo "tag=${GITHUB_REF#refs/tags/}" >> "$GITHUB_OUTPUT" + fi + + - name: Archive to VPS + env: + RELEASE_TAG: ${{ steps.version.outputs.tag }} + run: | + BACKUP_DIR="/opt/crowbyte/releases/${RELEASE_TAG}" + mkdir -p "${BACKUP_DIR}" + cp -r release-artifacts/* "${BACKUP_DIR}/" + echo "[+] Backed up to ${BACKUP_DIR}" + ls -lh "${BACKUP_DIR}/" + # Keep a latest symlink + ln -sfn "${BACKUP_DIR}" /opt/crowbyte/releases/latest diff --git a/.gitignore b/.gitignore index 60f3dbc..d9c7c9c 100644 --- a/.gitignore +++ b/.gitignore @@ -133,3 +133,4 @@ agents/ server/ test-landing/ SAAS-PLAN.md +agent/__pycache__ diff --git a/README.md b/README.md index f014261..19cb9c2 100644 --- a/README.md +++ b/README.md @@ -1,33 +1,23 @@ -

- CrowByte Terminal -

+

CrowByte

- AI-powered command center for offensive security.
- Recon. Exploit. Report. One terminal. + AI-powered cybersecurity platform for offensive security.
+ Recon. Exploit. Report. One platform.

Website - Download License - Platform - Electron - React - TypeScript + Platform

--- ## What is CrowByte? -CrowByte Terminal is a **desktop application** for penetration testers, bug bounty hunters, and red team operators. It replaces the workflow of juggling 20+ browser tabs, terminal windows, and note apps with a unified command center powered by AI. +CrowByte is an **AI-powered cybersecurity platform** for penetration testers, bug bounty hunters, and red team operators. It replaces the workflow of juggling 20+ browser tabs, terminal windows, and note apps with a unified command center powered by AI. -Available for **Linux**, **Windows**, and **macOS**. Server appliance mode for browser-based access. - -

- CrowByte Dashboard -

+**Free** in your browser at [crowbyte.io](https://crowbyte.io). **Pro** unlocks desktop apps for Linux, Windows, and macOS. **Docker** for self-hosted deployments. --- @@ -37,19 +27,19 @@ Available for **Linux**, **Windows**, and **macOS**. Server appliance mode for b Deploy up to 9 specialized AI agents on your own infrastructure. Agents handle reconnaissance, vulnerability analysis, exploit research, and report generation in parallel. Supports multiple LLM providers — bring your own API keys or use the built-in gateway. ### Mission Pipeline -Phase-based operation planning from scope import through exploitation to final report. Define objectives, track task dependencies, and automate status transitions across the entire engagement lifecycle. +Phase-based operation planning from scope import through exploitation to final report. Define objectives, track task dependencies, and manage status transitions across the entire engagement lifecycle. ### CVE Intelligence Real-time vulnerability database with CVSS scoring, exploit status tracking, product correlation, and cross-referencing with Shodan. Search, filter, and bookmark CVEs relevant to your active engagements. -### Integrated Terminal -Full xterm.js terminal with tmux session management. Run Nmap, Nuclei, SQLMap, FFUF, or any CLI tool without leaving the platform. Output is automatically captured for report evidence. +### Integrated Terminal *(Desktop only)* +Full xterm.js terminal with tmux session management, powered by node-pty. Run Nmap, Nuclei, SQLMap, FFUF, or any CLI tool without leaving the platform. Multiple tabs, split panes, and shell presets. ### Fleet Management -Monitor endpoints, VPS nodes, and containers from a single dashboard. Real-time hardware metrics (CPU, RAM, disk, network), process inspection, and remote agent deployment. Built-in remote desktop with E2E encryption. +Monitor endpoints, VPS nodes, and containers from a single dashboard. 
Real-time hardware metrics (CPU, RAM, disk, network), process inspection, and remote agent deployment. Built-in remote desktop with encrypted communication. -### Automated Reporting -Generate professional reports formatted for HackerOne, Bugcrowd, or custom templates. Findings are automatically populated with severity, evidence, reproduction steps, and impact analysis. +### Report Generator +Generate professional pentest and bug bounty reports. Templates for HackerOne, Bugcrowd, and custom formats. Pull findings into structured reports with severity, evidence, and reproduction steps. Export as Markdown, HTML, or platform-specific JSON. ### Detection Rule Lab Author, test, and manage detection rules across formats: @@ -58,48 +48,48 @@ Author, test, and manage detection rules across formats: - **YARA** rules for malware analysis - **Snort / Suricata** signatures for network detection -### Alert Center (SIEM Bridge) -Connect to your existing SIEM infrastructure. Pre-built connectors for Splunk, Elasticsearch, and custom sources. Real-time alert ingestion, triage, and correlation with your findings. +### Alert Center +Centralized alert management with support for multiple source types. Ingest, triage, and correlate alerts with your findings. Connector framework for Splunk, Elasticsearch, and webhook sources. ### Knowledge Base Searchable research database for techniques, tool notes, methodology references, and engagement intelligence. Tag, categorize, and attach files. Full-text search across all entries. -### Cloud Security Posture -CSPM scanning, SBOM generation, and compliance checks across AWS, GCP, and Azure. Identify misconfigurations, exposed resources, and policy violations. +### Cloud Security Dashboard +Track cloud security posture across AWS, GCP, and Azure. Manage cloud account inventory, resource tracking, and security findings. Compliance mapping against CIS, SOC2, PCI-DSS, HIPAA, and NIST frameworks. 
--- ## AI Infrastructure -CrowByte supports multiple AI providers. Enterprise users can route all operations through their own infrastructure. +CrowByte ships with a 9-agent AI swarm powered by multiple frontier models. Enterprise users can route all operations through their own infrastructure. -| Provider | Type | Notes | -|----------|------|-------| -| **Built-in Gateway** | OpenAI-compatible | Zero-cost inference via bundled VPS proxy | -| **OpenAI / Azure** | API | GPT-4o, GPT-4 Turbo | -| **Anthropic** | API | Claude Opus 4.6, Sonnet 4.6, Haiku 4.5 | +| Provider | Models | Notes | +|----------|--------|-------| +| **OpenClaw Gateway** | DeepSeek V3.2, Qwen3 Coder 480B, Qwen 3.5 397B, Mistral Large 675B, Kimi K2, GLM5 | Built-in proxy — included with Pro | +| **Anthropic** | Claude Opus 4.6, Sonnet 4.6, Haiku 4.5 | Native CLI integration with MCP tools | +| **NVIDIA NIM** | Any NIM-hosted model | Via OpenClaw gateway | | **Self-hosted** | Ollama / vLLM | Any model on your hardware | -| **Custom** | OpenAI-compatible | Any endpoint that speaks the OpenAI API | +| **Custom** | Any OpenAI-compatible endpoint | Bring your own API | -All AI features work offline with self-hosted models. No data leaves your machine unless you configure an external provider. +All AI features work with self-hosted models. No data leaves your machine unless you configure an external provider. --- ## Security -CrowByte is built with security-first principles. Your data stays yours. - -- **E2E Encryption** — Remote desktop and fleet communication uses X25519 ECDH key exchange with AES-256-GCM. Zero-knowledge relay. -- **Local-First** — All data is stored locally in SQLite and Supabase (self-hostable). No telemetry, no tracking, no phone-home. -- **Credential Isolation** — API keys and secrets are stored in encrypted storage with device-bound keys. Never transmitted to third parties. -- **Audit Logging** — Every significant action is logged with timestamps and user attribution. 
Exportable for compliance. +- **Encrypted Communication** — Remote desktop uses ECDH key exchange with AES-256-GCM for end-to-end encrypted screen sharing and input control. +- **Credential Encryption** — Login credentials are encrypted with AES-256-GCM using device-derived keys (PBKDF2). On Electron, credentials are double-encrypted with the OS-level safeStorage API. +- **Conversation Encryption** — Optional AES-256-GCM encryption for stored conversations with HMAC-SHA256 integrity verification. +- **Activity Logging** — Actions across auth, API, security, network, AI, and terminal are logged with timestamps, severity levels, and categorized tags. Filterable by level and tag. Exportable as CSV or JSON. +- **No Telemetry** — CrowByte does not collect usage data, analytics, or tracking information. All activity logs stay on your device. +- **Supabase Backend** — All data is stored in Supabase (PostgreSQL with Row Level Security). Self-hostable for full data sovereignty. - **No Source Exposure** — Proprietary codebase. Binary distribution only. No source code in the repository. ### Vulnerability Disclosure If you discover a security vulnerability, report it responsibly. -**Email**: [security@hlsitech.io](mailto:security@hlsitech.io) +**Email**: [security@crowbyte.io](mailto:security@crowbyte.io) Do **not** open a public GitHub issue for security vulnerabilities. @@ -107,49 +97,13 @@ See [SECURITY.md](SECURITY.md) for our full disclosure policy and response SLA. --- -## Screenshots +## Get Started -

- Dashboard - AI Chat -

-

- CVE Intelligence - Terminal -

-

- Fleet Management - Mission Pipeline -

- ---- - -## Download - -Get CrowByte Terminal for your platform: - -| Platform | Format | Link | -|----------|--------|------| -| **Linux** | AppImage, .deb | [Download](https://crowbyte.io/download) | -| **Windows** | Installer (.exe) | [Download](https://crowbyte.io/download) | -| **macOS** | .dmg | [Download](https://crowbyte.io/download) | - -Or visit [crowbyte.io/download](https://crowbyte.io/download) for the latest release. - ---- - -## Tech Stack - -| Layer | Technology | -|-------|-----------| -| Desktop | Electron 39 | -| Frontend | React 18, TypeScript 5, Vite | -| UI | Radix UI (shadcn/ui), Tailwind CSS, Framer Motion | -| Terminal | xterm.js + node-pty | -| Backend | Supabase (PostgreSQL, Auth, Storage, Edge Functions) | -| AI | Multi-provider (OpenAI-compatible, Anthropic, Ollama) | -| Charts | Recharts | -| Security | AES-256-GCM, X25519 ECDH, HKDF | +| Tier | Access | How | +|------|--------|-----| +| **Free** | Web app | [crowbyte.io](https://crowbyte.io) — sign up, start in your browser | +| **Pro** | Web + Desktop | Linux (.AppImage, .deb), Windows (.exe), macOS (.dmg) | +| **Docker** | Self-hosted | `docker compose up -d` — access via browser on port 6080 | --- @@ -157,8 +111,8 @@ Or visit [crowbyte.io/download](https://crowbyte.io/download) for the latest rel | Tier | Price | Includes | |------|-------|----------| -| **Free** | $0 | Core features, 1 device, community support | -| **Pro** | $19/mo | All features, 3 devices, AI agents, priority support | +| **Free** | $0 | Web access, core features, community support | +| **Pro** | $19/mo | Desktop apps, AI agent swarm, all features, priority support | | **Team** | $49/mo | 10 seats, shared findings, fleet management | | **Enterprise** | Custom | Unlimited seats, custom AI infra, dedicated support, SLA | @@ -168,11 +122,15 @@ Visit [crowbyte.io](https://crowbyte.io) for details. 
## Roadmap +- [ ] Persistent audit logging with cloud sync (Supabase-backed) +- [ ] Real-time SIEM connectors (Splunk, Elastic polling) +- [ ] Automated terminal output capture for report evidence +- [ ] Cloud security scanning (AWS/GCP/Azure API integration) +- [ ] SBOM generation - [ ] Plugin marketplace for community extensions - [ ] Collaborative real-time editing for team engagements - [ ] Mobile companion app (iOS / Android) - [ ] API access for CI/CD pipeline integration -- [ ] Custom AI agent builder with drag-and-drop workflows --- @@ -197,11 +155,6 @@ This repository contains documentation, legal documents, and release binaries on |---------|---------| | Website | [crowbyte.io](https://crowbyte.io) | | Support | [support@crowbyte.io](mailto:support@crowbyte.io) | -| Security | [security@hlsitech.io](mailto:security@hlsitech.io) | -| Company | [hlsitech.io](https://hlsitech.io) | +| Security | [security@crowbyte.io](mailto:security@crowbyte.io) | --- - -

- Built by HLSITech — Offensive security, powered by AI. -

diff --git a/agent/__pycache__/crowbyte-agent.cpython-313.pyc b/agent/__pycache__/crowbyte-agent.cpython-313.pyc deleted file mode 100644 index 968a388..0000000 Binary files a/agent/__pycache__/crowbyte-agent.cpython-313.pyc and /dev/null differ diff --git a/agent/crowbyte-agent.py b/agent/crowbyte-agent.py index 147c5c3..b836642 100755 --- a/agent/crowbyte-agent.py +++ b/agent/crowbyte-agent.py @@ -165,7 +165,7 @@ def collect_metrics() -> dict: # ─── HTTP Client ───────────────────────────────────────────────────────────── -def make_request(url: str, data: dict, api_key: str) -> dict: +def make_request(url: str, data: dict, api_key: str, verify_ssl: bool = True) -> dict: """POST JSON to URL with API key auth. Returns parsed response.""" body = json.dumps(data).encode('utf-8') @@ -173,10 +173,13 @@ def make_request(url: str, data: dict, api_key: str) -> dict: req.add_header('Content-Type', 'application/json') req.add_header('X-API-Key', api_key) - # Allow self-signed certs - ctx = ssl.create_default_context() - ctx.check_hostname = False - ctx.verify_mode = ssl.CERT_NONE + if verify_ssl: + ctx = ssl.create_default_context() + else: + # Only used when explicitly opted-in via config (e.g. self-signed dev certs) + ctx = ssl.create_default_context() + ctx.check_hostname = False + ctx.verify_mode = ssl.CERT_NONE try: with urllib.request.urlopen(req, context=ctx, timeout=15) as resp: @@ -206,6 +209,11 @@ def load_config() -> dict: print('[!] api_key not set in config', file=sys.stderr) sys.exit(1) + # Default to verifying SSL certificates; only disable if explicitly set to false + if config.get('verify_ssl', True) is False: + print('[!] WARNING: SSL certificate verification is disabled. ' + 'Set verify_ssl=true in config for production use.', file=sys.stderr) + return config @@ -225,7 +233,7 @@ def handle_signal(signum, frame): def register(config: dict, metrics: dict) -> bool: """Register agent with server. 
Returns True on success.""" url = f"{config['server_url'].rstrip('/')}/api/fleet/register" - result = make_request(url, metrics, config['api_key']) + result = make_request(url, metrics, config['api_key'], config.get('verify_ssl', True)) if result.get('ok'): print(f"[+] Registered: {result.get('action', 'ok')} (id: {result.get('id', '?')})") @@ -247,7 +255,7 @@ def heartbeat(config: dict, metrics: dict) -> bool: 'disk_usage': metrics['disk_usage'], 'agent_version': metrics['agent_version'], } - result = make_request(url, payload, config['api_key']) + result = make_request(url, payload, config['api_key'], config.get('verify_ssl', True)) if result.get('ok'): return True diff --git a/apps/desktop/.env.production b/apps/desktop/.env.production new file mode 100644 index 0000000..c81c530 --- /dev/null +++ b/apps/desktop/.env.production @@ -0,0 +1,15 @@ +# CrowByte Web — Production Environment +# Used by: npm run build:web:production +# Deploys to: crowbyte.io + +VITE_BUILD_TARGET=web +VITE_PLATFORM=web + +VITE_SUPABASE_URL=https://gvskdopsigtflbbylyto.supabase.co +VITE_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Imd2c2tkb3BzaWd0ZmxiYnlseXRvIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NjMzMTIzMDUsImV4cCI6MjA3ODg4ODMwNX0.eQP8nS41MQy8J6dFYUwBjwupBdcxeAAeOUcAlK1m_Xs +# NO service key — web must NEVER have it + +VITE_APP_URL=https://crowbyte.io + +# GlitchTip Error Monitoring (DSN only — no API token in web builds) +VITE_GLITCHTIP_DSN=https://16ea5a1e0b304fc086a19d080d003897@app.glitchtip.com/21559 diff --git a/apps/desktop/.env.staging b/apps/desktop/.env.staging new file mode 100644 index 0000000..4a18161 --- /dev/null +++ b/apps/desktop/.env.staging @@ -0,0 +1,15 @@ +# CrowByte Web — Staging Environment +# Used by: npm run build:web:staging +# Deploys to: staging.crowbyte.io + +VITE_BUILD_TARGET=web +VITE_PLATFORM=web + +VITE_SUPABASE_URL=https://gvskdopsigtflbbylyto.supabase.co 
+VITE_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Imd2c2tkb3BzaWd0ZmxiYnlseXRvIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NjMzMTIzMDUsImV4cCI6MjA3ODg4ODMwNX0.eQP8nS41MQy8J6dFYUwBjwupBdcxeAAeOUcAlK1m_Xs +# NO service key — web must NEVER have it + +VITE_APP_URL=https://staging.crowbyte.io + +# GlitchTip Error Monitoring (DSN only — no API token in web builds) +VITE_GLITCHTIP_DSN=https://16ea5a1e0b304fc086a19d080d003897@app.glitchtip.com/21559 diff --git a/apps/desktop/.gitignore b/apps/desktop/.gitignore new file mode 100644 index 0000000..fe4afdb --- /dev/null +++ b/apps/desktop/.gitignore @@ -0,0 +1 @@ +sourcemaps/ diff --git a/apps/desktop/electron/main.cjs b/apps/desktop/electron/main.cjs index ab15405..f84493b 100644 --- a/apps/desktop/electron/main.cjs +++ b/apps/desktop/electron/main.cjs @@ -6,22 +6,41 @@ // Suppress EPIPE errors (broken pipe when Vite disconnects) process.stdout.on('error', (err) => { if (err.code === 'EPIPE') return; }); process.stderr.on('error', (err) => { if (err.code === 'EPIPE') return; }); -process.on('uncaughtException', (err) => { - if (err.code === 'EPIPE' || err.message?.includes('EPIPE')) return; - console.error('[!] 
Uncaught:', err); -}); // Suppress sourcemap warnings for MCP SDK (missing source files) process.on('warning', (warning) => { - if (warning.message && warning.message.includes('Sourcemap')) { - return; // Suppress sourcemap warnings - } + if (warning.message && warning.message.includes('Sourcemap')) return; console.warn(warning); }); const { app, BrowserWindow, ipcMain, Menu, safeStorage, WebContentsView, session } = require('electron'); const path = require('path'); const { spawn } = require('child_process'); + +// GlitchTip — main process error monitoring (zero deps, pure fetch) +const GLITCHTIP_STORE_URL = 'https://app.glitchtip.com/api/21559/store/?sentry_key=16ea5a1e0b304fc086a19d080d003897&sentry_version=7'; +function reportError(err) { + try { + const crypto = require('crypto'); + const event = { + event_id: crypto.randomUUID().replace(/-/g, ''), + timestamp: new Date().toISOString(), + platform: 'node', + level: 'error', + environment: process.env.NODE_ENV || 'production', + release: `crowbyte@${require('../package.json').version || '0.0.0'}`, + tags: { process: 'main', platform: process.platform }, + exception: { values: [{ type: err.name || 'Error', value: err.message, stacktrace: err.stack ? { frames: err.stack.split('\n').slice(1).map(l => { const m = l.match(/at\s+(?:(.+?)\s+\()?(.+?):(\d+):(\d+)\)?$/); return m ? { function: m[1] || '?', filename: m[2], lineno: +m[3], colno: +m[4] } : null; }).filter(Boolean).reverse() } : undefined }] }, + }; + fetch(GLITCHTIP_STORE_URL, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify(event) }).catch(() => {}); + } catch (_) { /* silent */ } +} +process.on('uncaughtException', (err) => { + if (err.code === 'EPIPE' || err.message?.includes('EPIPE')) return; + reportError(err); + console.error('[!] 
Uncaught:', err); +}); +console.log('[+] GlitchTip main process monitoring active (no SDK)'); const os = require('os'); const plat = require('./platform.cjs'); let pty; @@ -41,6 +60,10 @@ try { StdioClientTransport = null; } +// Force consistent app name so userData path is always ~/.config/crowbyte/ +// Without this, unpackaged Electron (npx electron) uses ~/.config/Electron/ +app.name = 'crowbyte'; + // Disable hardware acceleration to prevent GPU errors (if app is available) if (app && typeof app.disableHardwareAcceleration === 'function') { app.disableHardwareAcceleration(); @@ -631,7 +654,8 @@ function createOnboardingWindow() { titleBarStyle: 'hidden', }); - const isDev = process.env.NODE_ENV === 'development' || process.argv.includes('--dev') || !app.isPackaged; + const isForceProduction = process.env.NODE_ENV === 'production'; + const isDev = !isForceProduction && (process.env.NODE_ENV === 'development' || process.argv.includes('--dev') || !app.isPackaged); if (isDev) { mainWindow.loadURL('http://localhost:8081/#/onboarding'); mainWindow.webContents.openDevTools({ mode: 'detach' }); @@ -698,7 +722,7 @@ function createWindow() { "https://api.ipify.org https://api64.ipify.org https://ipinfo.io " + "https://api.my-ip.io https://icanhazip.com https://ipecho.net " + "https://ifconfig.me https://ident.me https://wtfismyip.com " + - "https://ipapi.co https://check.torproject.org " + + "https://ipapi.co " + "https://api.venice.ai https://ollama.ai https://*.supabase.co wss://*.supabase.co " + "https://*.hstgr.cloud " + "wss://*.hstgr.cloud:* " + @@ -713,7 +737,7 @@ function createWindow() { "https://api.ipify.org https://api64.ipify.org https://ipinfo.io " + "https://api.my-ip.io https://icanhazip.com https://ipecho.net " + "https://ifconfig.me https://ident.me https://wtfismyip.com " + - "https://ipapi.co https://check.torproject.org " + + "https://ipapi.co " + "https://api.venice.ai https://ollama.ai https://*.supabase.co " + "https://*.hstgr.cloud " + 
"https://integrate.api.nvidia.com http://" + (process.env.VITE_VPS_IP || '127.0.0.1') + ":*; " + @@ -724,7 +748,9 @@ function createWindow() { }); // Load the app — dev server in development, built files in production - const isDev = process.env.NODE_ENV === 'development' || process.argv.includes('--dev') || !app.isPackaged; + // NODE_ENV=production overrides !app.isPackaged (Docker runs from source but needs prod mode) + const isForceProduction = process.env.NODE_ENV === 'production'; + const isDev = !isForceProduction && (process.env.NODE_ENV === 'development' || process.argv.includes('--dev') || !app.isPackaged); if (isDev) { mainWindow.loadURL('http://localhost:8081'); console.log('[*] Loading from dev server: http://localhost:8081'); @@ -733,6 +759,11 @@ function createWindow() { console.log('[*] Loading from built files: dist/index.html'); } + // Auto-fullscreen in Docker/headless environments (Xvfb + Fluxbox ignores maximize/setBounds) + if (isForceProduction && !app.isPackaged) { + mainWindow.setFullScreen(true); + } + // Setup context menu (right-click menu) mainWindow.webContents.on('context-menu', (event, params) => { const contextMenu = Menu.buildFromTemplate([ @@ -1620,50 +1651,6 @@ ipcMain.handle('run-command', async (event, command, args = []) => { }); }); -// Tor check proxy (avoid CORS) -ipcMain.handle('check-tor', async () => { - try { - const https = require('https'); - - return new Promise((resolve) => { - const req = https.request('https://check.torproject.org/api/ip', { - method: 'GET', - timeout: 5000, - }, (res) => { - let data = ''; - - res.on('data', (chunk) => { - data += chunk; - }); - - res.on('end', () => { - try { - const parsed = JSON.parse(data); - resolve({ success: true, data: parsed }); - } catch (error) { - resolve({ success: false, error: 'Invalid JSON response' }); - } - }); - }); - - req.on('error', (error) => { - console.error('❌ Tor check error:', error.message); - resolve({ success: false, error: error.message }); - }); - 
- req.on('timeout', () => { - req.destroy(); - resolve({ success: false, error: 'Request timed out' }); - }); - - req.end(); - }); - } catch (error) { - console.error('❌ Tor check handler error:', error); - return { success: false, error: error.message }; - } -}); - // NVD CVE API proxy (avoid CORS and rate limiting) ipcMain.handle('fetch-cves', async (event, year) => { try { diff --git a/apps/desktop/electron/preload.js b/apps/desktop/electron/preload.js index 60ed02f..ad1fb01 100644 --- a/apps/desktop/electron/preload.js +++ b/apps/desktop/electron/preload.js @@ -91,9 +91,6 @@ contextBridge.exposeInMainWorld('electronAPI', { // Run system command (for DNS detection, etc.) runCommand: (command, args) => ipcRenderer.invoke('run-command', command, args), - // Tor check proxy (avoid CORS) - checkTor: () => ipcRenderer.invoke('check-tor'), - // NVD CVE API proxy (avoid CORS) fetchCVEs: (year) => ipcRenderer.invoke('fetch-cves', year), diff --git a/apps/desktop/package.json b/apps/desktop/package.json index 9ba4474..4410cc5 100644 --- a/apps/desktop/package.json +++ b/apps/desktop/package.json @@ -17,12 +17,16 @@ "main": "electron/main.cjs", "scripts": { "dev": "cross-env NODE_ENV=development vite", + "dev:web": "cross-env VITE_BUILD_TARGET=web NODE_ENV=development vite", "electron:dev": "node electron/launch.cjs", - "build:vite": "vite build", - "build:electron": "electron-builder", - "build:electron:win": "electron-builder --win", - "build:electron:mac": "electron-builder --mac", - "build:electron:linux": "electron-builder --linux", + "build:vite": "cross-env VITE_BUILD_TARGET=electron vite build", + "build:web": "cross-env VITE_BUILD_TARGET=web vite build", + "build:web:staging": "cross-env VITE_BUILD_TARGET=web vite build --mode staging", + "build:web:production": "cross-env VITE_BUILD_TARGET=web vite build --mode production", + "build:electron-pkg": "electron-builder", + "build:electron-pkg:win": "electron-builder --win", + "build:electron-pkg:mac": 
"electron-builder --mac", + "build:electron-pkg:linux": "electron-builder --linux", "build": "npm run build:vite", "build:win": "npm run build:vite", "build:mac": "npm run build:vite", @@ -156,6 +160,8 @@ "@radix-ui/react-toggle": "^1.1.9", "@radix-ui/react-toggle-group": "^1.1.10", "@radix-ui/react-tooltip": "^1.2.7", + "@sentry/browser": "^10.46.0", + "@sentry/electron": "^7.10.0", "@stackblitz/sdk": "^1.11.0", "@supabase/supabase-js": "^2.81.0", "@tanstack/react-query": "^5.83.0", @@ -196,6 +202,7 @@ }, "devDependencies": { "@eslint/js": "^9.32.0", + "@sentry/vite-plugin": "^5.1.1", "@tailwindcss/typography": "^0.5.16", "@types/debug": "^4.1.12", "@types/fs-extra": "^11.0.4", diff --git a/apps/desktop/src/App.tsx b/apps/desktop/src/App.tsx index 134b236..aaadae4 100644 --- a/apps/desktop/src/App.tsx +++ b/apps/desktop/src/App.tsx @@ -1,5 +1,6 @@ import { useState, useEffect } from "react"; import '@/services/error-monitor'; +import { glitchTipService } from '@/services/glitchtip'; import { Toaster } from "@/components/ui/toaster"; import { Toaster as Sonner } from "@/components/ui/sonner"; import { QueryClient, QueryClientProvider } from "@tanstack/react-query"; @@ -74,9 +75,9 @@ import PreferencesWizard from "./pages/PreferencesWizard"; import SubscriptionGate from "./pages/SubscriptionGate"; import { verifyLicense, needsRecheck, CHECK_INTERVAL_MS, type LicenseStatus } from "@/services/license-guard"; import { needsPreferencesSetup } from "@/services/subscription"; +import { IS_ELECTRON } from "@/lib/platform"; const queryClient = new QueryClient(); -const isElectron = typeof window !== 'undefined' && !!(window as any).electronAPI; /** Layout wrapper that includes TitleBar — used for all routes except /landing */ const AppWithTitleBar = () => ( @@ -84,13 +85,13 @@ const AppWithTitleBar = () => ( {/* Auth routes without sidebar */} - } /> - } /> + } /> + } /> {/* Documentation - own layout, no main sidebar */} -
+
@@ -105,7 +106,7 @@ const AppWithTitleBar = () => ( -
+
@@ -175,14 +176,14 @@ const App = () => { // ─── License Gate (Electron only) ───────────────────────────────────── const [licenseStatus, setLicenseStatus] = useState(null); - const [licenseChecked, setLicenseChecked] = useState(!isElectron); // Skip for web + const [licenseChecked, setLicenseChecked] = useState(!IS_ELECTRON); // Skip for web // ─── Post-upgrade Preferences Wizard redirect ─────────────────────── const [prefsChecked, setPrefsChecked] = useState(false); const [needsPrefsWizard, setNeedsPrefsWizard] = useState(false); useEffect(() => { - if (!isElectron) return; // Web users don't need license check + if (!IS_ELECTRON) return; // Web users don't need license check const checkLicense = async () => { const status = await verifyLicense(); @@ -224,6 +225,11 @@ const App = () => { } }, [setupComplete]); + // Initialize GlitchTip error monitoring + useEffect(() => { + glitchTipService.initialize(); + }, []); + // Enable automatic cache cleanup (runs every hour) useCacheCleanup({ intervalMs: 60 * 60 * 1000, // 1 hour @@ -238,10 +244,10 @@ const App = () => { }); // License gate — Electron only, blocks EVERYTHING until valid - if (isElectron && !licenseChecked) { + if (IS_ELECTRON && !licenseChecked) { return null; // Loading — checking license } - if (isElectron && licenseStatus && !licenseStatus.valid) { + if (IS_ELECTRON && licenseStatus && !licenseStatus.valid) { // Allow onboarding + auth routes through (new installs need to sign up/login first) const hash = window.location.hash || ''; const isPassthrough = hash.includes('/onboarding') || hash.includes('/auth') || hash.includes('/payments'); @@ -265,7 +271,7 @@ const App = () => { // ─── Post-upgrade: redirect Pro+ users to preferences wizard ───────── // Runs after license gate passes — if user just upgraded and hasn't configured agents/intel - if (isElectron && prefsChecked && needsPrefsWizard) { + if (IS_ELECTRON && prefsChecked && needsPrefsWizard) { const isAlreadyOnPrefs = 
window.location.hash?.includes('/setup-preferences'); if (!isAlreadyOnPrefs) { // Render a minimal router that redirects to preferences wizard @@ -294,7 +300,7 @@ const App = () => { -
+
setSetupComplete(true)} />
@@ -328,7 +334,7 @@ const App = () => { } /> } /> : } /> diff --git a/apps/desktop/src/agents/soc/agent-registry.ts b/apps/desktop/src/agents/soc/agent-registry.ts index 360227a..f73041d 100644 --- a/apps/desktop/src/agents/soc/agent-registry.ts +++ b/apps/desktop/src/agents/soc/agent-registry.ts @@ -834,7 +834,51 @@ Secure and monitor network infrastructure. You: - CLI commands on network devices require human approval - Show/read commands are safe - When blocking IPs, apply across ALL connected firewall devices -- Document the IOC and reason for every block`, + - Document the IOC and reason for every block`, +}; + +export const CYBER_SECURITY_REVIEWER: AgentRole = { + id: 'cyber-security-reviewer', + name: 'Cyber Security Reviewer', + description: 'Second-opinion coding reviewer focused on secure coding, practical enhancements, and risk-based remediation guidance.', + domain: 'threat-intel', + permissionLevel: 'observe', + autoActivateOn: ['github', 'gitlab', 'bitbucket'], + allowedTools: [ + 'repo_list_files', + 'repo_fetch_file', + 'repo_search_code', + 'repo_get_commit', + 'repo_create_issue', + ], + blockedTools: [], + escalatesTo: 'incident-commander', + requiresApprovalFor: ['repo_create_issue'], + model: 'claude-sonnet-4-6', + maxActionsPerIncident: 40, + cooldownMs: 2000, + enabled: true, + systemPrompt: `You are the CrowByte Cyber Security Reviewer — a second-opinion coding security reviewer. + +## Your Mission +Review code for security risks and recommend practical enhancements. +1. Find vulnerabilities and insecure patterns +2. Explain impact in plain language +3. Provide concrete secure-code fixes +4. 
Suggest architecture and process improvements that reduce repeat risk + +## Review Scope +- OWASP Top 10 and common CWE weaknesses +- Secrets exposure, auth/authz flaws, input validation, injection risks +- Cryptography misuse, insecure defaults, and unsafe dependency usage +- Logging, error handling, and data protection gaps + +## Rules +- Prioritize findings by exploitability and business impact +- Include file paths, line numbers, and minimally invasive fixes +- Prefer secure-by-default recommendations +- Keep guidance actionable for developers and reviewers +- Ask clarifying questions when threat model or runtime context is unclear`, }; // ─── Full Agent Registry ───────────────────────────────────────────────────── @@ -860,6 +904,7 @@ export const AGENT_REGISTRY: AgentRole[] = [ INFRA_CONTAINER_AGENT, CLOUD_SECURITY_AGENT, NETWORK_AGENT, + CYBER_SECURITY_REVIEWER, ]; export function getAgentById(id: string): AgentRole | undefined { diff --git a/apps/desktop/src/components/AppSidebar.tsx b/apps/desktop/src/components/AppSidebar.tsx index 6fc4e55..ef59846 100644 --- a/apps/desktop/src/components/AppSidebar.tsx +++ b/apps/desktop/src/components/AppSidebar.tsx @@ -95,7 +95,7 @@ const commandCenterItems = [ const aiOperationsItems = [ { title: "Chat", url: "/chat", icon: ChatDots }, - { title: "Search AI Agent", url: "/ai-agent", icon: Brain }, + { title: "Support Agent", url: "/ai-agent", icon: Headset }, { title: "Agent Builder", url: "/agent-builder", icon: Robot }, ]; diff --git a/apps/desktop/src/components/ProtectedRoute.tsx b/apps/desktop/src/components/ProtectedRoute.tsx index 9061924..f798f73 100644 --- a/apps/desktop/src/components/ProtectedRoute.tsx +++ b/apps/desktop/src/components/ProtectedRoute.tsx @@ -8,6 +8,7 @@ import { useNavigate } from 'react-router-dom'; import { useAuth } from '@/contexts/auth'; import { Card, CardContent } from '@/components/ui/card'; import { Shield, ArrowsClockwise } from '@phosphor-icons/react'; +import { IS_ELECTRON } 
from '@/lib/platform'; interface ProtectedRouteProps { children: React.ReactNode; @@ -17,22 +18,20 @@ export function ProtectedRoute({ children }: ProtectedRouteProps) { const { isAuthenticated, loading } = useAuth(); const navigate = useNavigate(); - const isElectron = typeof window !== 'undefined' && !!(window as any).electronAPI; - useEffect(() => { if (!loading && !isAuthenticated) { navigate('/auth'); return; } // Redirect to preferences wizard for new web users who haven't completed setup - if (!loading && isAuthenticated && !isElectron) { + if (!loading && isAuthenticated && !IS_ELECTRON) { const wizardDone = localStorage.getItem('crowbyte_prefs_wizard_done'); const currentPath = window.location.pathname; if (!wizardDone && currentPath !== '/setup-preferences') { navigate('/setup-preferences'); } } - }, [isAuthenticated, loading, navigate, isElectron]); + }, [isAuthenticated, loading, navigate, IS_ELECTRON]); // Show loading state while checking auth if (loading) { diff --git a/apps/desktop/src/components/TitleBar.tsx b/apps/desktop/src/components/TitleBar.tsx index 68b712c..c8100cb 100644 --- a/apps/desktop/src/components/TitleBar.tsx +++ b/apps/desktop/src/components/TitleBar.tsx @@ -2,6 +2,7 @@ import { useState, useEffect, useRef } from "react"; import { Minus, Square, X, PushPin, PushPinSlash, SidebarSimple } from "@phosphor-icons/react"; import { motion, AnimatePresence } from "framer-motion"; import { useBrowserPanelSafe } from "@/contexts/browser"; +import { IS_ELECTRON } from "@/lib/platform"; export function TitleBar() { const [isPinned, setIsPinned] = useState(false); @@ -9,6 +10,9 @@ export function TitleBar() { const hideTimeoutRef = useRef(null); const browserPanel = useBrowserPanelSafe(); + // Only render in Electron — web users see the browser's native chrome + if (!IS_ELECTRON) return null; + const handleMinimize = async () => { if (window.electronAPI?.minimizeWindow) { await window.electronAPI.minimizeWindow(); @@ -78,7 +82,7 @@ export 
function TitleBar() { {/* Left side - App title */}
- CROWBYTE TERMINAL + CROWBYTE
diff --git a/apps/desktop/src/components/landing/CTABanner.tsx b/apps/desktop/src/components/landing/CTABanner.tsx index c1e9714..5a00153 100644 --- a/apps/desktop/src/components/landing/CTABanner.tsx +++ b/apps/desktop/src/components/landing/CTABanner.tsx @@ -22,7 +22,7 @@ export default function CTABanner() {

- One terminal. 15 AI agents. Every tool you need. + One platform. 9 AI agents. Every tool you need.
Your next bounty is waiting.

@@ -42,7 +42,7 @@ export default function CTABanner() {

- Free tier. No credit card. Linux / Windows / macOS. + Free tier. No credit card. Start in your browser.

diff --git a/apps/desktop/src/components/landing/Features.tsx b/apps/desktop/src/components/landing/Features.tsx index 3343b95..456c145 100644 --- a/apps/desktop/src/components/landing/Features.tsx +++ b/apps/desktop/src/components/landing/Features.tsx @@ -34,7 +34,7 @@ const features = [ { icon: Network, name: "Fleet Management", - desc: "Distributed AI agent swarm across your infrastructure. 15 agents. One command.", + desc: "Distributed AI agent swarm across your infrastructure. 9 agents. One command.", accent: "blue" as const, }, { diff --git a/apps/desktop/src/components/landing/HowItWorks.tsx b/apps/desktop/src/components/landing/HowItWorks.tsx index 97c4708..144a7ee 100644 --- a/apps/desktop/src/components/landing/HowItWorks.tsx +++ b/apps/desktop/src/components/landing/HowItWorks.tsx @@ -1,19 +1,19 @@ import { motion, useInView } from "framer-motion"; import { useRef } from "react"; -import { Download, TerminalSquare, Crosshair, FileText } from "lucide-react"; +import { UserPlus, TerminalSquare, Crosshair, FileText } from "lucide-react"; const steps = [ { - icon: Download, + icon: UserPlus, num: "01", - title: "Install", - desc: "One binary. Linux, Windows, macOS. You're live in 30 seconds.", + title: "Sign Up", + desc: "Free in your browser. Pro unlocks desktop apps for Linux, Windows, macOS.", }, { icon: TerminalSquare, num: "02", title: "Target", - desc: "Give it a domain. CrowByte spawns 15 agents across your attack surface.", + desc: "Give it a domain. 
CrowByte spawns 9 agents across your attack surface.", }, { icon: Crosshair, diff --git a/apps/desktop/src/data/docs-knowledge.json b/apps/desktop/src/data/docs-knowledge.json new file mode 100644 index 0000000..a1660fc --- /dev/null +++ b/apps/desktop/src/data/docs-knowledge.json @@ -0,0 +1,247 @@ +[ + { + "id": "overview", + "title": "Overview", + "section": "Getting Started", + "keywords": ["overview", "introduction", "what is crowbyte", "features", "about", "architecture", "feature map", "getting started", "command center", "electron app"], + "content": "CrowByte Terminal is a desktop Electron application designed for professional bug bounty hunters and security operators. It runs on Kali Linux 2025 and provides a unified command center for offensive and defensive security operations.\n\nThe app integrates two AI backends: Claude Code CLI (Anthropic's Opus/Sonnet/Haiku models running locally via Electron IPC) and OpenClaw (a remote VPS agent swarm running NVIDIA Cloud models like DeepSeek V3.2, Qwen3 Coder 480B, Mistral Large 675B, and more).\n\nAll persistent data (CVEs, knowledge base, agents, bookmarks, red team operations) is stored in Supabase (cloud PostgreSQL), so every instance of CrowByte shares the same data in real-time.\n\nStatus Legend: Ready means fully implemented and tested. Beta means functional but may have rough edges. Dev means under active development.\n\nArchitecture: Electron App (React + TypeScript + Vite) connects to Claude Code CLI via Electron IPC (claude -p --output-format stream-json) supporting Opus 4.6 / Sonnet 4.6 / Haiku 4.5 with full MCP servers, tools, and plugins. OpenClaw Gateway connects via HTTPS to the VPS with NVIDIA Cloud models (DeepSeek, Qwen, Mistral, Kimi, GLM5), 9 specialized agents (recon, hunter, intel, analyst...), and D3bugr MCP (nmap, nuclei, sqlmap, browser automation). Supabase provides cloud PostgreSQL for CVEs, knowledge base, agents, bookmarks, auth, and settings. 
The Kali Linux Host provides access to 7000+ security tools including nmap, nuclei, sqlmap, ffuf, burp, metasploit, and xterm.js terminal with tmux integration.\n\nFeature Map: AI Chat (Claude CLI + OpenClaw dual-provider streaming, ready), Red Team (operation tracking, findings, Supabase-backed, ready), Blue Team (security monitor, CVE database, threat intel, ready), Terminal (xterm.js + tmux, multi-tab, shell presets, ready), Knowledge Base (cloud-synced entries with file uploads, ready), Fleet (VPS agent swarm + endpoint monitoring, beta), NVD + Shodan (parallel CVE lookup, auto-save to Supabase, ready), Mission Planner (phase-based operation planning, beta), Threat Intel (IOC feeds, enrichment, STIX correlation, beta), Analytics (usage metrics, CVE stats, Supabase health, ready), AI Agents (custom agent builder + testing lab, beta), Network Scanner (10 nmap profiles, parsed results, ready).\n\nThe documentation covers 34 sections including every page, service, and integration, Supabase schemas for all database tables, CLI tool references for cve-db and kb, AI provider configs for Claude, OpenClaw, Venice, Ollama, Electron architecture including IPC, node-pty, cache manager, and the security layer with AES-256-GCM encryption, credential vault, and device fingerprinting." 
+ }, + { + "id": "installation", + "title": "Installation & Setup", + "section": "Getting Started", + "keywords": ["installation", "setup", "install", "prerequisites", "build", "environment", "env", "configuration", "first run", "npm", "node", "electron", "vite"], + "content": "Prerequisites: Kali Linux 2025 (or any Linux with Node.js 20+), Node.js 20+ and npm 10+, Electron 39 (installed via npm), Claude Code CLI (for Claude provider), Supabase project (free tier works), Tavily API key (optional, for Search Agent).\n\nBuild from Source:\ncd /mnt/bounty/Claude/crowbyte/apps/desktop\nnpm install\n\nDevelopment (hot reload): npm run dev\n\nProduction build: npm run build (Web build via Vite), npm run build:electron:linux (Linux Electron package), npm run build:electron:win (Windows Electron installer).\n\nEnvironment Variables (create .env in apps/desktop/):\nVITE_SUPABASE_URL=https://your-project.supabase.co (required)\nVITE_SUPABASE_ANON_KEY=eyJ... (required)\nVITE_TAVILY_API_KEY=your-tavily-key (optional, for Search Agent)\nVITE_OPENCLAW_HOST=your-vps-ip (optional, for remote AI)\nVITE_OPENCLAW_PORT=18789 (optional)\nVITE_VENICE_API_KEY=... (optional)\n\nFirst Run: On first launch, CrowByte will show intro animation (can be disabled in Settings), redirect to Auth page for login/signup, create default bookmark categories and starter bookmarks, auto-register current device in Fleet (if Electron), check OpenClaw VPS connectivity, and initialize Supabase Realtime subscriptions." + }, + { + "id": "auth", + "title": "Authentication", + "section": "Getting Started", + "keywords": ["authentication", "auth", "login", "signup", "sign in", "sign up", "credentials", "password", "oauth", "github", "supabase auth", "remember me", "encryption", "device fingerprint", "AES", "session"], + "content": "Authentication uses Supabase Auth with email/password and GitHub OAuth. 
The AuthProvider context (in contexts/auth.tsx) wraps the entire app and provides isAuthenticated, signIn, signUp, and signOut functions.\n\nWhen 'Remember Me' is checked, credentials are encrypted and stored locally via credentialStorage with AES-256-GCM encryption keyed to the device fingerprint.\n\nAuth Flow: 1. Check if device has stored credentials (credentialStorage). 2. If stored + device recognized, auto-login attempt. 3. Otherwise, show login/signup form. 4. On submit, call supabase.auth.signInWithPassword(). 5. If 'Remember Me', encrypt + store credentials. 6. Navigate to Dashboard.\n\nOAuth callback (GitHub): 1. supabase.auth.signInWithOAuth({ provider: 'github' }). 2. Redirect to GitHub, authorize, callback. 3. Parse hash params (#access_token=...). 4. supabase.auth.setSession(access_token, refresh_token).\n\nCredential Storage: The credentialStorage service encrypts credentials at rest using AES-256-GCM. Key derivation uses PBKDF2 (100,000 iterations, SHA-256). Salt is derived from the device fingerprint hash. Storage is in localStorage (encrypted blob).\n\nDevice Fingerprinting (deviceFingerprint.ts): Inputs include userAgent, language, timezone, screen dimensions, and platform. Output is a SHA-256 hash used as unique device ID for credential encryption key.\n\nFeatures: Email/password authentication via Supabase Auth, GitHub OAuth integration, Remember Me with AES-256-GCM encrypted credential storage, device fingerprinting for credential key derivation, auto-login on recognized devices, protected routes (redirect to /auth if not authenticated), session persistence via Supabase refresh tokens." + }, + { + "id": "dashboard", + "title": "Dashboard", + "section": "Command Center", + "keywords": ["dashboard", "home", "home screen", "metrics", "system health", "cpu", "memory", "ram", "disk", "network", "ip status", "vpn", "news", "rss", "monitoring", "quick actions", "realtime"], + "content": "The Dashboard is your home screen. 
It shows real-time system health (CPU, memory, disk, network) pulled from the local Kali machine, IP status (public IP, VPN detection), recent CVE alerts from the Supabase database, and security news via RSS feeds.\n\nThe CommandCenterHeader card at the top provides AI-powered security analysis using the OpenClaw service and an auto-monitoring toggle that scans every 5 minutes.\n\nServices Architecture: systemMonitor provides CPU, RAM, disk, network polled via Electron IPC. ip-status.ts handles public IP, VPN detection, geolocation (ipify + ipapi). pc-monitor.ts provides process list, open connections, system info. inoreader.ts handles RSS feeds for security news aggregation. openclaw.ts provides VPS health check and agent status. endpointService handles Fleet device registry with auto-register on mount.\n\n4 Supabase Realtime channels for live updates: cves (new CVE alerts), knowledge_base (new KB entries), red_team_ops (operation updates), bookmarks (new bookmarks).\n\nFeatures: Real-time system metrics (CPU/RAM/disk/network) via systemMonitor service, IP status card with public IP, VPN detection, geolocation via ipify + ipapi.co, OpenClaw connection status with VPS agent swarm health and latency, recent CVE alerts from Supabase (severity-colored, clickable), security news feed via Inoreader RSS integration, quick action buttons to navigate to Chat, Terminal, Red Team, etc., auto-monitoring toggle (5-minute interval AI scans via GHOST agent), endpoint registry with tracked devices from Fleet with auto-registration, CommandCenterHeader with AI threat summary from OpenClaw, 4 Supabase Realtime channels for live data sync.\n\nAuto-Monitoring: When enabled, the GHOST security agent runs every 5 minutes. It collects system metrics, running processes, and open connections, then sends them to DeepSeek V3.1 (via Ollama Cloud) for AI threat analysis. Alerts are categorized as info, warning, or critical with actionable recommendations. 
The monitoring service uses the monitoringAgent which operates in a 10-iteration tool loop." + }, + { + "id": "chat", + "title": "AI Chat", + "section": "Command Center", + "keywords": ["chat", "ai chat", "claude", "openclaw", "conversation", "streaming", "llm", "ai", "assistant", "prompt", "message", "provider", "deepseek", "qwen", "mistral", "dual provider", "model"], + "content": "AI Chat provides dual-provider AI chat with streaming, tool use, and conversation history.\n\nClaude Code CLI Provider: Claude runs inside CrowByte via Electron IPC. The app spawns 'claude -p --output-format stream-json' as a child process through the Electron main process. Claude has full access to MCP servers (d3bugr, shodan, filesystem, memory-engine), all Kali tools, and the .env-unfiltered workspace with its CLAUDE.md identity. Features: Claude Opus 4.6, Sonnet 4.6, Haiku 4.5 (select per-conversation), streamed via Electron IPC (electronAPI.claudeChat), full MCP tool access (can run nmap, nuclei, sqlmap through d3bugr), persistent sessions (sessionId carried across messages), budget control (configurable max spend per message), thinking block display (collapsible sections).\n\nOpenClaw Provider: OpenClaw connects to a remote VPS agent swarm at your-vps-ip (configured via VITE_OPENCLAW_HOST). It routes through an NVIDIA proxy (port 19990) that re-adds model provider prefixes stripped by OpenClaw, then forwards to NVIDIA Cloud's free inference API. 
Models: DeepSeek V3.2 (671B, flagship reasoning), Qwen3 Coder 480B (coding specialist), Qwen 3.5 397B (general purpose), Mistral Large 675B (multilingual reasoning), Kimi K2 (Moonshot reasoning), Devstral 123B (fast coding assistant), GLM5 (Z-AI general model).\n\nChat Features: Provider switcher to toggle between Claude and OpenClaw mid-conversation, streaming responses with real-time token output, Markdown rendering (ReactMarkdown + remark-gfm), code block syntax highlighting with copy buttons, thinking/reasoning block collapse (DeepSeek tags), conversation sidebar with saved chat history, system prompt customization via settings sheet, cost tracking per message (Claude provider), stop generation button (abort stream)." + }, + { + "id": "ai-agent", + "title": "Search AI Agent", + "section": "AI Operations", + "keywords": ["search agent", "ai agent", "tavily", "web search", "research", "autonomous", "multi-step", "reasoning", "citations", "cybersecurity agent", "cve search", "exploit search", "threat actor"], + "content": "The Search AI Agent is an autonomous research agent powered by Tavily web search with multi-step reasoning.\n\nHow it works: The Search AI Agent uses Tavily API for web search combined with LLM reasoning to perform multi-step research tasks. It searches the web, reads sources, extracts relevant information, and synthesizes answers with citations. Requires a VITE_TAVILY_API_KEY in the environment. The agent auto-initializes on page mount.\n\nSearch Modes: Quick (fast single-pass search, top 5 results), Deep (multi-step search with follow-up queries, source validation), Academic (research-focused with citation tracking).\n\nThe agent operates in an action-observation loop: 1. Parse user query into search terms. 2. Execute Tavily search (fetch URLs, extract content). 3. Observe results, decide if more info needed. 4. 
Repeat or synthesize final answer with sources.\n\ncybersec-ai-agent.ts: A specialized agent focused on cybersecurity research that extends the base search agent with CVE-aware search (auto-detects CVE IDs and enriches with NVD data), exploit search (checks ExploitDB, GitHub PoCs for discovered vulns), threat actor profiling (MITRE ATT&CK technique correlation), and vulnerability context (adds CVSS, affected products, patch status).\n\nFeatures: Multi-step web search with source citation, step-by-step reasoning display (action + observation), source cards with title, URL, and content preview, chat-style interface with message history, auto-scrolling conversation view, Tavily API key configurable from Settings. Cost is approximately $0.01 per search (Tavily pricing)." + }, + { + "id": "agent-builder", + "title": "Agent Builder", + "section": "AI Operations", + "keywords": ["agent builder", "custom agent", "create agent", "persona", "system prompt", "instructions", "capabilities", "agent config", "model selection", "conversation starters"], + "content": "The Agent Builder lets you create custom AI agents with specific personas, tools, and capabilities.\n\nHow it works: The Agent Builder lets you create custom AI agents stored in Supabase. Each agent has a name, description, system prompt (instructions), model selection, category, conversation starters, and capability toggles. Agents are executed via the customAgentExecutor service which routes the agent's configuration through OpenClaw with the custom system prompt injected.\n\nCRUD Workflow (custom-agents.ts Supabase service): createAgent() inserts new agent config to custom_agents table. updateAgent() modifies existing agent (name, prompt, model, caps). deleteAgent() removes agent from database. 
getAgents() fetches all agents for current user.\n\ncustom-agent-executor.ts Execution engine: executeAgent(agent, prompt) loads agent config (system prompt, model, capabilities), builds OpenClaw request with injected system prompt, streams response back to UI via SSE, and handles tool calls if agent has tool capabilities.\n\nSupabase Schema (custom_agents): name (TEXT, agent display name), description (TEXT, short description), instructions (TEXT, system prompt with persona and behavior rules), model (TEXT, OpenClaw model ID), category (TEXT: security, coding, research, analysis, custom), capabilities (JSONB: web_search, code_execution, mcp_tools, file_access), starters (TEXT[], predefined conversation starter prompts), user_id (UUID, owner with RLS enforced).\n\nFeatures: Visual 3-panel layout (agent list, configuration, live preview), custom system prompts to define agent persona and behavior, model selection (any OpenClaw model), category tags (security, coding, research, analysis, custom), capability toggles (web search, code execution, MCP tools, file access), conversation starters (predefined prompts for quick use), live preview panel to test agent before saving, cloud persistence via Supabase (shared across instances). Export/import agent configs is planned." + }, + { + "id": "agent-testing", + "title": "Agent Testing Lab", + "section": "AI Operations", + "keywords": ["agent testing", "testing lab", "benchmark", "test suite", "agent test", "pass fail", "response time", "performance", "test results"], + "content": "The Agent Testing Lab provides a comprehensive testing dashboard for all AI agents with benchmarking.\n\nHow it works: The Agent Testing page provides a unified interface to test all AI agents in CrowByte. It uses the agentTester service to run predefined test suites against each agent (Search, OpenClaw, Monitoring, Custom agents) and collect pass/fail results with timing metrics. Tests run sequentially with progress tracking. 
Results show per-agent success rates, response times, and detailed error logs for failed tests.\n\nTestable Agents: Search Agent (Tavily) with config maxResults, tavilyApiKey and tests for query parsing, result quality, source citation. OpenClaw Agent with config model, temperature, maxTokens, requestType and tests for connection, streaming, tool calling, fallback. Monitoring Agent (GHOST) with config model, interval, maxIterations and tests for metric collection, threat detection, alert generation. Custom Agents with per-agent config from Agent Builder and tests for system prompt injection, capability enforcement.\n\nAgent Config Parameters: OpenClaw agent config includes model (llama-3.3-70b default), temperature (0.7), maxTokens (2048), requestType (exploit, vulnerability, attack_vector, tool_usage, general), preferLowRisk (true), enableFallback (true). Monitoring agent config includes model (deepseek-v3.1), interval (300000ms / 5 min), maxIterations (10).\n\nFeatures: Run all agents or select specific agent for testing, progress bar with current agent name and test count, per-agent results with pass/fail count, success rate, and avg response time, detailed error logs for failed tests, configurable agent parameters before test run, export test results as JSON. Benchmark comparison across models is planned." + }, + { + "id": "llm", + "title": "LLM Models", + "section": "AI Operations", + "keywords": ["llm", "models", "language model", "ai models", "claude", "opus", "sonnet", "haiku", "deepseek", "qwen", "mistral", "kimi", "devstral", "glm", "venice", "ollama", "hermes", "nvidia", "provider"], + "content": "The LLM page shows all available AI models across Claude, OpenClaw, Venice, and Ollama providers.\n\nHow it works: The LLM page checks OpenClaw VPS connectivity and lists models from both openclaw.getModels() and claudeProvider.getModels(). 
Stats cards show total model count, NVIDIA free tier availability (via VPS), and Anthropic model access.\n\nProvider Overview: Anthropic (Claude Code CLI) offers Opus 4.6, Sonnet 4.6, Haiku 4.5 via Electron IPC with full MCP access and pay-per-token pricing. OpenClaw (NVIDIA Cloud) offers DeepSeek V3.2, Qwen3 Coder 480B, Qwen 3.5 397B, Mistral Large 675B, Kimi K2, Devstral 123B, GLM5 as free tier via VPS proxy. Venice AI is privacy-focused with two providers: venice-ai (standard) and venice-uncensored (bypasses content filters), supporting DeepSeek, Llama, Qwen. Ollama (Local) runs Hermes 3 (8B) and other local models on localhost:11434 at zero cost, fully offline.\n\nModel Registry: Claude CLI models (claude-provider.ts): claude-opus-4-6 (most capable, highest cost), claude-sonnet-4-6 (balanced speed/quality), claude-haiku-4-5 (fast, lowest cost). OpenClaw models (openclaw.ts, all free via NVIDIA): deepseek-v3.2 (671B params, flagship reasoning), qwen3-coder-480b (coding specialist), qwen-3.5-397b (general purpose), mistral-large-675b (multilingual reasoning), kimi-k2 (Moonshot reasoning), devstral-123b (fast coding), glm5 (Z-AI general). Venice AI models (venice-ai.ts): deepseek-r1-671b (Venice wrapper), llama-3.3-70b (Venice wrapper).\n\nFeatures: Model overview with total count across all providers, provider health checks (online/offline status), NVIDIA free tier model listing (via OpenClaw VPS), Anthropic model listing with pricing tier indicators, refresh button for live connection check, model selection propagates to Chat provider picker." + }, + { + "id": "redteam", + "title": "Red Team", + "section": "Red Team", + "keywords": ["red team", "offensive", "pentest", "penetration test", "bug bounty", "social engineering", "operation", "findings", "severity", "attack", "operation tracking", "vulnerability findings"], + "content": "The Red Team page manages offensive security operations stored in Supabase. 
Each operation has a target, type (pentest, red team, bug bounty, social engineering), status, and associated findings. Findings are linked to operations and tracked with severity (critical/high/medium/low/info), category, and detailed descriptions. Stats cards show total operations, active operations, and finding breakdowns.\n\nOperation Lifecycle (red-team.ts service): planning -> active -> completed, with paused as an alternative (can resume to active). Operation types: pentest (standard penetration test), red_team (full adversary simulation), bug_bounty (bug bounty program engagement), social_engineering (phishing/SE campaigns).\n\nFinding schema (embedded in operation): title (finding name), severity (critical, high, medium, low, info), category (web, network, auth, config, crypto, etc.), description (detailed finding with evidence), evidence (PoC, screenshots, request/response), remediation (fix recommendation).\n\nAI Integration: The hybrid-redteam-agent.ts provides AI-powered analysis for red team operations. It routes between Venice AI (cloud) and Ollama (local) depending on availability. The agent can analyze findings for severity assessment and CVSS scoring, suggest attack chains from discovered vulns, generate remediation reports per finding, auto-categorize findings by CWE/OWASP, and provide timeline view of operation progress.\n\nSupabase Schema (red_team_ops): name (TEXT), target (TEXT), type (TEXT: pentest, red_team, bug_bounty, social_engineering), status (TEXT: planning, active, completed, paused), findings (JSONB[], array of finding objects), progress (INT, 0-100 completion percentage), user_id (UUID, owner with RLS enforced).\n\nFeatures: Create operations with target, type, and scope. Operation types: pentest, red team, bug bounty, social engineering. Status tracking: planning, active, completed, paused. Add findings per operation with severity and category. Stats dashboard: total ops, active ops, critical/high findings. 
Progress bars per operation. Hybrid AI agent (Venice cloud + Ollama local) for analysis. All data persisted to Supabase via redTeamService." + }, + { + "id": "cyber-ops", + "title": "Cyber Ops", + "section": "Red Team", + "keywords": ["cyber ops", "cyberops", "security tools", "scanning", "recon", "reconnaissance", "attack", "defense", "defence", "hacking", "toolkit", "nmap", "nuclei", "sqlmap", "ffuf", "gobuster", "subfinder", "tool catalog", "95 tools"], + "content": "Cyber Ops is the hands-on hacking page with a 95-tool tactical security toolkit with AI-assisted analysis, caching, and analytics. It provides a catalog of 95 security tools organized across 4 tabs (Scanning, Recon, Attack, Defence) that run commands via OpenClaw on the VPS or locally. Results are cached using the cacheService and tracked via analyticsService. The page also integrates Tavily for vulnerability research and web intelligence gathering.\n\nTool Categories (4 Tabs): Scanning (~25 tools): nmap, masscan, zmap, nikto, wapiti, arachni, skipfish, w3af, openvas, nessus, qualys, burpsuite, zap, sslyze, testssl, whatweb, wappalyzer, retire.js, snyk, trivy, grype, clair, anchore, dockle, lynis. Recon (~25 tools): subfinder, amass, assetfinder, findomain, knockpy, dnsrecon, fierce, theHarvester, recon-ng, spiderfoot, maltego, shodan, censys, zoomeye, fofa, hunter.io, phonebook, crt.sh, securitytrails, dnsdumpster, waybackurls, gau, katana, gospider, hakrawler. Attack (~25 tools): sqlmap, ffuf, gobuster, dirb, dirsearch, feroxbuster, nuclei, dalfox, xsstrike, kxss, commix, tplmap, ssrfmap, crlfuzz, cors-scanner, jwt_tool, arjun, paramspider, wfuzz, hydra, john, hashcat, metasploit, crackmapexec, evil-winrm. Defence (~20 tools): wafw00f, cloudflare-bypass, waf-bypass, modsecurity, fail2ban, snort, suricata, ossec, wazuh, aide, rkhunter, chkrootkit, clamav, yara, sigma, elastic, splunk, graylog, ossim, thehive.\n\nExecution Flow: 1. User selects tool + enters target. 2. 
Command template auto-fills with target. 3. Check cacheService for existing results. 4. If no cache: execute via OpenClaw (VPS) or local shell. 5. Stream output to result pane. 6. Cache results (cacheService.set). 7. Track usage (analyticsService.trackToolUse). 8. Optional: AI analysis via OpenClaw on results.\n\nFeatures: 95 security tools across 4 category tabs, target input with command template auto-fill, AI-powered result analysis via OpenClaw, result caching via cacheService (avoid re-running same scans), scan history with timestamps and favorites, analytics tracking (tool usage stats), Tavily web search integration for vuln research, per-tool description and example command, pinned/favorite tools for quick access." + }, + { + "id": "tools-page", + "title": "Tools Registry", + "section": "Red Team", + "keywords": ["tools registry", "custom tools", "tool management", "api endpoint", "cli command", "mcp tool", "script", "tool types", "execution tracking", "tool stats"], + "content": "The Tools Registry provides custom tool management with execution tracking and statistics. Unlike CyberOps (which has a fixed catalog of 95 tools), the Tools page lets you add your own tools with custom configurations, API endpoints, and execution parameters. Tools are stored in Supabase via toolsService and track execution history, success rates, and usage statistics.\n\nTool Types (tools.ts): api_endpoint (external API with URL + headers), cli_command (shell command run via terminal), mcp_tool (MCP server tool via d3bugr/shodan), script (custom script in Python/Bash). Categories: analysis, scanning, exploitation, recon, defense, utility.\n\nFeatures: Add custom tools with name, category, type, endpoint. Tool categories: analysis, scanning, exploitation, recon, defense, utility. Stats dashboard: total tools, active count, total executions, success rate. Execute tool directly from the page. Execution history with timestamps. Delete tools from registry. Supabase-backed persistence." 
+ }, + { + "id": "network-scanner", + "title": "Network Scanner", + "section": "Red Team", + "keywords": ["network scanner", "nmap", "port scan", "scan profiles", "host discovery", "service detection", "os detection", "vulnerability scan", "stealth scan", "udp scan", "firewall scan", "port", "service"], + "content": "The Network Scanner provides a 10-profile nmap GUI with parsed results and scan history. Enter a target, select a scan profile, and the app runs nmap via Electron's shell access. Results are parsed into structured host/port/service data. Scans are managed by the network-scans.ts service which handles execution, parsing, and history persistence to Supabase.\n\nScan Profiles (10): quick (-sV -T4 --top-ports 1000), full (-sV -sC -p- -T3), stealth (-sS -T2 --max-retries 1), vuln (-sV --script vuln -T3), os-detect (-sV -O --osscan-guess), aggressive (-A -T4, version + script + OS + traceroute), udp (-sU --top-ports 100), firewall (-sA -T3, ACK scan for firewall rules), service (-sV --version-intensity 5), script (-sC, default scripts only).\n\nResult Parsing: Nmap XML output is parsed into structured data with hosts[] containing ip, hostname (reverse DNS/PTR), os (OS detection result), state (up/down), and ports[] containing port number, protocol (tcp/udp), state (open/filtered/closed), service name (http, ssh, etc.), version (service version string), and scripts[] (NSE script output).\n\nFeatures: 10 scan profiles (Quick, Full, Stealth, Vuln, OS Detect, Aggressive, UDP, Firewall, Service, Script), parsed results (hosts, ports, services, versions, OS detection), service fingerprinting and banner grabbing, port state indicators (open/filtered/closed) with color coding, real-time scan output streaming, scan history persisted to Supabase, raw nmap output view alongside parsed data, export results as JSON, XML, or text." 
+ }, + { + "id": "security-monitor", + "title": "Security Monitor", + "section": "Blue Team", + "keywords": ["security monitor", "monitoring", "ghost agent", "threat detection", "anomaly", "alert", "ai security", "process monitoring", "system metrics", "deepseek", "automated scan", "auto-monitoring"], + "content": "The Security Monitor provides AI-powered real-time security monitoring with GHOST agent analysis. The monitoringAgent service (codename: GHOST) performs AI-driven security scans. It collects system metrics, running processes, and open connections via the pcMonitor service. Data is sent to DeepSeek V3.1 (via Ollama Cloud) which analyzes it for anomalies, suspicious processes, and security threats. The agent operates in a 10-iteration tool loop, where each iteration can call different analysis tools before generating the final threat report.\n\nGHOST Agent Architecture: Model is DeepSeek V3.1 (671B) via Ollama Cloud. Loop is 10 iterations max per analysis cycle. Interval is 300,000ms (5 minutes) when auto-monitoring. Each iteration, GHOST can call: get_system_metrics (CPU, RAM, disk, network stats), get_processes (running processes with PID, user, CPU%), get_connections (active network connections like netstat), check_ports (listening ports and bound services), analyze_logs (recent syslog/auth.log entries). 
Output is a structured threat report with alerts[] containing severity (info, warning, critical), category (process, network, system, auth), description (what was detected), and recommendation (what to do about it).\n\nMetrics Collected: CPU (usage %, load average, per-core stats), RAM (total, used, free, swap usage), Disk (mount points, usage %, read/write IO), Network (interfaces, bytes in/out, active connections), Procs (PID, user, CPU%, MEM%, command line), Ports (listening ports, bound addresses, service names).\n\nFeatures: AI threat analysis via DeepSeek V3.1 GHOST agent, 10-iteration tool loop per analysis cycle, system metrics collection (CPU, RAM, disk, network), process monitoring and anomaly detection, auto-monitoring toggle (5-minute intervals), on-demand manual scan button, alert severity levels (info, warning, critical), AI-generated recommendations per alert, incident memory (last 50 events). Note: Electron environment detection is required (features require desktop app)." + }, + { + "id": "fleet", + "title": "Fleet Management", + "section": "Blue Team", + "keywords": ["fleet", "fleet management", "endpoint", "device", "vps", "agent swarm", "openclaw", "health check", "heartbeat", "device monitoring", "workstation", "server", "commander", "recon", "hunter", "intel", "analyst", "sentinel"], + "content": "Fleet Management provides endpoint monitoring, VPS agent swarm control, and device health tracking. It tracks all your devices/endpoints via the endpointService. The current machine auto-registers with its hostname, OS, IP, and system metrics. Metrics auto-update every 30 seconds. 
The VPS Status card connects to the OpenClaw agent swarm at your-vps-ip (set via VITE_OPENCLAW_HOST) and shows which agents are online, latency, and active services.\n\nEndpoint Types: workstation (local Kali machine, auto-detected), vps (remote VPS, OpenClaw swarm host), mobile (mobile device endpoint), iot (IoT device endpoint), server (generic server endpoint).\n\nVPS Agent Swarm (9 agents): commander (central orchestrator), recon (reconnaissance specialist), hunter (bug bounty hunter), intel (threat intelligence), analyst (security analyst), sentinel (continuous monitoring), gpt (general purpose assistant), obsidian (knowledge management), main (default/fallback agent).\n\nHealth Check Protocol: Endpoints report health via heartbeats every 30 seconds. The VPS health check connects via SSH to verify agent process status, disk space, and memory usage. Heartbeat payload includes hostname, os, ip, cpu_usage, ram_usage, disk_usage, uptime, and status (online, degraded, offline).\n\nFeatures: Endpoint registry with auto-detection of current device, device metrics (hostname, OS, IP, CPU, RAM, disk), auto-update metrics every 30 seconds (heartbeat), VPS agent swarm status (9 agents), agent health with idle/busy/offline indicators, add/remove endpoints manually via AddEndpointDialog, search and filter by status, latency monitoring to VPS. Remote command execution via SSH is planned." + }, + { + "id": "cve", + "title": "CVE Database", + "section": "Blue Team", + "keywords": ["cve", "vulnerability", "vulnerabilities", "cve database", "nvd", "shodan", "cvss", "severity", "critical", "high", "medium", "low", "exploit", "cwe", "cpe", "patch", "bookmark", "cve-db"], + "content": "The CVE Database provides cloud-synced vulnerability tracking with NVD + Shodan parallel lookup. CVEs are stored in the Supabase cves table, shared in real-time across all CrowByte instances, Claude Code CLI sessions, and the OpenClaw AI chat. 
CVEs can be added three ways: manually through the UI form, auto-saved via the cve-db CLI, or by asking the AI in Chat to look up a CVE. The lookup engine fetches from NVD API v2.0 and Shodan CVEDB in parallel. NVD is primary (CVSS, CWE, CPE, refs), Shodan fills gaps and adds EPSS scores. Data is merged and upserted (no duplicates).\n\nUI Features: Severity-grouped view with collapsible sections with colored borders (Critical=red, High=orange, Medium=yellow, Low=blue), flat list view with sortable table, view mode toggle (grouped vs list), sort by date/CVSS score/CVE ID (asc/desc), 6 stat cards (Critical, High, Medium, Low, Bookmarked, Exploitable), search bar to filter by ID/title/description/products, expandable detail rows with full description/products/references, bookmark toggle per CVE, multi-select with checkboxes + bulk delete, add CVE form with auto severity detection from CVSS, edit CVE inline, exploit status tracking (in-the-wild, poc-available, theoretical).\n\nSupabase Schema (cves): cve_id (TEXT UNIQUE), title (TEXT), severity (TEXT: CRITICAL/HIGH/MEDIUM/LOW), cvss (NUMERIC, 0-10), cvss_vector (TEXT), description (TEXT), products (TEXT, CPE), cwe (TEXT), references (TEXT), notes (TEXT), tags (TEXT), exploit_status (TEXT: in-the-wild/poc-available/theoretical), patch_url (TEXT), nvd_uuid (TEXT), bookmarked (BOOLEAN).\n\nCLI cve-db (/usr/local/bin/cve-db): 'cve-db lookup CVE-2024-3400' for parallel NVD + Shodan auto-save. 'cve-db nvd CVE-2024-3400' and 'cve-db shodan CVE-2024-3400' for individual source queries. 'cve-db search \"RCE\"' for database search. 'cve-db list --severity CRITICAL' and 'cve-db list -n 20' for filtered listing. 'cve-db stats' for severity breakdown. 'cve-db save' with full fields for manual save." 
+ }, + { + "id": "threat-intel", + "title": "Threat Intelligence", + "section": "Blue Team", + "keywords": ["threat intelligence", "threat intel", "ioc", "indicator of compromise", "feed", "osint", "threat feed", "malware", "ip address", "domain", "hash", "md5", "sha256", "stix", "enrichment", "correlation"], + "content": "The Threat Intelligence page aggregates Indicators of Compromise (IOCs) from multiple threat feeds. Feeds are configurable with custom URLs, refresh intervals, and format parsers. IOCs are stored in Supabase with confidence scores and severity levels. The page provides real-time feed management, IOC search/filter, and statistical dashboards with PieChart (by type) and BarChart (by severity) visualizations via Recharts.\n\nIOC Types tracked: ipv4/ipv6 (malicious IP addresses), domain (malicious domains), url (malicious URLs), md5 (file hash MD5), sha1 (file hash SHA-1), sha256 (file hash SHA-256), email (threat actor email), cve (CVE identifiers that correlate with CVE DB).\n\nFeed Management (ThreatFeed schema in Supabase): name (feed display name), url (feed endpoint URL), feed_type (osint, commercial, internal), format (csv, json, stix, plain), enabled (toggle feed on/off), refresh_interval_min (auto-refresh period), last_fetched (last successful fetch timestamp), last_count (IOCs from last fetch), last_error (error message if fetch failed).\n\nIOC Stats Dashboard tracks: Total IOC count across all feeds, IOC breakdown by type (PieChart), IOC breakdown by severity (BarChart), IOC breakdown by feed source, new IOCs today counter.\n\nFeatures: IOC feed aggregation from multiple configurable sources, IOC types (IPv4/6, domain, URL, MD5, SHA1, SHA256, email, CVE), confidence scoring per IOC (0-100), severity classification (critical, high, medium, low, info), feed enable/disable toggle, auto-refresh with configurable intervals, search and filter IOCs by type/severity/feed, PieChart + BarChart statistical dashboards, CVE correlation with CVE 
Database page, Supabase-backed persistence for feeds and IOCs." + }, + { + "id": "mission-planner", + "title": "Mission Planner", + "section": "Intelligence", + "keywords": ["mission planner", "planning", "operation plan", "phases", "tasks", "risk assessment", "pentest plan", "incident response", "cloud audit", "timeline", "feasibility", "strategy", "offensive", "defensive"], + "content": "The Mission Planner provides phase-based strategic planning for offensive and defensive operations. It creates structured operation plans with phases, tasks, risk assessments, and AI feasibility scoring. Plans are stored in localStorage (Supabase migration planned). Each plan has a type, objective, target scope, timeline, and multiple phases with nested tasks. You can create plans from scratch or use one of four built-in templates. The AI Planner generates feasibility scores, risk scores, success probability, and recommendations.\n\nPlan Templates: Web Application Pentest (phases: Reconnaissance 8h -> Vuln Scanning 4h -> Exploitation 16h -> Post-Exploitation 8h -> Reporting 8h, type: pentest). Network Infrastructure Attack (phases: External Recon 16h -> Initial Access 12h -> Lateral Movement 16h -> Priv Esc 8h -> Persistence 8h -> Exfil 8h, type: offensive). Incident Response Plan (phases: Detection & Analysis 2h -> Containment 4h -> Eradication 8h -> Recovery 8h -> Post-Incident Review 4h, type: defensive). Cloud Security Audit (phases: Asset Discovery 8h -> Config Review 16h -> Access Control Audit 8h -> Vuln Assessment 16h -> Remediation 8h, type: defensive).\n\nData Model: MissionPlan has type (offensive, defensive, pentest, incident_response), status (draft, planning, approved, active, completed, failed), objective, targetScope. Phase (nested) has name, description, duration (hours), dependencies, status. Task (nested under Phase) has name, description, assignee, priority (low/med/high/critical), status. Risk has severity, probability (0-100), impact (0-100), mitigation. 
AI Assessment has feasibilityScore, riskScore, successProbability, and recommendations[].\n\nFeatures: Create from template or blank, phase-based planning with duration estimates, task checklists per phase with assignee and priority, risk registry with severity/probability/impact/mitigation, success criteria and failure scenario documentation, timeline with start/end dates, AI feasibility assessment with recommendations, status progression (draft -> planning -> approved -> active -> completed). Stored in localStorage (Supabase migration planned)." + }, + { + "id": "knowledge", + "title": "Knowledge Base", + "section": "Intelligence", + "keywords": ["knowledge base", "kb", "documentation", "research", "findings", "notes", "entries", "file upload", "categories", "priority", "tags", "knowledge management"], + "content": "The Knowledge Base provides cloud-synced documentation, findings, and research storage. Entries are stored in the Supabase knowledge_base table, shared in real-time across all CrowByte instances and Claude Code CLI sessions. Entries can be created from the UI, from the kb CLI tool in any terminal, or by asking the AI in Chat. File attachments are stored in Supabase Storage (50MB limit).\n\nUI Features: Card-based entry display with title, content preview, category badge, priority. Categories: research, vulnerabilities, tools, documentation, news. Priority levels: P1 (critical) through P5 (low) with auto-detection. File upload via drag-and-drop or button (50MB, Supabase Storage). Multi-select with checkboxes + bulk delete. Search bar to filter by title, content, or tags. Category filter tabs. Expandable entries with full content view. Edit entries inline. 
Cloud sync (edits appear on all instances immediately).\n\nSupabase Schema (knowledge_base): title (TEXT), content (TEXT, markdown), category (TEXT: research, vulnerabilities, tools, documentation, news), priority (TEXT: P1 critical through P5 low), tags (TEXT[], array of searchable tags), file_url (TEXT, Supabase Storage URL), user_id (UUID, owner with RLS enforced).\n\nCLI kb (/usr/local/bin/kb): 'kb save \"Title\" --content \"...\" --category vulnerabilities --priority P1 --tags \"tag1,tag2\"' to save an entry. 'nmap -sV target.com | kb pipe \"Title\" --category research --priority P3' to pipe command output. 'kb search \"RCE\"' for full-text search. 'kb recent -n 10' for recent entries. 'kb list --category tools' to filter by category." + }, + { + "id": "bookmarks", + "title": "Bookmarks", + "section": "Intelligence", + "keywords": ["bookmarks", "saved urls", "links", "references", "resources", "categories", "tags", "favicon", "bookmark manager", "url manager"], + "content": "The Bookmarks page saves and organizes URLs, resources, and references with categories and tags. URLs are stored in the Supabase bookmarks table, organized by user-customizable categories. New users get 7 default categories and starter bookmarks. Each bookmark has a title, URL, optional description, category, tags, and auto-fetched favicon via Google's S2 favicon service.\n\nFeatures: Card-based display with title, URL, description, category badge. 7 default categories: Tools, CVEs, News, Cyber, Research, Documentation, General. Custom category creation with icon and color picker. Category filter tabs. Tag support for cross-category search. Search by title, URL, or description (ilike on 3 fields). Auto-fetched favicons via Google S2. External link button to open URL in browser. Edit and delete bookmarks. Default starter bookmarks for new users. 
Cloud sync via Supabase.\n\nSupabase Schema: bookmarks table has title (TEXT), url (TEXT), description (TEXT), category (TEXT), tags (TEXT[]), favicon_url (TEXT). bookmark_categories table has name (TEXT), icon (TEXT, Lucide icon name), color (TEXT, hex color code)." + }, + { + "id": "memory", + "title": "Memory", + "section": "Intelligence", + "keywords": ["memory", "memory facts", "key-value", "persistent", "fact storage", "ai memory", "session memory", "remember"], + "content": "The Memory page provides key-value fact storage for persistent AI memory across sessions. It is a simple key-value fact store backed by Supabase (memory_facts table). Each fact has a key (label) and value (content), associated with the authenticated user. This is separate from Claude Code's memory system (.mci files, state.md). It is designed for user-facing persistent facts that the AI agents can reference.\n\nSupabase Schema (memory_facts): id (UUID, primary key), key (TEXT, fact label e.g. 'preferred_tools'), value (TEXT, fact content e.g. 'nuclei, ffuf, sqlmap'), user_id (UUID, owner with RLS enforced), created_at (TIMESTAMPTZ), updated_at (TIMESTAMPTZ).\n\nFeatures: CRUD operations (create, read, update, delete memory facts), key-value pairs with timestamps, inline editing for both key and value, confirmation dialog for deletions, sorted by updated_at (most recent first), auth-required (facts scoped to user), Supabase-backed persistence." + }, + { + "id": "analytics", + "title": "Analytics", + "section": "Intelligence", + "keywords": ["analytics", "usage metrics", "statistics", "stats", "charts", "dashboard", "cve stats", "tool usage", "api usage", "supabase health", "recharts", "pie chart", "line chart"], + "content": "The Analytics page provides usage metrics, CVE statistics, API health, and Supabase dashboard. It combines tool usage statistics from analyticsService, CVE library stats from the cves table, and infrastructure health from the SupabaseHealthDashboard component. 
Charts are rendered with Recharts: LineChart (activity timeline), AreaChart (trends), BarChart (tool usage), PieChart (CVE severity distribution), RadarChart (capability coverage).\n\nDashboard Tabs: Overview (activity timeline, tool usage stats, API health cards, recent activity log). CVE Intelligence (CVE library stats including total/critical/high/medium/low, severity distribution PieChart, recent critical CVEs list). Infrastructure (Supabase health dashboard with table row counts, connection status, RLS status, storage usage). AI Usage (model usage breakdown, token consumption, provider availability, cost tracking).\n\nAnalytics Service (analytics.ts): trackToolUse(tool, target) records tool execution. trackApiCall(provider, tokens) records API usage. getToolStats() returns tool usage counts. getActivityLog(limit) returns recent activity entries. getApiUsageStats() returns provider breakdown. ActivityLog interface includes action (tool_use, api_call, scan, search), tool (tool/provider name), target (target domain/IP), timestamp (ISO), success (boolean).\n\nFeatures: Tool usage statistics with execution counts, CVE library stats (total, by severity, exploitable), Supabase health dashboard (table counts, connections, RLS), activity timeline with LineChart, severity distribution PieChart, RadarChart for capability coverage, API provider usage breakdown, recent activity log with timestamps, refresh button for live data." + }, + { + "id": "terminal", + "title": "Terminal", + "section": "System", + "keywords": ["terminal", "xterm", "xterm.js", "tmux", "shell", "bash", "zsh", "fish", "node-pty", "console", "command line", "cli", "scrollback", "multi-tab"], + "content": "The Terminal page provides a full xterm.js terminal with tmux integration, multi-tab support, and 50K scrollback. It uses xterm.js with FitAddon (auto-resize), WebLinksAddon (clickable URLs), and SearchAddon (Ctrl+F search). Each tab spawns an independent shell via Electron's node-pty. 
Shell presets include tmux (default), zsh, bash, and fish. Tmux sessions are independent per tab with 50,000 line scrollback buffer.\n\nxterm.js Configuration: fontFamily is JetBrains Mono monospace, fontSize 14, scrollback 50000, cursorBlink true, cursorStyle bar, renderer WebGL (fallback: canvas). Addons loaded: FitAddon (auto-resize to container), WebLinksAddon (clickable URLs in output), SearchAddon (Ctrl+F text search). IPC bridge (Electron main process): terminal:create (spawn node-pty process), terminal:write (send input to pty), terminal:resize (resize pty dimensions), terminal:destroy (kill pty process).\n\nTmux Controls: When using tmux, the context menu (right-click) provides split horizontal (Ctrl+B, %), split vertical (Ctrl+B, \"), navigate panes (Ctrl+B, arrow keys), zoom pane (Ctrl+B, z), Tab key cycles tmux panes (intercepted by app), reset terminal (clear + new session).\n\nFeatures: xterm.js terminal with full ANSI/VT100 support, tmux default shell with per-tab sessions, multiple terminal tabs with shell type indicators, shell presets (tmux, zsh, bash, fish), 50,000 line scrollback buffer, search within terminal (Ctrl+F), clickable web links in output, JetBrains Mono font with WebGL renderer, full access to Kali Linux 7000+ tools, copy/paste (Ctrl+Shift+C/V)." + }, + { + "id": "logs", + "title": "Logs", + "section": "System", + "keywords": ["logs", "logging", "events", "errors", "warnings", "debug", "info", "critical", "log viewer", "error tracking", "application logs"], + "content": "The Logs page provides application event logging with level filtering, search, and error tracking. It displays application events captured by the LogsProvider context (defined in src/contexts/logs.tsx). Events include API calls, errors, warnings, and info messages from across the app. The sidebar shows an unread error count badge so you know when something needs attention. 
The badge resets when you visit the Logs page.\n\nLog Architecture (LogsProvider context): addLog(level, message, source), getLogs(filter?), clearLogs(), getErrorCount(), markAllRead(). Log levels: debug (verbose debugging info), info (normal operations), warn (non-critical issues), error (failures and exceptions), critical (system-level failures). Log categories (source field): system (app lifecycle, routing, init), security (auth, encryption, monitoring), ai (Claude, OpenClaw, agents), network (API calls, scans, connections), supabase (database operations).\n\nFeatures: Real-time log streaming from LogsProvider context, error count badge in sidebar (unread errors), log level filtering (debug, info, warn, error, critical), search within log messages, timestamp and source tracking per entry, color-coded severity (blue/green/yellow/red), clear all logs button, auto-scroll to newest entries." + }, + { + "id": "settings", + "title": "Settings", + "section": "System", + "keywords": ["settings", "configuration", "config", "preferences", "api keys", "profile", "workspace", "theme", "model selection", "tavily", "ollama", "mcp servers", "appearance"], + "content": "The Settings page provides application configuration, API keys, profile management, and preferences.\n\nSettings Categories: Profile (display name, profile picture via Supabase Storage, workspace name shown in sidebar header). AI Configuration (default LLM model selection, Claude budget limits, OpenClaw model preference, temperature settings). API Keys (Tavily API key, Ollama endpoint, custom API endpoints). MCP Servers (server connection status, endpoint URLs, tool browser). Appearance (intro animation toggle for splash screen on/off, theme customization is planned). Supabase (connection URL, anon key, project ID, health check). 
VPS / OpenClaw (VPS host, gateway port, agent configuration).\n\nSupabase Schema (user_settings): workspace_name (TEXT, shown in sidebar header), profile_picture (TEXT, Supabase Storage URL), default_model (TEXT, preferred AI model), intro_animation (BOOLEAN, show splash screen), theme (TEXT, dark only currently), tavily_api_key (TEXT, encrypted at rest), user_id (UUID, owner with RLS enforced).\n\nFeatures: LLM model selection (Claude + OpenClaw models), API key management (Tavily, Ollama, custom), MCP server configuration, workspace naming (shown in sidebar header), profile picture upload (Supabase Storage), intro animation toggle (splash screen on/off). Import/export settings and theme customization are planned." + }, + { + "id": "supabase", + "title": "Supabase Backend", + "section": "Integrations", + "keywords": ["supabase", "database", "postgresql", "postgres", "backend", "realtime", "rls", "row level security", "storage", "auth", "tables", "schema", "cloud database", "persistence"], + "content": "Supabase is the backbone of CrowByte's persistence layer providing cloud PostgreSQL database, auth, real-time subscriptions, and storage. Every page that stores data uses Supabase as the single source of truth. Multiple CrowByte instances and CLI tools (cve-db, kb) share the same data in real-time.\n\nFull Table Schema: Core data tables: cves (CVE tracking with cve_id UNIQUE, severity, cvss, vector, CWE, CPE, refs, exploit_status, bookmarked), knowledge_base (research entries with title, content, category, priority, tags, file_url), custom_agents (agent configs with name, instructions, model, category, capabilities JSONB, starters), red_team_ops (operations with name, target, type, status, findings JSONB[], progress), bookmarks (URLs with title, url, description, category, tags, favicon_url), bookmark_categories (categories with name, icon, color). 
User tables: profiles (user profiles linked to auth.users), user_settings (preferences with workspace_name, profile_picture, default_model, theme). System tables: endpoints (fleet devices with hostname, os, ip, type, status, metrics JSONB), analytics (usage stats with action, tool, target, timestamp, success), memory_facts (memory page with key, value, user_id, timestamps). Threat Intel tables: threat_iocs (IOC entries with ioc_type, value, feed_name, confidence, severity, tags, metadata), threat_feeds (feed configs with name, url, feed_type, format, enabled, refresh_interval). Tool tables: tools (custom tools with name, category, tool_type, endpoint_url, description).\n\nRow Level Security (RLS): All tables have RLS policies that scope data to the authenticated user via auth.uid(). Each user only sees their own data even though all instances share the same Supabase project.\n\nRealtime Subscriptions: Dashboard subscribes to 4 channels: cves (INSERT shows new CVE alert), knowledge_base (INSERT shows new entry notification), red_team_ops (UPDATE refreshes operation status), bookmarks (INSERT updates bookmark count).\n\nFeatures: PostgreSQL with Row Level Security (RLS) on all tables, real-time subscriptions for live data sync (4 channels), Edge Functions for serverless logic, storage buckets for file uploads (50MB), email/password + GitHub OAuth authentication, shared across all CrowByte instances and CLI tools, health dashboard in Analytics page." + }, + { + "id": "mcp", + "title": "MCP Protocol", + "section": "Integrations", + "keywords": ["mcp", "model context protocol", "tools", "d3bugr", "shodan", "filesystem", "memory engine", "fetch", "mcp server", "tool access", "mcporter", "stdio", "sse", "security tools"], + "content": "MCP (Model Context Protocol) is how Claude accesses external tools inside CrowByte. When you use the Claude provider in Chat, CrowByte spawns 'claude -p' via Electron IPC. 
Claude Code CLI has its own MCP config (in .env-unfiltered/.claude/) giving it access to security tools, file operations, network intelligence, and persistent memory. On the VPS side, OpenClaw agents use mcporter, a skill-based bridge that injects tool descriptions into system prompts and executes 'mcporter call d3bugr.<tool>'.\n\nMCP Servers: D3bugr (142 security tools, Docker on VPS: Nmap, Nuclei, SQLMap, browser automation with CDP + Stagehand, DNS, SSL, SSRF, XSS, subdomain enum). Shodan (network intelligence: IP lookup, CVE search, DNS lookup/reverse, CPE lookup, device search, EPSS scores and exploit data). Filesystem (file operations: read, write, search, manage files across /mnt/bounty and /home/rainkode, includes bigfile tools). Memory Engine (persistent knowledge: SQLite-backed brain DB, full-text + semantic search, topic tracking, session management). Fetch (HTTP requests: web scraping, API testing, data retrieval).\n\nMCP Architecture: Local MCP (Claude Code CLI) uses mcp-client.ts with StdioClientTransport (spawns MCP server as child process) and filesystemMCP.ts (17 tools for file operations). Cloud MCP (OpenClaw VPS) uses mcp-client-cloud.ts with SSE transport (connects to remote MCP server) and mcporter (skill bridge on VPS routing tool calls to d3bugr Docker). In-app MCP server mcp-supabase-server.ts exposes Supabase tables as MCP tools with 17 tools for CRUD on CVEs, KB, bookmarks, agents, endpoints, analytics.\n\nFeatures: 5 MCP servers (d3bugr, shodan, filesystem, memory-engine, fetch), 142 security tools via d3bugr MCP, StdioClientTransport for local MCP (child process), SSE transport for cloud MCP connections, in-app Supabase MCP server (17 CRUD tools), mcporter bridge on VPS for agent tool routing." 
+ }, + { + "id": "mcp-page", + "title": "MCP Management", + "section": "Integrations", + "keywords": ["mcp management", "mcp page", "connectors", "tavily", "server status", "tool browser", "mcp configuration", "cybersec search"], + "content": "The MCP Management page provides MCP server connections UI, tool browser, and Tavily integration. It is distinct from the MCP Protocol doc section and provides a management UI for MCP server connections. It shows configured connectors, their status, capabilities, and includes a Tavily-powered search integration for cybersecurity intelligence. Each connector displays name, type, status, endpoint, last sync time, data flow direction, and capabilities list.\n\nConfigured Connectors: Tavily CyberSec Search (type: AI Search, endpoint: https://mcp.tavily.com/mcp/, data flow: bidirectional, capabilities: Web Search, Q&A, Content Extraction, Threat Intel, CVE Lookup). MCP Server browser (from mcp-client.ts config): PC Monitor (npx @anthropic/mcp-server-pc-monitor), Tavily (npx @anthropic/mcp-server-tavily), Filesystem (/usr/local/bin/mcp-filesystem binary), Memory (npx @anthropic/mcp-server-memory).\n\nTavily Integration: The page includes a built-in Tavily search form for cybersecurity intelligence gathering. Search results can be bookmarked directly to the Bookmarks page with auto-categorization. Features include Tavily cybersec search with domain context, search results with title/URL/content snippet, one-click bookmark to Supabase (auto-category), copy result URL to clipboard.\n\nFeatures: MCP connector status dashboard, server health indicators (connected/disconnected), capability listing per server, Tavily cybersec search integration, bookmark search results to Supabase. Adding new MCP server connections is planned." 
+ }, + { + "id": "ai-providers", + "title": "AI Providers", + "section": "Integrations", + "keywords": ["ai providers", "claude provider", "openclaw provider", "venice ai", "venice uncensored", "ollama", "hermes", "nvidia proxy", "provider implementation", "streaming", "api", "cors"], + "content": "Deep dive into all 6 AI provider implementations in CrowByte.\n\n1. Claude Code CLI (claude-provider.ts): Spawns 'claude -p' as child process via Electron IPC. Command: claude -p --output-format stream-json. Models: Opus 4.6, Sonnet 4.6, Haiku 4.5. IPC: electronAPI.claudeChat(prompt, options). Features: Full MCP access, session persistence, budget control. Stream: JSON events (assistant, result, tool_use, error).\n\n2. OpenClaw (openclaw.ts): VPS agent swarm at your-vps-ip (VITE_OPENCLAW_HOST). Gateway: https://your-vps-hostname:18789. Proxy: NVIDIA proxy on port 19990 (re-adds model prefixes). Models: DeepSeek V3.2, Qwen3 Coder 480B, Qwen 3.5, Mistral Large, Kimi K2, Devstral, GLM5. Agents: 9 specialized (commander, recon, hunter, intel, analyst, sentinel, gpt, obsidian, main). Tools: execute_command, dispatch_agent (agentic chat). Auth: Bearer token via gateway password.\n\n3. Venice AI (venice-ai.ts): Privacy-focused AI, standard provider. API: https://api.venice.ai/api/v1/chat/completions. Models: DeepSeek R1 671B, Llama 3.3 70B, Qwen 2.5 Coder. Auth: Venice API key (VITE_VENICE_API_KEY). Features: Streaming, system prompts, privacy-first.\n\n4. Venice Uncensored (venice-uncensored.ts): Venice with prompt engineering to bypass content filters. Same API as Venice AI but with uncensored system prompt. Purpose: Security research requiring unrestricted responses.\n\n5. Venice Electron (venice-ai-electron.ts): Venice routed through Electron main process. Purpose: Bypass CORS restrictions in Electron renderer. IPC: electronAPI.veniceChat(prompt, model).\n\n6. Ollama Hermes (ollama-hermes.ts): Local Ollama instance. Endpoint: http://localhost:11434/api/chat. 
Model: Hermes 3 (8B) by NousResearch. Cost: $0 (fully local, GPU required). Features: Streaming, offline operation, no API key needed." + }, + { + "id": "nvd-shodan", + "title": "NVD & Shodan", + "section": "Integrations", + "keywords": ["nvd", "shodan", "cvedb", "nist", "vulnerability lookup", "cvss", "epss", "exploit prediction", "cwe", "cpe", "parallel lookup", "vulnerability intelligence"], + "content": "NVD and Shodan provide dual-source vulnerability intelligence integrated into the CVE workflow.\n\nNVD API v2.0 (services.nvd.nist.gov/rest/json/cves/2.0): Provides CVSS v3.1/v3.0/v2 scores with full vector strings, severity classification (Critical/High/Medium/Low), CWE weakness IDs, CPE product matching (affected software/versions), official reference URLs, NVD UUID for unique identification. No API key required (rate-limited).\n\nShodan CVEDB (cvedb.shodan.io/cve/CVE-ID): Provides EPSS (Exploit Prediction Scoring System) scores, exploit availability tracking, supplementary CVSS data, additional reference URLs. No API key required.\n\nIntegration Flow: cve-db lookup runs both APIs in parallel. NVD API returns CVSS, severity, CWE, CPE, refs, vector. Shodan returns EPSS score, exploit status, extra refs. Merge step uses NVD as primary, Shodan fills gaps + adds EPSS. Upsert saves to Supabase (on_conflict=cve_id)." + }, + { + "id": "tech-stack", + "title": "Tech Stack", + "section": "Development", + "keywords": ["tech stack", "technology", "framework", "react", "typescript", "electron", "vite", "tailwind", "radix", "shadcn", "recharts", "framer motion", "xterm", "supabase", "infrastructure"], + "content": "Technologies powering CrowByte Terminal.\n\nFrontend: Framework is React 18 + TypeScript. Desktop Runtime is Electron 39. Build Tool is Vite 7. UI Components are Radix UI (shadcn/ui). Styling is Tailwind CSS v3. Animation is Framer Motion. Charts are Recharts. Terminal is xterm.js + node-pty. Markdown is ReactMarkdown + remark-gfm. 
State management uses React Query + useState.\n\nBackend & Infrastructure: Database is Supabase (PostgreSQL). Auth is Supabase Auth (email + GitHub). Storage is Supabase Storage (50MB). AI (Local) is Claude Code CLI via IPC. AI (Remote) is OpenClaw + NVIDIA Cloud. AI (Privacy) is Venice AI + Ollama. VPS is Hostinger (Ubuntu, Docker). Proxy is NVIDIA Proxy (port 19990). MCP Bridge is mcporter (stdio). Host OS is Kali Linux 2025.\n\nBuild Commands: 'npm run dev' for Vite dev server (hot reload). 'npm run build' for production web build. 'npm run build:electron:win' for Windows Electron installer. 'npm run build:electron:linux' for Linux Electron package." + }, + { + "id": "electron-arch", + "title": "Electron Architecture", + "section": "Development", + "keywords": ["electron", "main process", "ipc", "ipc handlers", "node-pty", "window management", "cache manager", "sqlite", "preload", "browser window", "frameless", "titlebar"], + "content": "Electron Architecture covers the main process, IPC handlers, node-pty, window management, and cache.\n\nMain Process (electron/main.ts): BrowserWindow creates app window (frameless, custom titlebar). node-pty spawns terminal processes for Terminal page. Claude IPC spawns 'claude -p' child process and streams JSON. Venice IPC proxies Venice API calls (bypass CORS). System Info provides CPU, RAM, disk metrics via os/fs modules. Cache Manager provides SQLite-based cache for scan results.\n\nIPC Channels: Terminal IPC: terminal:create (spawn node-pty with shell, cols, rows), terminal:write (send input to pty stdin), terminal:resize (resize pty cols/rows), terminal:destroy (kill pty process), terminal:data (pty output to renderer callback). Claude IPC: claude:chat (send prompt, receive stream-json events), claude:abort (kill claude process to stop generation). Venice IPC: venice:chat (proxy Venice API, bypass CORS). 
System IPC: system:metrics (CPU, RAM, disk, network stats), system:processes (running process list), window:minimize, window:maximize, window:close.\n\nCache Manager (electron/cache-manager.ts): SQLite-based cache for scan results and API responses. CacheService singleton provides get(key) to retrieve cached item with TTL check, set(key, value, opts) to store with TTL and content hash, invalidate(key) to remove specific entry, cleanup() to remove expired entries, stats() for hit count, size, and expired count. Cache entry fields: key (cache key as tool:target hash), value (cached result, compressed), content_hash (SHA-256 of value), ttl_seconds, expires_at, hit_count.\n\nFeatures: Frameless window with custom titlebar (TitleBar.tsx), node-pty terminal spawning with tmux default, Claude Code CLI integration via IPC, Venice AI CORS proxy via IPC, system metrics collection (CPU/RAM/disk/net), SQLite cache manager with TTL and content hashing, window controls (minimize, maximize, close), preload script for secure IPC bridge." + }, + { + "id": "data-security", + "title": "Data & Security Layer", + "section": "Development", + "keywords": ["data security", "encryption", "aes", "aes-256-gcm", "pbkdf2", "key derivation", "credential vault", "device fingerprint", "web crypto", "key management", "key rotation", "salt", "iv"], + "content": "The Data and Security Layer covers encryption at rest, credential vault, key derivation, and device fingerprinting.\n\nEncryption Architecture (encryption.ts): Algorithm is AES-256-GCM (authenticated encryption). Key Derivation uses PBKDF2 with 100,000 iterations, SHA-256 hash, and random 16-byte salt stored with ciphertext. IV is random 12 bytes per encryption. Output is Base64(salt + iv + ciphertext + authTag).\n\nCredential Storage (credentialStorage.ts): Encrypted credential vault. 
saveCredentials(email, password) gets device fingerprint (SHA-256 of hardware info), derives encryption key via PBKDF2 (fingerprint as passphrase), encrypts credentials with AES-256-GCM, stores encrypted blob in localStorage. getCredentials() gets device fingerprint, derives same key via PBKDF2, decrypts blob from localStorage, returns { email, password } or null. clearCredentials() removes encrypted blob from localStorage.\n\nDevice Fingerprinting (deviceFingerprint.ts): Generates unique device ID for encryption key derivation. Inputs: navigator.userAgent, navigator.language, Intl.DateTimeFormat().resolvedOptions().timeZone, screen.width + screen.height + screen.colorDepth, navigator.platform, navigator.hardwareConcurrency. Process: concatenate all, SHA-256 hash. Output: 64-char hex string (unique per device). Purpose: encryption key salt so credentials only decrypt on same device.\n\nKey Management (keyManagement.ts): Centralized key management service for generating, storing, and rotating encryption keys using Web Crypto API. Features: key generation via Web Crypto API (AES-GCM, 256-bit), key export/import in JWK format, key rotation support (re-encrypt with new key), secure key storage in memory (not persisted).\n\nFeatures: AES-256-GCM encryption for credentials at rest, PBKDF2 key derivation (100K iterations, SHA-256), device fingerprinting for credential binding, Web Crypto API for all crypto operations, key management with rotation support, credentials only decrypt on the original device." 
+ }, + { + "id": "cli-tools", + "title": "CLI Tools", + "section": "Development", + "keywords": ["cli tools", "cve-db", "kb", "command line", "terminal tools", "lookup", "search", "save", "pipe", "bulk lookup", "nvd", "knowledge base cli"], + "content": "CLI Tools covers cve-db and kb command-line tools shared across all Claude Code instances.\n\ncve-db (/usr/local/bin/cve-db) — CVE lookup, search, and management: 'cve-db lookup CVE-2024-3400' for parallel NVD + Shodan auto-save to Supabase. 'cve-db nvd CVE-2024-3400' for NVD API v2.0 only (no auto-save). 'cve-db shodan CVE-2024-3400' for Shodan CVEDB only (no auto-save). 'cve-db search \"RCE\"' for full-text search. 'cve-db list --severity CRITICAL' to filter by severity. 'cve-db list -n 20' for last 20 entries. 'cve-db stats' for severity breakdown. 'cve-db save \"CVE-2024-3400\" \"PAN-OS Command Injection\" --cvss 10.0 --severity CRITICAL --desc \"OS command injection in GlobalProtect\" --products \"paloaltonetworks:pan-os\" --cwe \"CWE-78\" --tags \"firewall,rce\" --exploit \"in-the-wild\"' for manual save with all fields. Bulk lookup: for cve in CVE-2024-3400 CVE-2024-21887; do cve-db lookup \"$cve\"; done.\n\nkb (/usr/local/bin/kb) — Knowledge base save, search, and pipe: 'kb save \"Title\" --content \"...\" --category vulnerabilities --priority P1 --tags \"paloalto,rce,critical\"' to save an entry. 'nmap -sV -sC target.com | kb pipe \"Title\" --category research --priority P3' to pipe command output directly to KB. 'kb search \"RCE\"' for full-text search. 'kb recent -n 10' for last 10 entries. 'kb list --category tools' to filter by category.\n\nFeatures: Both tools available in all Claude Code CLI sessions. Both tools share the same Supabase backend as the UI. Changes appear in the app in real-time (Realtime subscriptions). AI agents (Claude, OpenClaw) know about these tools and can use them. cve-db uses parallel NVD + Shodan with Python merge. kb can pipe any command output directly to knowledge base." 
+ }, + { + "id": "roadmap", + "title": "Roadmap", + "section": "Development", + "keywords": ["roadmap", "planned features", "possible new features", "feature enhancements", "future", "upcoming", "completed", "milestones", "todo", "feature requests", "development plan", "improvements"], + "content": "Roadmap covers completed milestones, possible new features, and feature enhancements for CrowByte Terminal.\n\nRecently Completed: Venice AI integration (standard + uncensored + Electron IPC), Memory page for persistent fact storage, Agent Testing Lab with multi-agent benchmarking, Threat Intelligence feeds with IOC tracking, Analytics dashboard with Recharts visualizations, MCP Management page with Tavily integration, LLM Models page with provider overview, AES-256-GCM credential encryption with device fingerprinting, Supabase Health Dashboard in Analytics, 10 nmap scan profiles in Network Scanner.\n\nPossible New Features: Conversation workspace with cross-session chat history, multiple chat sessions / conversation threads, file uploads inside chat conversations (images, documents), code execution sandbox within the app, push notifications for critical security events, plugin system for third-party tool integrations, collaborative mode for shared operator workspaces, automated recon pipelines (subfinder -> httpx -> nuclei chain), STIX/TAXII support for Threat Intel feeds, remote command execution via Fleet endpoints.\n\nFeature Enhancements: User-configurable MCP server connections directly from Settings, export reports as PDF/JSON in HackerOne and Bugcrowd-ready formats, custom theme builder with reusable color presets, Mission Planner migration from localStorage to Supabase, import/export for agents, bookmarks, and settings." 
+ } +] diff --git a/apps/desktop/src/global.d.ts b/apps/desktop/src/global.d.ts index f66d58d..cc7e191 100644 --- a/apps/desktop/src/global.d.ts +++ b/apps/desktop/src/global.d.ts @@ -129,8 +129,6 @@ interface ElectronAPI { executeCommand: (command: string) => Promise; // Run system command runCommand: (command: string, args?: string[]) => Promise; - // Tor check - checkTor: () => Promise; // NVD CVE API proxy fetchCVEs: (year: string) => Promise; // Claude Code CLI diff --git a/apps/desktop/src/lib/platform.ts b/apps/desktop/src/lib/platform.ts index 6a2a46d..84407a1 100644 --- a/apps/desktop/src/lib/platform.ts +++ b/apps/desktop/src/lib/platform.ts @@ -1,9 +1,19 @@ /** * Platform Context - * Provides platform tag and org context for all Supabase queries. + * Provides build target detection + platform tag + org context. * Every service that writes to Supabase should import this. */ +// ── Build Target (compile-time, injected by Vite define) ────────────────── +export const BUILD_TARGET: 'web' | 'electron' = + (typeof __BUILD_TARGET__ !== 'undefined' ? 
__BUILD_TARGET__ : 'electron') as 'web' | 'electron'; +export const IS_WEB = BUILD_TARGET === 'web'; +export const IS_ELECTRON = BUILD_TARGET === 'electron'; + +/** Runtime safety-net — checks if Electron IPC bridge is actually present */ +export const hasElectronAPI = (): boolean => + typeof window !== 'undefined' && !!(window as any).electronAPI; + // Platform from env (set in .env per build) export const PLATFORM = import.meta.env.VITE_PLATFORM || 'linux'; diff --git a/apps/desktop/src/pages/AIAgent.tsx b/apps/desktop/src/pages/AIAgent.tsx index f6e67dc..0064c0a 100644 --- a/apps/desktop/src/pages/AIAgent.tsx +++ b/apps/desktop/src/pages/AIAgent.tsx @@ -1,191 +1,144 @@ /** - * Search Agent Page — Tavily-powered intelligent search - * Features: search history, follow-up suggestions, quick actions, collapsible reasoning + * CrowByte Support Agent — AI-powered support chat with diagnostics, + * escalation, and real-time push notifications. */ import { useState, useEffect, useRef, useCallback } from "react"; import { Input } from "@/components/ui/input"; import { ScrollArea } from "@/components/ui/scroll-area"; +import { Badge } from "@/components/ui/badge"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; import { useToast } from "@/hooks/use-toast"; -import { searchAgent, type SearchAgentResponse } from "@/services/searchAgent"; import { - Robot, - PaperPlaneTilt, - MagnifyingGlass, + supportAgent, + type SupportMessage, + type DiagnosticResult, + type HealthCheck, + type EscalationTicket, + type UserNotification, + type TicketPriority, +} from "@/services/support-agent"; +import { + Headset, Brain, - Clock, - Trash, - ArrowSquareOut, - Sparkle, - CircleNotch, + Pulse, + Bug, + BookOpen, + Bell, + Wrench, CaretDown, CaretRight, - Crosshair, - ShieldWarning, - Detective, - Binoculars, - Wrench, - Virus, + PaperPlaneTilt, + CircleNotch, + X, + CheckCircle, + Warning, + Info, } from 
"@phosphor-icons/react"; import { motion, AnimatePresence } from "framer-motion"; -// ── Types ────────────────────────────────────────────────────────────────────── - -interface Source { - title: string; - url: string; - content: string; - score?: number; -} - -interface Step { - action: string; - observation: string; -} - -interface Message { - id: string; - role: "user" | "agent"; - content: string; - sources?: Source[]; - steps?: Step[]; - followUps?: string[]; - timestamp: Date; -} - -interface HistoryEntry { - id: string; - query: string; - messages: Message[]; - timestamp: number; -} - -// ── Constants ────────────────────────────────────────────────────────────────── +// ── Constants ──────────────────────────────────────────────────────────────────── -const HISTORY_KEY = "crowbyte_search_history"; -const MAX_HISTORY = 15; +const STORAGE_KEY = "crowbyte_support_history"; +const MAX_MESSAGES = 50; const QUICK_ACTIONS = [ - { label: "Latest CVEs", icon: ShieldWarning, template: "What are the latest critical CVEs disclosed this week?" 
}, - { label: "Exploit DB", icon: Crosshair, template: "Search Exploit-DB for recent public exploits" }, - { label: "OSINT", icon: Detective, template: "OSINT techniques for reconnaissance on " }, - { label: "Threat Intel", icon: Binoculars, template: "Latest threat intelligence on active threat actors" }, - { label: "Tool Discovery", icon: Wrench, template: "Best security tools for " }, - { label: "Malware Analysis", icon: Virus, template: "Recent malware campaigns and analysis techniques" }, + { label: "System Status", icon: Pulse, action: "Run a full system diagnostic and show me the health status" }, + { label: "How do I...", icon: BookOpen, template: "How do I " }, + { label: "Report Bug", icon: Bug, template: "I found a bug: " }, + { label: "Talk to Human", icon: Headset, action: "I need to talk to a human" }, ]; const CAPABILITIES = [ - { icon: MagnifyingGlass, text: "Deep web research with real-time Tavily AI search" }, - { icon: ShieldWarning, text: "CVE analysis with exploitability and patch status" }, - { icon: Wrench, text: "Security tool discovery and comparison" }, - { icon: Binoculars, text: "Threat intelligence on actors, TTPs, and IOCs" }, + { icon: Brain, text: "RAG-powered answers from CrowByte documentation" }, + { icon: Wrench, text: "Live system diagnostics and health checks" }, + { icon: Headset, text: "Escalation to human support with full context" }, + { icon: Bell, text: "Real-time notifications from your admin team" }, ]; -// ── Helpers ──────────────────────────────────────────────────────────────────── +// ── Helpers ────────────────────────────────────────────────────────────────────── -function extractDomain(url: string): string { +function loadMessages(): SupportMessage[] { try { - return new URL(url).hostname.replace("www.", ""); + const raw = localStorage.getItem(STORAGE_KEY); + if (!raw) return []; + const parsed = JSON.parse(raw) as SupportMessage[]; + return parsed.map((m) => ({ ...m, timestamp: new Date(m.timestamp) })); } 
catch { - return url; + return []; } } -function generateFollowUps(query: string, sources: Source[]): string[] { - // Extract meaningful multi-word phrases from source titles (not random single words) - const stopWords = new Set(['the','this','that','with','from','about','have','been','and','for','are','was','were','has','its','new','how','what','why','who','all','can','will','may','more','most','than','into','over','also','but','not','our','your','them','their','some','any','each','both','few','many','much','such','very','just','only']); - - const titles = sources.slice(0, 5).map(s => s.title); - const keyPhrases = titles - .flatMap(t => { - // Extract 2-3 word phrases that look like real topics - const words = t.split(/[\s\-:,|]+/).filter(w => w.length > 2 && !stopWords.has(w.toLowerCase())); - const phrases: string[] = []; - for (let i = 0; i < words.length - 1; i++) { - if (words[i] && words[i+1] && !stopWords.has(words[i].toLowerCase())) { - phrases.push(`${words[i]} ${words[i+1]}`); - } - } - return phrases; - }) - .filter(p => p.length > 5) - .slice(0, 5); - - const suggestions: string[] = []; - - // Generate contextual security follow-ups - const queryLower = query.toLowerCase(); - - if (queryLower.includes('cve') || queryLower.includes('vulnerabilit')) { - suggestions.push(`Exploit PoC and active exploitation status`); - if (keyPhrases[0]) suggestions.push(`${keyPhrases[0]} — patch availability and mitigations`); - suggestions.push(`Related CVEs and attack chain analysis`); - } else if (queryLower.includes('malware') || queryLower.includes('ransomware')) { - suggestions.push(`IOCs and detection signatures for these campaigns`); - if (keyPhrases[0]) suggestions.push(`${keyPhrases[0]} — MITRE ATT&CK mapping`); - suggestions.push(`Incident response playbook for this threat`); - } else if (queryLower.includes('apt') || queryLower.includes('threat actor')) { - suggestions.push(`TTPs and infrastructure used by these groups`); - suggestions.push(`Recent campaigns 
targeting my industry`); - suggestions.push(`Detection rules and hunting queries`); - } else if (queryLower.includes('exploit') || queryLower.includes('attack')) { - suggestions.push(`Defense and mitigation strategies`); - if (keyPhrases[0]) suggestions.push(`${keyPhrases[0]} — technical deep dive`); - suggestions.push(`Similar attack techniques and variants`); - } else { - // Generic security follow-ups based on extracted topics - if (keyPhrases[0]) suggestions.push(`${keyPhrases[0]} — deeper technical analysis`); - if (keyPhrases[1]) suggestions.push(`${keyPhrases[1]} — impact and remediation`); - suggestions.push(`${query} — latest developments and advisories`); - } - - return suggestions.slice(0, 3); +function saveMessages(msgs: SupportMessage[]) { + const trimmed = msgs.slice(-MAX_MESSAGES); + localStorage.setItem(STORAGE_KEY, JSON.stringify(trimmed)); } -function loadHistory(): HistoryEntry[] { - try { - const raw = localStorage.getItem(HISTORY_KEY); - return raw ? JSON.parse(raw) : []; - } catch { - return []; - } +function makeMessage( + role: SupportMessage["role"], + content: string, + extra?: Partial, +): SupportMessage { + return { + id: crypto.randomUUID(), + role, + content, + timestamp: new Date(), + ...extra, + }; } -function saveHistory(entries: HistoryEntry[]) { - localStorage.setItem(HISTORY_KEY, JSON.stringify(entries.slice(0, MAX_HISTORY))); +function statusDot(status: HealthCheck["status"]) { + if (status === "ok") return "bg-emerald-500"; + if (status === "warning") return "bg-amber-500"; + return "bg-red-500"; } -function truncate(text: string, len: number): string { - return text.length > len ? text.slice(0, len) + "..." 
: text; +function statusIcon(status: HealthCheck["status"]) { + if (status === "ok") return ; + if (status === "warning") return ; + return ; } -function timeAgo(ts: number): string { - const diff = Date.now() - ts; - const mins = Math.floor(diff / 60000); - if (mins < 1) return "just now"; - if (mins < 60) return `${mins}m ago`; - const hrs = Math.floor(mins / 60); - if (hrs < 24) return `${hrs}h ago`; - const days = Math.floor(hrs / 24); - return `${days}d ago`; +function notifIcon(type: UserNotification["type"]) { + if (type === "critical" || type === "alert") return ; + if (type === "warning") return ; + if (type === "update") return ; + return ; } -// ── Components ───────────────────────────────────────────────────────────────── +// ── DiagnosticCard ─────────────────────────────────────────────────────────────── -function ReasoningSteps({ steps }: { steps: Step[] }) { - const [open, setOpen] = useState(false); +function DiagnosticCard({ result }: { result: DiagnosticResult }) { + const [open, setOpen] = useState(true); - if (!steps.length) return null; + const scoreColor = + result.score >= 80 ? "text-emerald-400" : result.score >= 50 ? "text-amber-400" : "text-red-400"; + const barColor = + result.score >= 80 ? "bg-emerald-500" : result.score >= 50 ? "bg-amber-500" : "bg-red-500"; return ( -
+
+ {open && ( -
- {steps.map((step, i) => ( -
- {step.action} - {step.observation} +
+ {/* Health checks */} +
+ {result.checks.map((check) => ( +
+ + {check.name} + {check.message} +
+ ))} +
+ + {/* Score bar */} +
+
+
- ))} +
)} @@ -209,418 +178,478 @@ function ReasoningSteps({ steps }: { steps: Step[] }) { ); } -function SourcesList({ sources }: { sources: Source[] }) { - if (!sources.length) return null; +// ── EscalationDialog ───────────────────────────────────────────────────────────── - return ( - - ); -} - -function FollowUpSuggestions({ - suggestions, - onSelect, +function EscalationDialog({ + onSubmit, + onCancel, + loading, }: { - suggestions: string[]; - onSelect: (q: string) => void; + onSubmit: (subject: string, priority: TicketPriority) => void; + onCancel: () => void; + loading: boolean; }) { - if (!suggestions.length) return null; + const [subject, setSubject] = useState(""); + const [priority, setPriority] = useState("medium"); return ( -
- {suggestions.map((s, i) => ( + +
+ + Create Support Ticket +
+ + setSubject(e.target.value)} + placeholder="Brief description of your issue..." + className="bg-zinc-900 border-zinc-700 text-sm" + autoFocus + /> + +
+ + +
+ - ))} -
+ +
+
); } -function SearchHistory({ - entries, - onSelect, - onClear, - open, - onToggle, +// ── NotificationBanner ─────────────────────────────────────────────────────────── + +function NotificationBanner({ + notification, + onDismiss, }: { - entries: HistoryEntry[]; - onSelect: (entry: HistoryEntry) => void; - onClear: () => void; - open: boolean; - onToggle: () => void; + notification: UserNotification; + onDismiss: (id: string) => void; }) { + const borderColor = + notification.type === "critical" || notification.type === "alert" + ? "border-red-500/30" + : notification.type === "warning" + ? "border-amber-500/30" + : "border-blue-500/30"; + return ( -
+ + {notifIcon(notification.type)} +
+ {notification.title} +

{notification.message}

+
- - {open && entries.length > 0 && ( - -
- {entries.map((entry) => ( - - ))} - + + ); +} + +// ── TicketBadge ────────────────────────────────────────────────────────────────── + +function TicketBadge({ ticketId }: { ticketId: string }) { + return ( + + + Ticket #{ticketId.slice(0, 8)} + + ); +} + +// ── Chat Message ───────────────────────────────────────────────────────────────── + +function ChatMessage({ msg }: { msg: SupportMessage }) { + // System messages — centered, muted + if (msg.role === "system" || msg.role === "notification") { + return ( + +
+ {msg.notification ? notifIcon(msg.notification.type) : } + {msg.content} +
+
+ ); + } + + // User bubble — right aligned + if (msg.role === "user") { + return ( + +
+

{msg.content}

+ + {new Date(msg.timestamp).toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" })} + +
+
+ ); + } + + // Agent / diagnostic — left aligned with icon + return ( + +
+
+ +
+
+ {msg.content}
- - )} - + + {/* Diagnostic card */} + {msg.diagnostics && } + + {/* Ticket badge */} + {msg.ticketId && ( +
+ +
+ )} + + + {new Date(msg.timestamp).toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" })} + +
+
+
+
+ ); +} + +// ── Welcome State ──────────────────────────────────────────────────────────────── + +function WelcomeState({ onAction }: { onAction: (text: string, isTemplate: boolean) => void }) { + return ( +
+
+ +

CrowByte Support

+

+ Get help with CrowByte features, diagnose issues, or talk to a human. +

+
+ + {/* Capabilities */} +
+ {CAPABILITIES.map((cap, i) => ( +
+ + {cap.text} +
+ ))} +
+ + {/* Quick actions grid */} +
+ {QUICK_ACTIONS.map((qa) => ( + + ))} +
); } -// ── Main Page ────────────────────────────────────────────────────────────────── +// ── Header ─────────────────────────────────────────────────────────────────────── + +function Header({ + notifCount, + onRunDiagnostics, + diagLoading, +}: { + notifCount: number; + onRunDiagnostics: () => void; + diagLoading: boolean; +}) { + return ( +
+
+ +
+

CrowByte Support

+

AI-powered help desk

+
+
+ +
+ {/* Online indicator */} + + + Online + + + {/* Notification bell */} + + + {/* Run Diagnostics */} + +
+
+ ); +} + +// ── Main Page ──────────────────────────────────────────────────────────────────── export default function AIAgent() { const { toast } = useToast(); - const [messages, setMessages] = useState([]); + const [messages, setMessages] = useState(loadMessages); const [input, setInput] = useState(""); const [isLoading, setIsLoading] = useState(false); - const [isInitialized, setIsInitialized] = useState(false); - const [isInitializing, setIsInitializing] = useState(false); - const [history, setHistory] = useState(loadHistory); - const [historyOpen, setHistoryOpen] = useState(false); + const [diagLoading, setDiagLoading] = useState(false); + const [escalationOpen, setEscalationOpen] = useState(false); + const [escalationLoading, setEscalationLoading] = useState(false); + const [notifications, setNotifications] = useState([]); + const [bannerNotifs, setBannerNotifs] = useState([]); const scrollRef = useRef(null); const inputRef = useRef(null); + // Persist messages on change + useEffect(() => { + saveMessages(messages); + }, [messages]); + // Auto-scroll useEffect(() => { if (scrollRef.current) { scrollRef.current.scrollTop = scrollRef.current.scrollHeight; } - }, [messages, isLoading]); + }, [messages, isLoading, escalationOpen, bannerNotifs]); - // Auto-init + // Subscribe to push notifications useEffect(() => { - initializeAgent(); + // Load existing + supportAgent.getNotifications().then(setNotifications).catch(() => {}); + + const unsub = supportAgent.subscribeToNotifications((notif) => { + setNotifications((prev) => [notif, ...prev]); + setBannerNotifs((prev) => [notif, ...prev]); + // Also inject as system message in chat + setMessages((prev) => [ + ...prev, + makeMessage("notification", `${notif.title}: ${notif.message}`, { notification: notif }), + ]); + }); + + return unsub; }, []); - // ── Init ───────────────────────────────────────────────────────────────────── + // ── Actions ───────────────────────────────────────────────────────────────── - const 
initializeAgent = async () => { - setIsInitializing(true); - try { - const tavilyApiKey = import.meta.env.VITE_TAVILY_API_KEY; - if (!tavilyApiKey) { - toast({ - title: "Configuration Error", - description: "VITE_TAVILY_API_KEY not set in .env", - variant: "destructive", - }); - return; - } - - await searchAgent.initialize({ tavilyApiKey, maxResults: 5 }); - setIsInitialized(true); - } catch (error) { - toast({ - title: "Initialization Failed", - description: error instanceof Error ? error.message : "Failed to start agent", - variant: "destructive", - }); - } finally { - setIsInitializing(false); - } - }; - - // ── Search ─────────────────────────────────────────────────────────────────── + const addMessage = useCallback((msg: SupportMessage) => { + setMessages((prev) => [...prev, msg].slice(-MAX_MESSAGES)); + }, []); const sendMessage = useCallback( - async (query?: string) => { - const text = query || input.trim(); - if (!text || isLoading) return; - - const userMsg: Message = { - id: crypto.randomUUID(), - role: "user", - content: text, - timestamp: new Date(), - }; - - setMessages((prev) => [...prev, userMsg]); + async (text?: string) => { + const content = text || input.trim(); + if (!content || isLoading) return; + + const userMsg = makeMessage("user", content); + const updated = [...messages, userMsg].slice(-MAX_MESSAGES); + setMessages(updated); setInput(""); setIsLoading(true); try { - const response: SearchAgentResponse = await searchAgent.search({ query: text }); - const followUps = generateFollowUps(text, response.sources); - - const agentMsg: Message = { - id: crypto.randomUUID(), - role: "agent", - content: response.answer, - sources: response.sources, - steps: response.steps, - followUps, - timestamp: new Date(), - }; + const reply = await supportAgent.chat(updated); + addMessage(reply); - setMessages((prev) => { - const updated = [...prev, agentMsg]; - // Save to history - const entry: HistoryEntry = { - id: crypto.randomUUID(), - query: text, - 
messages: [userMsg, agentMsg], - timestamp: Date.now(), - }; - const newHistory = [entry, ...history.filter((h) => h.query !== text)].slice(0, MAX_HISTORY); - setHistory(newHistory); - saveHistory(newHistory); - return updated; - }); - } catch (error) { - setMessages((prev) => [ - ...prev, - { - id: crypto.randomUUID(), - role: "agent", - content: `Error: ${error instanceof Error ? error.message : "Search failed"}`, - timestamp: new Date(), - }, - ]); + // If agent suggests escalation and user confirms + const intent = supportAgent.classifyIntent(content); + if (intent === "escalation") { + setEscalationOpen(true); + } + } catch (err: any) { + addMessage( + makeMessage("agent", `Error: ${err.message || "Failed to get response"}. Try running diagnostics or escalating.`), + ); + toast({ title: "Chat Error", description: err.message, variant: "destructive" }); } finally { setIsLoading(false); } }, - [input, isLoading, history, toast], + [input, isLoading, messages, addMessage, toast], ); - // ── History ────────────────────────────────────────────────────────────────── + const runDiagnostics = useCallback(async () => { + setDiagLoading(true); + addMessage(makeMessage("agent", "Running system diagnostics...")); - const restoreHistory = useCallback((entry: HistoryEntry) => { - // Restore messages with Date objects (they get serialized as strings in localStorage) - const restored = entry.messages.map((m) => ({ - ...m, - timestamp: new Date(m.timestamp), - })); - setMessages(restored); - }, []); + try { + const result = await supportAgent.runDiagnostics(); + addMessage( + makeMessage("agent", result.summary, { diagnostics: result }), + ); + } catch (err: any) { + addMessage(makeMessage("agent", `Diagnostics failed: ${err.message}`)); + toast({ title: "Diagnostic Error", description: err.message, variant: "destructive" }); + } finally { + setDiagLoading(false); + } + }, [addMessage, toast]); - const clearHistory = useCallback(() => { - setHistory([]); - 
localStorage.removeItem(HISTORY_KEY); - }, []); + const handleEscalate = useCallback( + async (subject: string, priority: TicketPriority) => { + setEscalationLoading(true); + try { + const lastDiag = [...messages] + .reverse() + .find((m) => m.diagnostics)?.diagnostics; + + const ticket: EscalationTicket = { + subject, + priority, + conversation: messages, + diagnostics: lastDiag, + }; - // ── Quick action ───────────────────────────────────────────────────────────── + const ticketId = await supportAgent.escalate(ticket); + setEscalationOpen(false); + addMessage( + makeMessage("agent", `Ticket created successfully. A human will review your case shortly.`, { + ticketId, + }), + ); + } catch (err: any) { + toast({ title: "Escalation Failed", description: err.message, variant: "destructive" }); + } finally { + setEscalationLoading(false); + } + }, + [messages, addMessage, toast], + ); + + const dismissBanner = useCallback( + (id: string) => { + setBannerNotifs((prev) => prev.filter((n) => n.id !== id)); + supportAgent.dismissNotification(id).catch(() => {}); + }, + [], + ); const handleQuickAction = useCallback( - (template: string) => { - // If template ends with a space, put cursor there for user to type - if (template.endsWith(" ")) { - setInput(template); + (text: string, isTemplate: boolean) => { + if (isTemplate) { + setInput(text); inputRef.current?.focus(); } else { - sendMessage(template); + sendMessage(text); } }, [sendMessage], ); - // ── Render: Init Screen ────────────────────────────────────────────────────── + // ── Render ────────────────────────────────────────────────────────────────── - if (!isInitialized) { - return ( -
- {/* Header */} -
- -
-
-
- -

Initialize search agent to begin

-

Tavily-powered deep web search for security research

-
+ const unreadNotifCount = notifications.filter((n) => !n.read).length; -
- {CAPABILITIES.map((cap, i) => ( -
- - {cap.text} -
- ))} -
+ return ( +
+
- + {/* Notification banners */} + + {bannerNotifs.length > 0 && ( +
+ {bannerNotifs.slice(0, 3).map((n) => ( + + ))}
-
-
- ); - } - - // ── Render: Chat Interface ─────────────────────────────────────────────────── + )} + - return ( -
- {/* Header */} -
- - {/* History bar */} - {history.length > 0 && ( - setHistoryOpen(!historyOpen)} - /> - )} - - {/* Messages */} + {/* Chat area */}
- {messages.length === 0 && ( -
- -

Ask anything. Search the web for security research.

-
+ {messages.length === 0 ? ( + + ) : ( + + {messages.map((msg) => ( + + ))} + )} - - {messages.map((msg) => ( - - {msg.role === "user" ? ( - /* User bubble */ -
-

{msg.content}

- - {msg.timestamp.toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" })} - -
- ) : ( - /* Agent message — no bubble bg */ -
-
- -
- {/* Answer text */} -
- {msg.content} -
- - {/* Sources */} - {msg.sources && } - - {/* Reasoning steps */} - {msg.steps && } - - {/* Follow-ups */} - {msg.followUps && ( - - )} - - - {msg.timestamp.toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" })} - -
-
-
- )} -
- ))} -
- {/* Loading indicator */} {isLoading && ( - +
- - Searching... + + Thinking...
)} + + {/* Inline escalation dialog */} + + {escalationOpen && ( + setEscalationOpen(false)} + loading={escalationLoading} + /> + )} +
{/* Bottom: quick actions + input */}
- {/* Quick actions */} + {/* Quick action chips */}
- {QUICK_ACTIONS.map((action) => ( + {QUICK_ACTIONS.map((qa) => ( ))}
- {/* Input */} + {/* Input row */}
); } - -// ── Header ───────────────────────────────────────────────────────────────────── - -function Header({ - initialized, - initializing, - onInit, -}: { - initialized: boolean; - initializing?: boolean; - onInit?: () => void; -}) { - return ( -
-
- -
-

Search Agent

-

Powered by Tavily

-
-
- -
- {initialized ? ( - - - Online - - ) : ( - - )} -
-
- ); -} diff --git a/apps/desktop/src/pages/AgentBuilder.tsx b/apps/desktop/src/pages/AgentBuilder.tsx index e26f6a2..7121383 100644 --- a/apps/desktop/src/pages/AgentBuilder.tsx +++ b/apps/desktop/src/pages/AgentBuilder.tsx @@ -23,6 +23,8 @@ interface Tool { endpoint: string; } +const MAX_DISPLAYED_AGENTS = 5; + const AgentBuilder = () => { const { toast } = useToast(); const [activeTab, setActiveTab] = useState("configure"); @@ -52,19 +54,36 @@ const AgentBuilder = () => { }, []); const loadAgents = async () => { - // TODO: Enable when custom_agents table exists in Supabase - // CREATE TABLE custom_agents (id uuid PK, user_id uuid, name text, description text, - // system_prompt text, model text, category text, example_prompts text[], capabilities jsonb, - // enable_web_search bool, enable_code_execution bool, enable_file_upload bool, - // status text DEFAULT 'active', created_at timestamptz DEFAULT now(), updated_at timestamptz DEFAULT now()); - // - // Uncomment when table is created: - // try { - // const data = await customAgentsService.getAgents(); - // setAgents(data); - // } catch { /* silent */ } + // custom_agents table may not exist yet — fail gracefully + try { + const data = await customAgentsService.getAgents(); + setAgents(data); + } catch { + // Table doesn't exist yet or another error — show empty state silently + setAgents([]); + } }; + const loadAgentIntoBuilder = (agent: CustomAgent) => { + setAgentName(agent.name); + setDescription(agent.description || ""); + setInstructions(agent.system_prompt); + setSelectedModel(agent.model); + setCategory(agent.category || "security"); + setConversationStarters(agent.example_prompts.length > 0 ? 
agent.example_prompts : [""]); + setCapabilities({ + webSearch: agent.enable_web_search, + codeExecution: agent.enable_code_execution, + mcpTools: agent.enable_mcp, + fileAccess: agent.enable_file_access, + }); + setActiveTab("configure"); + toast({ + title:"Agent Loaded", + description:`Loaded ${agent.name} into builder`, + }); + }; + const handleSaveAgent = async () => { try { if (!agentName || !instructions) { @@ -294,18 +313,49 @@ const AgentBuilder = () => { -
- + - -
-
- - + +
+ + +
+

Saved Agents

+ {agents.length} +
+ {agents.length === 0 ? ( +

+ No saved agents yet. Configure and save one to reuse it here. +

+ ) : ( +
+ {agents.slice(0, MAX_DISPLAYED_AGENTS).map((agent) => ( +
+
+

{agent.name}

+

{agent.category || "general"} • {agent.model}

+
+ +
+ ))} +
+ )} +
+
+ + diff --git a/apps/desktop/src/pages/CVE.tsx b/apps/desktop/src/pages/CVE.tsx index 0f377c4..7a0a6e8 100644 --- a/apps/desktop/src/pages/CVE.tsx +++ b/apps/desktop/src/pages/CVE.tsx @@ -15,6 +15,7 @@ import { } from"@/components/ui/select"; import { Separator } from"@/components/ui/separator"; import { supabase } from"@/lib/supabase"; +import { useAuth } from"@/contexts/auth"; import { useToast } from"@/hooks/use-toast"; import { motion, AnimatePresence } from"framer-motion"; import { formatDistanceToNow } from"date-fns"; @@ -76,6 +77,7 @@ const SEVERITY_CONFIG: Record { + const { user } = useAuth(); const [cves, setCves] = useState([]); const [loading, setLoading] = useState(true); const [isDialogOpen, setIsDialogOpen] = useState(false); @@ -195,7 +197,7 @@ const CVEPage = () => { if (error) throw error; toast({ title:"CVE updated" }); } else { - const { error } = await supabase.from("cves").insert({ ...payload, user_id:"348309de-1cb4-4fd5-9f55-fa8a749375a5" }); + const { error } = await supabase.from("cves").insert({ ...payload, user_id: user?.id ?? "" }); if (error) throw error; toast({ title:"CVE added" }); } @@ -763,7 +765,7 @@ const CVEPage = () => { {SEVERITY_ORDER.map(sev => { const group = groupedBySeverity[sev] || []; if (group.length === 0) return null; - const config = SEVERITY_CONFIG[sev]; + const config = SEVERITY_CONFIG[sev] || SEVERITY_CONFIG.MEDIUM; const isCollapsed = collapsedGroups.has(sev); return ( diff --git a/apps/desktop/src/pages/CloudSecurity.tsx b/apps/desktop/src/pages/CloudSecurity.tsx index a4a6c02..d882f4d 100644 --- a/apps/desktop/src/pages/CloudSecurity.tsx +++ b/apps/desktop/src/pages/CloudSecurity.tsx @@ -58,6 +58,7 @@ import { Cpu, FileCode, X, + HardHat, } from "@phosphor-icons/react"; import { motion, AnimatePresence } from "framer-motion"; @@ -1892,6 +1893,12 @@ export default function CloudSecurity() {
+ {/* Preview Banner */} +
+ + Cloud Security is in preview — sample data shown. Connect your cloud provider in Settings to enable live scanning. +
+ {/* Tabs */}
diff --git a/apps/desktop/src/pages/Connectors.tsx b/apps/desktop/src/pages/Connectors.tsx index 03ffa33..52d94bf 100644 --- a/apps/desktop/src/pages/Connectors.tsx +++ b/apps/desktop/src/pages/Connectors.tsx @@ -11,7 +11,7 @@ import { useState, useMemo } from 'react'; import { motion, AnimatePresence } from 'framer-motion'; -import { Shield, ShieldCheck, ShieldWarning, UserCheck, Laptop, Bird, Fire, MagnifyingGlass, Stack, Eye, Bug, Plug, PlugsConnected, CaretRight, GearSix, Pulse, Robot, Lock, LockOpen, Warning, CheckCircle, XCircle, Clock, Lightning, ArrowRight, ArrowSquareOut, Key, Globe, DesktopTower, Database, ChartBar, Users, FileX, Funnel, GridFour, ListBullets, Monitor, Terminal, ShippingContainer, TreeStructure } from "@phosphor-icons/react"; +import { Shield, ShieldCheck, ShieldWarning, UserCheck, Laptop, Bird, Fire, MagnifyingGlass, Stack, Eye, Bug, Plug, PlugsConnected, CaretRight, GearSix, Pulse, Robot, Lock, LockOpen, Warning, CheckCircle, XCircle, Clock, Lightning, ArrowRight, ArrowSquareOut, Key, Globe, DesktopTower, Database, ChartBar, Users, FileX, Funnel, GridFour, ListBullets, Monitor, Terminal, ShippingContainer, TreeStructure, HardHat } from "@phosphor-icons/react"; import { Card, CardContent, CardHeader, CardTitle, CardDescription } from '@/components/ui/card'; import { Button } from '@/components/ui/button'; import { Input } from '@/components/ui/input'; @@ -251,6 +251,12 @@ export default function Connectors() { {/* Content */}
+ {/* Preview Banner */} +
+ + Connectors are in preview — integrations are not yet active. Configure credentials to prepare for launch. +
+ {/* Active Agents Banner (when connectors are connected) */} {activeAgentIds.size > 0 && ( { setIpStatus({ ip: 'Unavailable', isVPN: false, - isTor: false, isProxy: false, connectionType: 'unknown', lastChecked: new Date(), @@ -622,18 +621,18 @@ const Dashboard = () => { - {ipStatus.error || ipStatus.ip === 'Unavailable' ? 'Offline' : ipStatus.isVPN || ipStatus.isTor ? 'Protected' : 'Connected'} + {ipStatus.error || ipStatus.ip === 'Unavailable' ? 'Offline' : ipStatus.isVPN ? 'Protected' : 'Connected'} @@ -648,7 +647,7 @@ const Dashboard = () => { ) : ( <> - {ipStatus.isVPN ? 'VPN' : ipStatus.isTor ? 'Tor' : 'IP'}: + {ipStatus.isVPN ? 'VPN' : 'IP'}: {ipStatus.ip} {ipStatus.country && ( @@ -680,14 +679,6 @@ const Dashboard = () => { - {/* Tor Status */} - - - - {ipStatus.isTor ? 'Tor' : 'No Tor'} - - - {/* VPN Provider */} {ipStatus.vpnProvider && ( <> @@ -706,7 +697,7 @@ const Dashboard = () => { )} {/* ISP */} - {ipStatus.isp && !ipStatus.isVPN && !ipStatus.isTor && ( + {ipStatus.isp && !ipStatus.isVPN && ( <> ISP: {ipStatus.isp} diff --git a/apps/desktop/src/pages/DetectionLab.tsx b/apps/desktop/src/pages/DetectionLab.tsx index fc351ca..971750a 100644 --- a/apps/desktop/src/pages/DetectionLab.tsx +++ b/apps/desktop/src/pages/DetectionLab.tsx @@ -621,6 +621,7 @@ function DetectionLab() { onChange={(e) => setGenDescription(e.target.value)} className="bg-zinc-950 border-zinc-700 text-zinc-100 placeholder:text-zinc-600 min-h-[100px] text-sm resize-none" /> +

Pattern-based generation. AI-powered generation coming soon.

{/* Controls row */} diff --git a/apps/desktop/src/pages/Documentation.tsx b/apps/desktop/src/pages/Documentation.tsx index 0ed70cf..0563d38 100644 --- a/apps/desktop/src/pages/Documentation.tsx +++ b/apps/desktop/src/pages/Documentation.tsx @@ -54,7 +54,7 @@ const NAV_GROUPS: NavGroup[] = [ color:"text-blue-500/70", icon: Lightning, items: [ - { id:"ai-agent", label:"Search AI Agent", icon: Brain, badge:"New" }, + { id:"ai-agent", label:"Support Agent", icon: Brain, badge:"New" }, { id:"agent-builder", label:"Agent Builder", icon: Robot }, { id:"agent-testing", label:"Agent Testing", icon: TestTube }, { id:"llm", label:"LLM Models", icon: Sparkle }, diff --git a/apps/desktop/src/pages/Downloads.tsx b/apps/desktop/src/pages/Downloads.tsx index e8466e0..d36385f 100644 --- a/apps/desktop/src/pages/Downloads.tsx +++ b/apps/desktop/src/pages/Downloads.tsx @@ -25,6 +25,7 @@ interface PackageInfo { platform: string; ext: string; description: string; + comingSoon?: boolean; } interface Manifest { @@ -146,11 +147,12 @@ export default function Downloads() { { name: "macOS", file: `macos/CrowByte-${version}-arm64.dmg`, - size: FILE_SIZES[`CrowByte-${version}-arm64.dmg`] || "~130 MB", + size: "Coming Soon", icon: AppleLogo, platform: "macOS 12+ (Apple Silicon)", ext: ".dmg", - description: "Drag & drop install", + description: "Coming soon — macOS build in testing", + comingSoon: true, }, ]; @@ -353,11 +355,13 @@ function PackageCard({ initial={{ opacity: 0, y: 4 }} animate={{ opacity: 1, y: 0 }} className={`group flex items-center gap-4 px-4 py-3.5 rounded-lg border transition-all duration-200 ${ - isPaid - ? "border-white/[0.06] bg-white/[0.02] hover:border-white/[0.12] hover:bg-white/[0.04] cursor-pointer" - : "border-white/[0.04] bg-white/[0.01] opacity-60 cursor-not-allowed" + pkg.comingSoon + ? "border-yellow-500/20 bg-yellow-500/[0.03] opacity-70 cursor-not-allowed" + : isPaid + ? 
"border-white/[0.06] bg-white/[0.02] hover:border-white/[0.12] hover:bg-white/[0.04] cursor-pointer" + : "border-white/[0.04] bg-white/[0.01] opacity-60 cursor-not-allowed" }`} - onClick={isPaid ? onDownload : undefined} + onClick={!pkg.comingSoon && isPaid ? onDownload : undefined} > {/* Platform icon */}
- {finding.cve_ids.length > 0 ? ( + {(finding.cve_ids || []).length > 0 ? ( - {finding.cve_ids.length} + {(finding.cve_ids || []).length} ) : ( -- @@ -1105,11 +1105,11 @@ export default function Findings() { {/* CVE / CWE Lists */}
- {finding.cve_ids.length > 0 && ( + {(finding.cve_ids || []).length > 0 && (

CVEs

- {finding.cve_ids.map(cve => ( + {(finding.cve_ids || []).map(cve => ( {cve} @@ -1117,11 +1117,11 @@ export default function Findings() {
)} - {finding.cwe_ids.length > 0 && ( + {(finding.cwe_ids || []).length > 0 && (

CWEs

- {finding.cwe_ids.map(cwe => ( + {(finding.cwe_ids || []).map(cwe => ( {cwe} @@ -1132,11 +1132,11 @@ export default function Findings() {
{/* Tags */} - {finding.tags.length > 0 && ( + {(finding.tags || []).length > 0 && (

Tags

- {finding.tags.map(tag => ( + {(finding.tags || []).map(tag => ( {tag} diff --git a/apps/desktop/src/pages/Logs.tsx b/apps/desktop/src/pages/Logs.tsx index 51f7c43..aaed593 100644 --- a/apps/desktop/src/pages/Logs.tsx +++ b/apps/desktop/src/pages/Logs.tsx @@ -4,7 +4,8 @@ import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/com import { Button } from '@/components/ui/button'; import { ScrollArea } from '@/components/ui/scroll-area'; import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@/components/ui/select'; -import { WarningCircle, CheckCircle, Info, XCircle, Warning, Funnel, ArrowsClockwise, Scroll, Trash } from "@phosphor-icons/react"; +import { WarningCircle, CheckCircle, Info, XCircle, Warning, Funnel, ArrowsClockwise, Scroll, Trash, DownloadSimple } from "@phosphor-icons/react"; +import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from '@/components/ui/dropdown-menu'; import { motion } from 'framer-motion'; import { format } from 'date-fns'; import { useToast } from '@/hooks/use-toast'; @@ -80,6 +81,47 @@ export default function Logs() { setViewMode('all'); }; + const exportLogs = (fmt: 'csv' | 'json') => { + const data = filteredLogs.map(log => ({ + timestamp: format(log.timestamp, "yyyy-MM-dd'T'HH:mm:ss"), + level: log.level, + tag: log.tag, + action: log.action, + details: log.details || '', + })); + + let content: string; + let mimeType: string; + let ext: string; + + if (fmt === 'json') { + content = JSON.stringify(data, null, 2); + mimeType = 'application/json'; + ext = 'json'; + } else { + const header = 'timestamp,level,tag,action,details'; + const rows = data.map(r => + `${r.timestamp},${r.level},${r.tag},"${r.action.replace(/"/g, '""')}","${String(r.details).replace(/"/g, '""')}"` + ); + content = [header, ...rows].join('\n'); + mimeType = 'text/csv'; + ext = 'csv'; + } + + const blob = new Blob([content], { type: mimeType }); + const url = URL.createObjectURL(blob); 
+ const a = document.createElement('a'); + a.href = url; + a.download = `crowbyte-logs-${format(new Date(), 'yyyy-MM-dd-HHmmss')}.${ext}`; + a.click(); + URL.revokeObjectURL(url); + + toast({ + title: `Exported ${data.length} logs`, + description: `Saved as ${ext.toUpperCase()} file`, + }); + }; + const handleDeleteAll = () => { if (logs.length === 0) return; @@ -130,6 +172,23 @@ export default function Logs() { Refresh + + + + + + exportLogs('csv')}>Export as CSV + exportLogs('json')}>Export as JSON + +
); } diff --git a/apps/desktop/src/services/analytics.ts b/apps/desktop/src/services/analytics.ts index 61d0f01..c83fb01 100644 --- a/apps/desktop/src/services/analytics.ts +++ b/apps/desktop/src/services/analytics.ts @@ -1,15 +1,17 @@ /** - * Analytics Service - * Tracks user activity and API usage with real-time updates + * Analytics Service — DISABLED + * + * All methods are no-ops. CrowByte does not collect telemetry, usage data, + * or tracking information. This service exists only to satisfy imports from + * legacy code. All activity logging is handled locally by logging.ts. + * + * No data is sent to any external service. No Supabase tables are queried. */ -import { supabase } from '@/lib/supabase'; -import type { RealtimeChannel } from '@supabase/supabase-js'; - export interface ActivityLog { id?: string; user_id?: string; - activity_type: 'search' | 'chat' | 'api_call' | 'cve_lookup' | 'bookmark' | 'knowledge_add' | 'memory_add' | 'login' | 'settings_change'; + activity_type: string; service_name: string; action: string; details?: Record; @@ -18,8 +20,6 @@ export interface ActivityLog { response_time_ms?: number; status?: 'success' | 'error' | 'pending'; error_message?: string; - ip_address?: string; - user_agent?: string; created_at?: string; } @@ -38,236 +38,18 @@ export interface ApiUsageStats { } class AnalyticsService { - private realtimeChannel: RealtimeChannel | null = null; - - /** - * Log an activity - */ - async logActivity(_activity: Omit): Promise { - // activity_logs table does not exist in Supabase yet — skip insert to avoid 400 spam - // TODO: create activity_logs table in Supabase, then re-enable - return; - } - - /** - * Log a search activity - */ - async logSearch(params: { - service: string; - query: string; - resultsCount: number; - responseTimeMs: number; - status?: 'success' | 'error'; - error?: string; - }): Promise { - await this.logActivity({ - activity_type: 'search', - service_name: params.service, - action: 'search', - query: 
params.query, - results_count: params.resultsCount, - response_time_ms: params.responseTimeMs, - status: params.status || 'success', - error_message: params.error, - }); - } - - /** - * Log an API call - */ - async logApiCall(params: { - service: string; - action: string; - responseTimeMs: number; - status?: 'success' | 'error'; - error?: string; - details?: Record; - }): Promise { - await this.logActivity({ - activity_type: 'api_call', - service_name: params.service, - action: params.action, - response_time_ms: params.responseTimeMs, - status: params.status || 'success', - error_message: params.error, - details: params.details, - }); - } - - /** - * Log a chat message - */ - async logChat(params: { - model: string; - messageLength: number; - responseTimeMs: number; - status?: 'success' | 'error'; - }): Promise { - await this.logActivity({ - activity_type: 'chat', - service_name: 'venice-ai', - action: 'chat_message', - details: { model: params.model, message_length: params.messageLength }, - response_time_ms: params.responseTimeMs, - status: params.status || 'success', - }); - } - - /** - * Get recent activity logs - */ - async getRecentActivity(limit: number = 50): Promise { - const { data: { user } } = await supabase.auth.getUser(); - if (!user) return []; - - const { data, error } = await supabase - .from('activity_logs') - .select('*') - .eq('user_id', user.id) - .order('created_at', { ascending: false }) - .limit(limit); - - if (error) { - console.error('Failed to fetch activity logs:', error); - return []; - } - - return data || []; - } - - /** - * Get API usage stats for today - */ - async getTodayUsageStats(): Promise { - const { data: { user } } = await supabase.auth.getUser(); - if (!user) return []; - - const today = new Date().toISOString().split('T')[0]; - - const { data, error } = await supabase - .from('api_usage_stats') - .select('*') - .eq('user_id', user.id) - .eq('date', today); - - if (error) { - console.error('Failed to fetch usage stats:', 
error); - return []; - } - - return data || []; - } - - /** - * Get API usage stats for a specific service - */ - async getServiceUsageStats(serviceName: string, days: number = 7): Promise { - const { data: { user } } = await supabase.auth.getUser(); - if (!user) return []; - - const startDate = new Date(); - startDate.setDate(startDate.getDate() - days); - - const { data, error } = await supabase - .from('api_usage_stats') - .select('*') - .eq('user_id', user.id) - .eq('service_name', serviceName) - .gte('date', startDate.toISOString().split('T')[0]) - .order('date', { ascending: false }); - - if (error) { - console.error('Failed to fetch service usage stats:', error); - return []; - } - - return data || []; - } - - /** - * Subscribe to real-time activity updates - */ - subscribeToActivityUpdates(callback: (activity: ActivityLog) => void): () => void { - this.realtimeChannel = supabase - .channel('activity_logs_channel') - .on( - 'postgres_changes', - { - event: 'INSERT', - schema: 'public', - table: 'activity_logs', - }, - (payload) => { - callback(payload.new as ActivityLog); - } - ) - .subscribe(); - - // Return unsubscribe function - return () => { - if (this.realtimeChannel) { - supabase.removeChannel(this.realtimeChannel); - this.realtimeChannel = null; - } - }; - } - - /** - * Subscribe to real-time usage stats updates - */ - subscribeToUsageStatsUpdates(callback: (stats: ApiUsageStats) => void): () => void { - const channel = supabase - .channel('usage_stats_channel') - .on( - 'postgres_changes', - { - event: '*', - schema: 'public', - table: 'api_usage_stats', - }, - (payload) => { - callback(payload.new as ApiUsageStats); - } - ) - .subscribe(); - - // Return unsubscribe function - return () => { - supabase.removeChannel(channel); - }; - } - - /** - * Get activity summary by type - */ - async getActivitySummary(days: number = 7): Promise> { - const { data: { user } } = await supabase.auth.getUser(); - if (!user) return {}; - - const startDate = new 
Date(); - startDate.setDate(startDate.getDate() - days); - - const { data, error } = await supabase - .from('activity_logs') - .select('activity_type') - .eq('user_id', user.id) - .gte('created_at', startDate.toISOString()); - - if (error) { - console.error('Failed to fetch activity summary:', error); - return {}; - } - - // Count by type - const summary: Record = {}; - data?.forEach((log: ActivityLog) => { - summary[log.activity_type] = (summary[log.activity_type] || 0) + 1; - }); - - return summary; - } + // All methods are intentionally no-ops — no telemetry collected + async logActivity(_activity: any): Promise { return; } + async logSearch(_params: any): Promise { return; } + async logApiCall(_params: any): Promise { return; } + async logChat(_params: any): Promise { return; } + async getRecentActivity(_limit?: number): Promise { return []; } + async getTodayUsageStats(): Promise { return []; } + async getServiceUsageStats(_service: string, _days?: number): Promise { return []; } + async getActivitySummary(_days?: number): Promise> { return {}; } + subscribeToActivityUpdates(_callback: any): () => void { return () => {}; } + subscribeToUsageStatsUpdates(_callback: any): () => void { return () => {}; } } -// Export singleton instance export const analyticsService = new AnalyticsService(); export default analyticsService; diff --git a/apps/desktop/src/services/claude-provider.ts b/apps/desktop/src/services/claude-provider.ts index 6872fd2..9e4accb 100644 --- a/apps/desktop/src/services/claude-provider.ts +++ b/apps/desktop/src/services/claude-provider.ts @@ -3,6 +3,7 @@ * Streams Claude responses via Electron IPC → claude -p --output-format stream-json * Uses the full .env-unfiltered setup (CLAUDE.md, MCP servers, tools, plugins) */ +import { hasElectronAPI } from '@/lib/platform'; export interface ClaudeModel { id: string; @@ -24,6 +25,25 @@ const CLAUDE_MODELS: ClaudeModel[] = [ { id: 'haiku', name: 'Claude Haiku 4.5', provider: 'Anthropic' }, ]; +function 
formatStreamError(error: string): string { + const normalized = error.trim(); + const lower = normalized.toLowerCase(); + + const copilotRateLimitMatch = normalized.match(/please try again in\s+(.+)$/i); + if (lower.includes('copilot') && lower.includes('rate limit') && copilotRateLimitMatch) { + const waitTime = copilotRateLimitMatch[1].trim(); + return waitTime + ? `Copilot rate limit reached. Please try again in ${waitTime}.` + : 'Copilot rate limit reached. Please wait a few minutes and try again.'; + } + + if (lower.includes('rate limit') || lower.includes('too many requests') || lower.includes('http 429')) { + return 'Rate limit reached. Please wait a few minutes and try again.'; + } + + return normalized || 'An unexpected error occurred. Please try again.'; +} + class ClaudeProvider { private currentModel = 'sonnet'; private sessionId: string | null = null; @@ -61,7 +81,7 @@ class ClaudeProvider { * Call onEvent() before send() to receive streaming events. */ async send(prompt: string): Promise<{ ok: boolean; costUsd?: number }> { - if (!window.electronAPI?.claudeChat) { + if (!hasElectronAPI() || !window.electronAPI?.claudeChat) { return { ok: false }; } @@ -82,7 +102,7 @@ class ClaudeProvider { window.electronAPI!.onClaudeStreamError!((error: string) => { for (const listener of this.listeners) { - listener({ type: 'error', content: error }); + listener({ type: 'error', content: formatStreamError(error) }); } this.active = false; window.electronAPI!.removeClaudeListeners!(); diff --git a/apps/desktop/src/services/credentialStorage.ts b/apps/desktop/src/services/credentialStorage.ts index 60b0c7f..867c1ca 100644 --- a/apps/desktop/src/services/credentialStorage.ts +++ b/apps/desktop/src/services/credentialStorage.ts @@ -84,8 +84,14 @@ class CredentialStorageService { ['deriveKey'] ); - // Derive AES key - const salt = encoder.encode('ghost-ai-terminal-salt-v1'); + // Per-device salt: generated on first use, persisted in localStorage + let storedSalt = 
localStorage.getItem('crowbyte_cred_salt'); + if (!storedSalt) { + const randomSalt = crypto.getRandomValues(new Uint8Array(32)); + storedSalt = btoa(String.fromCharCode(...randomSalt)); + localStorage.setItem('crowbyte_cred_salt', storedSalt); + } + const salt = Uint8Array.from(atob(storedSalt), c => c.charCodeAt(0)); return await crypto.subtle.deriveKey( { diff --git a/apps/desktop/src/services/encryption.ts b/apps/desktop/src/services/encryption.ts index 3bf35dd..96d7e93 100644 --- a/apps/desktop/src/services/encryption.ts +++ b/apps/desktop/src/services/encryption.ts @@ -242,14 +242,22 @@ export class EncryptionService { throw new Error('Encryption service not initialized'); } - // Derive HMAC key from master key + // Derive HMAC key via HKDF using keyId as input keying material. + // keyId is derived from the user's salt (see computeKeyId), so the HMAC key + // is bound to the user's key material rather than a hardcoded constant. const encoder = new TextEncoder(); - const keyMaterial = encoder.encode('crowbyte-hmac-key'); - - const hmacKey = await crypto.subtle.importKey( - 'raw', - keyMaterial, - { name: 'HMAC', hash: 'SHA-256' }, + // Derive a proper HMAC key via HKDF bound to the keyId + const ikmBytes = encoder.encode(`crowbyte-hmac-ikm-${this.keyId}`); + const ikmKey = await crypto.subtle.importKey('raw', ikmBytes, 'HKDF', false, ['deriveKey']); + const hmacKey = await crypto.subtle.deriveKey( + { + name: 'HKDF', + hash: 'SHA-256', + salt: encoder.encode('crowbyte-hmac-salt-v1'), + info: encoder.encode('crowbyte-hmac'), + }, + ikmKey, + { name: 'HMAC', hash: 'SHA-256', length: 256 }, false, ['sign'] ); diff --git a/apps/desktop/src/services/filesystemMCP.ts b/apps/desktop/src/services/filesystemMCP.ts index e5e9e36..bffb8a5 100644 --- a/apps/desktop/src/services/filesystemMCP.ts +++ b/apps/desktop/src/services/filesystemMCP.ts @@ -1,7 +1,9 @@ /** * Filesystem MCP Service * Access to /mnt/bounty and /home/rainkode via MCP Filesystem Server + * Only 
available in Electron builds — web has no IPC bridge. */ +import { hasElectronAPI } from '@/lib/platform'; declare global { interface Window { @@ -38,6 +40,11 @@ class FilesystemMCPService { * Initialize the filesystem service and load available tools */ async initialize(): Promise { + if (!hasElectronAPI()) { + console.log('📁 Filesystem MCP: skipping — not in Electron'); + return; + } + try { console.log('📁 Initializing Filesystem MCP Service...'); diff --git a/apps/desktop/src/services/glitchtip.ts b/apps/desktop/src/services/glitchtip.ts new file mode 100644 index 0000000..e41252d --- /dev/null +++ b/apps/desktop/src/services/glitchtip.ts @@ -0,0 +1,345 @@ +/** + * GlitchTip Error Monitoring — Zero Dependencies + * + * Lightweight error reporter using GlitchTip's Sentry-compatible store API. + * No @sentry/* packages. Just fetch(). + * + * DSN: configured via VITE_GLITCHTIP_DSN + * API: configured via VITE_GLITCHTIP_API_TOKEN + */ + +import { loggingService } from '@/services/logging'; +import { IS_WEB, BUILD_TARGET } from '@/lib/platform'; + +// ─── Types ───────────────────────────────────────────────────────────────── + +export interface GlitchTipIssue { + id: string; + title: string; + culprit: string; + level: string; + status: string; + count: number; + firstSeen: string; + lastSeen: string; + platform: string; + metadata: { + type?: string; + value?: string; + filename?: string; + function?: string; + }; +} + +export interface GlitchTipEvent { + id: string; + eventID: string; + title: string; + message: string; + dateCreated: string; + platform: string; + tags: Array<{ key: string; value: string }>; + entries: Array<{ + type: string; + data: Record; + }>; +} + +type SeverityLevel = 'fatal' | 'error' | 'warning' | 'info' | 'debug'; + +// ─── DSN Parser ──────────────────────────────────────────────────────────── + +interface ParsedDSN { + publicKey: string; + host: string; + projectId: string; + storeUrl: string; +} + +function parseDSN(dsn: string): 
ParsedDSN | null { + try { + // DSN format: https://@/ + const url = new URL(dsn); + const publicKey = url.username; + const host = url.host; + const projectId = url.pathname.replace('/', ''); + return { + publicKey, + host, + projectId, + storeUrl: `${url.protocol}//${host}/api/${projectId}/store/?sentry_key=${publicKey}&sentry_version=7`, + }; + } catch { + return null; + } +} + +// ─── Stack Trace Parser ──────────────────────────────────────────────────── + +interface StackFrame { + filename: string; + function: string; + lineno: number; + colno: number; + in_app: boolean; +} + +function parseStack(stack: string): StackFrame[] { + const frames: StackFrame[] = []; + const lines = stack.split('\n').slice(1); // Skip error message line + + for (const line of lines) { + // Chrome/Edge: " at functionName (file:line:col)" + // Firefox: "functionName@file:line:col" + const chromeMatch = line.match(/^\s*at\s+(?:(.+?)\s+\()?(.+?):(\d+):(\d+)\)?$/); + const firefoxMatch = line.match(/^(.+?)@(.+?):(\d+):(\d+)$/); + const match = chromeMatch || firefoxMatch; + + if (match) { + frames.push({ + function: match[1] || '?', + filename: match[2], + lineno: parseInt(match[3], 10), + colno: parseInt(match[4], 10), + in_app: !match[2].includes('node_modules'), + }); + } + } + + return frames.reverse(); // Sentry expects oldest frame first +} + +// ─── Configuration ───────────────────────────────────────────────────────── + +const GLITCHTIP_DSN = import.meta.env.VITE_GLITCHTIP_DSN || ''; +const GLITCHTIP_API_TOKEN = import.meta.env.VITE_GLITCHTIP_API_TOKEN || ''; +const GLITCHTIP_API_BASE = 'https://app.glitchtip.com/api/0'; +const GLITCHTIP_ORG = 'crowbyte'; +const GLITCHTIP_PROJECT = 'crowbyte'; + +// Noisy errors to suppress +const NOISE_PATTERNS = ['ResizeObserver', 'extension', 'chrome-extension://']; + +// ─── Service ─────────────────────────────────────────────────────────────── + +class GlitchTipService { + private initialized = false; + private dsn: ParsedDSN | null = 
null; + private user: { id: string; email?: string } | null = null; + private breadcrumbs: Array<{ + category: string; + message: string; + data?: Record; + timestamp: number; + }> = []; + + initialize(): void { + if (this.initialized || !GLITCHTIP_DSN) { + if (!GLITCHTIP_DSN) { + console.debug('[GlitchTip] No DSN configured — error monitoring disabled'); + } + return; + } + + this.dsn = parseDSN(GLITCHTIP_DSN); + if (!this.dsn) { + console.error('[GlitchTip] Invalid DSN'); + return; + } + + // Global error handler + window.addEventListener('error', (event) => { + if (event.error) { + this.captureError(event.error); + } else { + this.captureMessage(event.message || 'Unknown error', 'error'); + } + }); + + // Unhandled promise rejections + window.addEventListener('unhandledrejection', (event) => { + const error = event.reason instanceof Error + ? event.reason + : new Error(String(event.reason)); + this.captureError(error, { unhandled: true }); + }); + + this.initialized = true; + loggingService.addLog('success', 'system', 'GlitchTip error monitoring initialized'); + } + + captureError(error: Error, context?: Record): void { + if (!this.initialized || !this.dsn) return; + + // Filter noise + const msg = error.message || ''; + if (NOISE_PATTERNS.some(p => msg.includes(p))) return; + + // Log locally too + loggingService.addLog('error', 'system', 'Error captured by GlitchTip', msg); + + const frames = error.stack ? parseStack(error.stack) : []; + + const event = { + event_id: crypto.randomUUID().replace(/-/g, ''), + timestamp: new Date().toISOString(), + platform: 'javascript', + level: 'error' as SeverityLevel, + environment: import.meta.env.MODE || 'production', + release: `crowbyte@${import.meta.env.VITE_APP_VERSION || '0.0.0'}`, + tags: { + build_target: BUILD_TARGET, + platform: IS_WEB ? 
'web' : 'desktop', + }, + user: this.user || undefined, + breadcrumbs: this.breadcrumbs.slice(-20), // Last 20 + extra: context, + exception: { + values: [{ + type: error.name || 'Error', + value: error.message, + stacktrace: frames.length > 0 ? { frames } : undefined, + }], + }, + }; + + this.sendEvent(event); + } + + captureMessage(message: string, level: SeverityLevel = 'info'): void { + if (!this.initialized || !this.dsn) return; + + const event = { + event_id: crypto.randomUUID().replace(/-/g, ''), + timestamp: new Date().toISOString(), + platform: 'javascript', + level, + environment: import.meta.env.MODE || 'production', + release: `crowbyte@${import.meta.env.VITE_APP_VERSION || '0.0.0'}`, + tags: { + build_target: BUILD_TARGET, + platform: IS_WEB ? 'web' : 'desktop', + }, + user: this.user || undefined, + message: { formatted: message }, + }; + + this.sendEvent(event); + } + + setUser(user: { id: string; email?: string }): void { + this.user = user; + } + + clearUser(): void { + this.user = null; + } + + addBreadcrumb(category: string, message: string, data?: Record): void { + this.breadcrumbs.push({ + category, + message, + data, + timestamp: Date.now() / 1000, + }); + // Keep last 50 + if (this.breadcrumbs.length > 50) { + this.breadcrumbs = this.breadcrumbs.slice(-50); + } + } + + private sendEvent(event: Record): void { + if (!this.dsn) return; + + // Fire-and-forget — don't block the UI + fetch(this.dsn.storeUrl, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(event), + }).catch(() => { + // Silent fail — error reporting shouldn't cause errors + }); + } + + // ─── API Client (for AI Agent queries) ───────────────────────────────── + + private async apiRequest(path: string): Promise { + if (!GLITCHTIP_API_TOKEN) { + console.debug('[GlitchTip] No API token — API queries disabled'); + return null; + } + + try { + const response = await fetch(`${GLITCHTIP_API_BASE}${path}`, { + headers: { + 'Authorization': 
`Bearer ${GLITCHTIP_API_TOKEN}`, + 'Content-Type': 'application/json', + }, + }); + + if (!response.ok) { + throw new Error(`GlitchTip API ${response.status}: ${response.statusText}`); + } + + return await response.json(); + } catch (error) { + console.error('[GlitchTip] API error:', error); + return null; + } + } + + async getIssues(query?: string): Promise { + const params = new URLSearchParams({ query: query || 'is:unresolved' }); + return await this.apiRequest( + `/projects/${GLITCHTIP_ORG}/${GLITCHTIP_PROJECT}/issues/?${params}` + ) || []; + } + + async getIssueEvents(issueId: string): Promise { + return await this.apiRequest( + `/issues/${issueId}/events/` + ) || []; + } + + async getErrorSummary(): Promise<{ total: number; unresolved: number; critical: number }> { + const issues = await this.getIssues(); + return { + total: issues.length, + unresolved: issues.filter(i => i.status === 'unresolved').length, + critical: issues.filter(i => i.level === 'fatal' || i.level === 'error').length, + }; + } + + async resolveIssue(issueId: string): Promise { + if (!GLITCHTIP_API_TOKEN) return false; + + try { + const response = await fetch(`${GLITCHTIP_API_BASE}/issues/${issueId}/`, { + method: 'PUT', + headers: { + 'Authorization': `Bearer ${GLITCHTIP_API_TOKEN}`, + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ status: 'resolved' }), + }); + return response.ok; + } catch { + return false; + } + } + + async getIssuesForAgent(): Promise { + const issues = await this.getIssues(); + if (issues.length === 0) return 'No unresolved issues found.'; + + return issues.map(issue => + `[${issue.level.toUpperCase()}] ${issue.title}\n` + + ` Culprit: ${issue.culprit}\n` + + ` Count: ${issue.count} | First: ${issue.firstSeen} | Last: ${issue.lastSeen}\n` + + ` ID: ${issue.id}` + ).join('\n\n'); + } +} + +export const glitchTipService = new GlitchTipService(); +export default glitchTipService; diff --git a/apps/desktop/src/services/inoreader.ts 
b/apps/desktop/src/services/inoreader.ts index f872702..f672485 100644 --- a/apps/desktop/src/services/inoreader.ts +++ b/apps/desktop/src/services/inoreader.ts @@ -562,8 +562,8 @@ class InoreaderService { // Singleton instance export const inoreaderService = new InoreaderService( - '1000003037', - '9IZsLbiEd26EI8ZJNFWYw9KdI4baQWGu' + import.meta.env.VITE_INOREADER_CLIENT_ID || '1000003037', + import.meta.env.VITE_INOREADER_CLIENT_SECRET || '' ); export default inoreaderService; diff --git a/apps/desktop/src/services/ip-status.ts b/apps/desktop/src/services/ip-status.ts index f5661cd..516b23a 100644 --- a/apps/desktop/src/services/ip-status.ts +++ b/apps/desktop/src/services/ip-status.ts @@ -1,8 +1,10 @@ /** * IP Address & Connection Status Service - * Detects current IP, VPN status, and Tor connection + * Detects current IP and VPN status */ +import { loggingService } from '@/services/logging'; + // Debug logging — disabled in production to prevent leaking IP/VPN/ISP data to console const IP_DEBUG = import.meta.env.DEV; const debugLog = (...args: unknown[]) => { if (IP_DEBUG) console.debug('[IP]', ...args); }; @@ -36,10 +38,9 @@ export interface IPStatusData { org?: string; timezone?: string; isVPN: boolean; - isTor: boolean; isProxy: boolean; vpnProvider?: string; - connectionType: 'direct' | 'vpn' | 'tor' | 'proxy' | 'unknown'; + connectionType: 'direct' | 'vpn' | 'proxy' | 'unknown'; networkConnection?: NetworkConnectionInfo; dnsInfo?: DNSInfo; // DNS servers and leak detection localIP?: string; // Local/WiFi IP (e.g. 
192.168.x.x) @@ -47,10 +48,6 @@ export interface IPStatusData { error?: string; } -export interface TorCheckResult { - isTor: boolean; - isExitNode?: boolean; -} class IPStatusService { private cachedStatus: IPStatusData | null = null; @@ -345,58 +342,6 @@ class IPStatusService { return ipv4Regex.test(ip) || ipv6Regex.test(ip); } - /** - * Check if IP is a Tor exit node - * Uses Tor Project's bulk exit list or check service - */ - private async checkTorStatus(ip: string): Promise { - try { - debugLog('🧅 Checking Tor status for IP:', ip); - - // PRIMARY: Use Electron proxy to avoid CORS - if (typeof window !== 'undefined' && window.electronAPI) { - debugLog('🔌 Using Electron proxy for Tor check...'); - - const result = await window.electronAPI.checkTor(); - - if (result.success && result.data) { - debugLog('✅ Tor check response:', result.data); - return { - isTor: result.data.IsTor === true, - isExitNode: result.data.IsTor === true, - }; - } else { - debugWarn('⚠️ Tor check via Electron failed:', result.error); - } - } - - // No Electron proxy available — skip direct fetch (CORS-blocked in browser) - // Fall back to indicator-based detection - return this.checkTorIndicators(ip); - } catch (error: any) { - // Tor check failed — use indicator-based detection - return this.checkTorIndicators(ip); - } - } - - /** - * Fallback Tor detection using common indicators - */ - private async checkTorIndicators(ip: string): Promise { - try { - // Check if we can reach Tor check endpoint - const response = await fetch('https://check.torproject.org/', { - method: 'HEAD', - mode: 'no-cors', - }); - - // If we can reach Tor check, we might be on Tor - // This is a weak indicator, but better than nothing - return { isTor: false }; // Conservative default - } catch (error) { - return { isTor: false }; - } - } /** * Enhanced VPN detection with ASN-based detection @@ -850,17 +795,15 @@ class IPStatusService { */ private determineConnectionType( isVPN: boolean, - isTor: boolean, isProxy: 
boolean ): IPStatusData['connectionType'] { - if (isTor) return 'tor'; if (isVPN) return 'vpn'; if (isProxy) return 'proxy'; return 'direct'; } /** - * Get current IP status with VPN and Tor detection + * Get current IP status with VPN detection * Enhanced with comprehensive error boundaries */ private _fetching = false; @@ -871,7 +814,7 @@ class IPStatusService { return this.cachedStatus ?? { ip: 'Unavailable', isVPN: false, - isTor: false, + isProxy: false, connectionType: 'unknown' as const, lastChecked: new Date(), @@ -927,20 +870,12 @@ class IPStatusService { } } - // Phase 4: Enrich with Tor/VPN detection (best effort) + // Phase 4: Enrich with VPN detection (best effort) try { - debugLog('📡 Phase 4: Enriching with Tor/VPN detection...'); + debugLog('📡 Phase 4: Enriching with VPN detection...'); - let torStatus: TorCheckResult; let vpnStatus: { isVPN: boolean; provider?: string }; - try { - torStatus = await this.checkTorStatus(ipInfo.ip!); - } catch (torError) { - debugWarn('⚠️ Tor check failed, assuming not Tor'); - torStatus = { isTor: false }; - } - try { vpnStatus = this.detectVPN(ipInfo); } catch (vpnError) { @@ -950,7 +885,6 @@ class IPStatusService { const connectionType = this.determineConnectionType( vpnStatus.isVPN, - torStatus.isTor, ipInfo.isProxy || false ); @@ -993,7 +927,6 @@ class IPStatusService { org: ipInfo.org, timezone: ipInfo.timezone, isVPN: vpnStatus.isVPN, - isTor: torStatus.isTor, isProxy: ipInfo.isProxy || false, vpnProvider: vpnStatus.provider, connectionType, @@ -1008,8 +941,8 @@ class IPStatusService { this.lastCheck = new Date(); debugLog(`✅ === IP STATUS COMPLETE: ${status.ip} (${status.connectionType}) ===`); + loggingService.addLog('success', 'network', 'IP status resolved', `${status.ip} (${status.connectionType}${status.isVPN ? 
` - ${status.vpnProvider || 'VPN'}` : ''})`); if (status.isVPN) debugLog(`🔒 VPN: ${status.vpnProvider || 'Unknown Provider'}`); - if (status.isTor) debugLog('🧅 Tor Connection Detected'); if (status.dnsInfo && status.dnsInfo.servers.length > 0) { debugLog(`🌐 DNS: ${status.dnsInfo.servers.join(', ')} (${status.dnsInfo.source})`); if (status.dnsInfo.isDNSLeak) { @@ -1020,6 +953,7 @@ class IPStatusService { return status; } catch (enrichmentError: any) { debugWarn('⚠️ Enrichment failed, returning basic status:', enrichmentError.message); + loggingService.addLog('warning', 'network', 'IP enrichment partial failure', enrichmentError.message); // Return basic status without enrichment const basicStatus: IPStatusData = { @@ -1031,7 +965,7 @@ class IPStatusService { org: ipInfo.org, timezone: ipInfo.timezone, isVPN: false, - isTor: false, + isProxy: false, connectionType: 'unknown', lastChecked: new Date(), @@ -1054,7 +988,7 @@ class IPStatusService { const safe: IPStatusData = { ip: 'Unavailable', isVPN: false, - isTor: false, + isProxy: false, connectionType: 'unknown', lastChecked: new Date(), @@ -1192,7 +1126,6 @@ class IPStatusService { const emergencyStatus: IPStatusData = { ip: localIP || 'Unavailable', isVPN: false, - isTor: false, isProxy: false, connectionType: 'unknown', lastChecked: new Date(), @@ -1222,14 +1155,6 @@ class IPStatusService { return status.isVPN; } - /** - * Check only if Tor is connected (quick check) - */ - async isTorConnected(): Promise { - const status = await this.getIPStatus(); - return status.isTor; - } - /** * Get cached status without fetching */ @@ -1250,8 +1175,6 @@ class IPStatusService { */ getConnectionColor(connectionType: IPStatusData['connectionType']): string { switch (connectionType) { - case 'tor': - return 'text-purple-500'; // Purple for Tor case 'vpn': return 'text-emerald-500'; // Green for VPN case 'proxy': @@ -1269,8 +1192,6 @@ class IPStatusService { */ getBadgeColor(connectionType: IPStatusData['connectionType']): 
string { switch (connectionType) { - case 'tor': - return 'bg-purple-500/15 text-violet-500 border-transparent'; case 'vpn': return 'bg-emerald-500/15 text-emerald-500 border-transparent'; case 'proxy': @@ -1298,7 +1219,6 @@ if (typeof window !== 'undefined') { debugLog('🌍 IP:', status.ip); debugLog('🏢 ISP/Org:', status.isp, '/', status.org); debugLog('🔒 VPN:', status.isVPN, status.vpnProvider); - debugLog('🧅 Tor:', status.isTor); debugLog('📡 Network:', status.networkConnection?.type); debugLog('🗺️ Location:', status.city, status.region, status.country); return status; diff --git a/apps/desktop/src/services/license-guard.ts b/apps/desktop/src/services/license-guard.ts index a60bba9..5d0aa77 100644 --- a/apps/desktop/src/services/license-guard.ts +++ b/apps/desktop/src/services/license-guard.ts @@ -72,28 +72,107 @@ export function getDeviceId(): string { // ─── Cache (localStorage fallback, safeStorage preferred) ─────────────────── +// ─── AES-GCM ticket encryption using device fingerprint as key material ──── + +async function deriveTicketKey(): Promise { + const deviceId = getDeviceId(); + const encoder = new TextEncoder(); + const keyMaterial = await crypto.subtle.importKey( + 'raw', + encoder.encode(deviceId), + 'PBKDF2', + false, + ['deriveKey'] + ); + // Use a per-installation salt stored in localStorage; generate on first use + let salt = localStorage.getItem('crowbyte_ticket_salt'); + if (!salt) { + const randomSalt = crypto.getRandomValues(new Uint8Array(32)); + salt = btoa(String.fromCharCode(...randomSalt)); + localStorage.setItem('crowbyte_ticket_salt', salt); + } + const saltBytes = Uint8Array.from(atob(salt), c => c.charCodeAt(0)); + return crypto.subtle.deriveKey( + { name: 'PBKDF2', salt: saltBytes, iterations: 100000, hash: 'SHA-256' }, + keyMaterial, + { name: 'AES-GCM', length: 256 }, + false, + ['encrypt', 'decrypt'] + ); +} + +async function encryptTicket(status: LicenseStatus): Promise { + const key = await deriveTicketKey(); + const iv = 
crypto.getRandomValues(new Uint8Array(12)); + const plaintext = new TextEncoder().encode(JSON.stringify(status)); + const ciphertext = await crypto.subtle.encrypt({ name: 'AES-GCM', iv }, key, plaintext); + const combined = new Uint8Array(12 + ciphertext.byteLength); + combined.set(iv); + combined.set(new Uint8Array(ciphertext), 12); + return btoa(String.fromCharCode(...combined)); +} + +async function decryptTicket(raw: string): Promise { + const key = await deriveTicketKey(); + const combined = Uint8Array.from(atob(raw), c => c.charCodeAt(0)); + const iv = combined.slice(0, 12); + const ciphertext = combined.slice(12); + const plaintext = await crypto.subtle.decrypt({ name: 'AES-GCM', iv }, key, ciphertext); + return JSON.parse(new TextDecoder().decode(plaintext)) as LicenseStatus; +} + function saveTicket(status: LicenseStatus): void { + // Fire-and-forget async save; also try Electron safeStorage + encryptTicket(status).then(encrypted => { + try { + localStorage.setItem(CACHE_KEY, encrypted); + } catch { + // Silent fail + } + }).catch(() => {}); + // Also try Electron safeStorage if available + window.electronAPI?.storeCredentials?.({ + deviceId: CACHE_KEY, + data: JSON.stringify(status), + }); +} + +function loadTicket(): LicenseStatus | null { + // Synchronous path — returns null; callers that need the ticket should use loadTicketAsync try { - const encrypted = btoa(JSON.stringify(status)); - localStorage.setItem(CACHE_KEY, encrypted); - // Also try Electron safeStorage if available - window.electronAPI?.storeCredentials?.({ - deviceId: CACHE_KEY, - data: JSON.stringify(status), - }); + const raw = localStorage.getItem(CACHE_KEY); + if (!raw) return null; + // Attempt legacy btoa decode for migration from old format + try { + const legacy = JSON.parse(atob(raw)); + if (legacy && typeof legacy.valid === 'boolean') return legacy as LicenseStatus; + } catch { + // Not legacy format — fall through to return null; async path will decrypt + } + return null; } 
catch { - // Silent fail + return null; } } -function loadTicket(): LicenseStatus | null { +async function loadTicketAsync(): Promise { try { - // Try Electron safeStorage first - // Fallback to localStorage const raw = localStorage.getItem(CACHE_KEY); if (!raw) return null; - const decoded = JSON.parse(atob(raw)); - return decoded as LicenseStatus; + // Try AES-GCM decrypt first + try { + return await decryptTicket(raw); + } catch { + // Fall back to legacy btoa for one-time migration + try { + const legacy = JSON.parse(atob(raw)) as LicenseStatus; + // Re-save in new encrypted format + saveTicket(legacy); + return legacy; + } catch { + return null; + } + } } catch { return null; } @@ -280,7 +359,7 @@ export async function verifyLicense(): Promise { return status; } catch { // Offline — use cached ticket - const cached = loadTicket(); + const cached = await loadTicketAsync(); if (!cached) { return { valid: false, tier: 'unknown', status: 'offline', @@ -307,10 +386,19 @@ export async function verifyLicense(): Promise { } /** - * Quick check using cache only (non-blocking). + * Quick check using cache only (non-blocking). Returns null synchronously; + * use getCachedLicenseAsync for the decrypted value. */ export function getCachedLicense(): LicenseStatus | null { - const cached = loadTicket(); + // Synchronous stub retained for API compatibility — returns null when ticket is encrypted + return loadTicket(); +} + +/** + * Async version of getCachedLicense — decrypts AES-GCM ticket. 
+ */ +export async function getCachedLicenseAsync(): Promise { + const cached = await loadTicketAsync(); if (!cached) return null; const age = Date.now() - cached.lastCheck; if (age > CACHE_TTL_MS) return null; diff --git a/apps/desktop/src/services/monitoring-agent.ts b/apps/desktop/src/services/monitoring-agent.ts index 36c9c13..21df4b8 100644 --- a/apps/desktop/src/services/monitoring-agent.ts +++ b/apps/desktop/src/services/monitoring-agent.ts @@ -12,6 +12,7 @@ import { pcMonitor, type SystemMetrics, type ProcessInfo } from './pc-monitor'; import { tavilyService } from './tavily'; +import { glitchTipService } from './glitchtip'; import type { ToolFunction } from '@/types/service-types'; // MCP client - only available in Electron environment @@ -82,6 +83,7 @@ YOUR CAPABILITIES: - Threat correlation and root cause analysis - System health assessment and recommendations - Web search for threat intelligence, CVEs, and security advisories (via Tavily) +- Application error monitoring via GlitchTip (production crashes, exceptions, stack traces) YOUR PERSONALITY: - Tactical and professional security analyst @@ -96,8 +98,9 @@ Always structure your analysis as: 2. **METRICS**: Current system state (CPU, Memory, Disk, Network) 3. **ALERTS**: Any active alerts or anomalies detected 4. **SECURITY**: Network connections, suspicious activity, threats -5. **ANALYSIS**: Your expert interpretation and recommendations -6. **ACTIONS**: Specific steps to take if issues found +5. **APP ERRORS**: Production bugs from GlitchTip (crashes, exceptions, regressions) +6. **ANALYSIS**: Your expert interpretation and recommendations +7. **ACTIONS**: Specific steps to take if issues found CRITICAL RULES: - Always scan for suspicious network connections @@ -224,16 +227,21 @@ INSTRUCTIONS: 3. Use mcp_monitor_get_disk_info with {"path": "/", "all_partitions": true} 4. Use mcp_monitor_get_network_info with {"interface": ""} 5. 
Use mcp_monitor_get_process_info with {"pid": 0, "limit": 20, "sort_by": "cpu"} +6. Use glitchtip_error_summary with {} to check for app errors +7. If errors found, use glitchtip_get_issues to get details +8. For critical bugs, use glitchtip_get_issue_events with the issue ID IMPORTANT: - Some tools may return errors - if they do, work with available data - Provide your analysis even if some data is missing - After gathering metrics, ALWAYS respond with your analysis +- Check GlitchTip for production app bugs alongside system health After gathering all available metrics, provide: - STATUS: [HEALTHY/WARNING/CRITICAL] - Analysis of system health based on available data - Security concerns (network traffic patterns, resource usage) +- App error status (new crashes, regressions, unresolved bugs) - Performance recommendations - Immediate actions if needed @@ -315,7 +323,58 @@ Be thorough and provide a complete response even if some tools fail.`, } } as ToolFunction); - console.log(`🔧 Total tools available: ${tools.length} (${tools.filter(t => t.function.name.includes('mcp_monitor')).length} monitoring + ${tools.filter(t => t.function.name === 'tavily_search').length} search)`); + // Add GlitchTip error monitoring tools + tools.push({ + type: 'function', + function: { + name: 'glitchtip_get_issues', + description: 'Get unresolved application errors from GlitchTip error monitoring. Returns bugs, crashes, and exceptions from the CrowByte app (both web and desktop). Use to check for production errors.', + parameters: { + type: 'object', + properties: { + query: { + type: 'string', + description: 'Filter query (default: "is:unresolved"). Use "is:resolved" for fixed issues.', + default: 'is:unresolved' + } + }, + required: [] + } + } + } as ToolFunction); + + tools.push({ + type: 'function', + function: { + name: 'glitchtip_get_issue_events', + description: 'Get detailed events (stack traces, tags, context) for a specific GlitchTip issue by ID. 
Use after glitchtip_get_issues to deep-dive into a specific bug.', + parameters: { + type: 'object', + properties: { + issueId: { + type: 'string', + description: 'The GlitchTip issue ID to get events for' + } + }, + required: ['issueId'] + } + } + } as ToolFunction); + + tools.push({ + type: 'function', + function: { + name: 'glitchtip_error_summary', + description: 'Get a quick summary of application error counts: total, unresolved, and critical. Use for a fast health check of the app.', + parameters: { + type: 'object', + properties: {}, + required: [] + } + } + } as ToolFunction); + + console.log(`🔧 Total tools available: ${tools.length} (${tools.filter(t => t.function.name.includes('mcp_monitor')).length} monitoring + ${tools.filter(t => t.function.name === 'tavily_search').length} search + ${tools.filter(t => t.function.name.includes('glitchtip')).length} error tracking)`); return tools; } @@ -418,8 +477,57 @@ Be thorough and provide a complete response even if some tools fail.`, console.log(` → Executing: ${toolName}`); try { + // Handle GlitchTip error monitoring tools + if (toolName === 'glitchtip_get_issues') { + const issues = await glitchTipService.getIssues(toolArgs.query); + currentMessages.push({ + role: 'tool', + tool_call_id: toolCall.id, + content: JSON.stringify({ + count: issues.length, + issues: issues.map(i => ({ + id: i.id, + level: i.level, + title: i.title, + culprit: i.culprit, + count: i.count, + firstSeen: i.firstSeen, + lastSeen: i.lastSeen, + status: i.status, + })), + }), + }); + console.log(` ✅ Found ${issues.length} GlitchTip issues`); + } else if (toolName === 'glitchtip_get_issue_events') { + const events = await glitchTipService.getIssueEvents(toolArgs.issueId); + currentMessages.push({ + role: 'tool', + tool_call_id: toolCall.id, + content: JSON.stringify({ + issueId: toolArgs.issueId, + eventCount: events.length, + events: events.slice(0, 5).map(e => ({ + id: e.eventID, + title: e.title, + message: e.message, + dateCreated: 
e.dateCreated, + tags: e.tags, + entries: e.entries, + })), + }), + }); + console.log(` ✅ Found ${events.length} events for issue ${toolArgs.issueId}`); + } else if (toolName === 'glitchtip_error_summary') { + const summary = await glitchTipService.getErrorSummary(); + currentMessages.push({ + role: 'tool', + tool_call_id: toolCall.id, + content: JSON.stringify(summary), + }); + console.log(` ✅ Error summary: ${summary.total} total, ${summary.critical} critical`); + } // Handle Tavily search tool - if (toolName === 'tavily_search') { + else if (toolName === 'tavily_search') { console.log(` 🔍 Searching for: "${toolArgs.query}"`); const searchResult = await tavilyService.search({ query: toolArgs.query, diff --git a/apps/desktop/src/services/remote-control.ts b/apps/desktop/src/services/remote-control.ts index a369913..6a8562a 100644 --- a/apps/desktop/src/services/remote-control.ts +++ b/apps/desktop/src/services/remote-control.ts @@ -113,23 +113,40 @@ class E2ECrypto { private sharedSecret: CryptoKey | null = null; private encryptionKey: CryptoKey | null = null; private sequenceCounter = 0; + // Per-session random salt for HKDF — generated in generateKeyPair(), shared during key exchange + sessionSalt: Uint8Array = crypto.getRandomValues(new Uint8Array(32)); /** - * Generate X25519 ECDH key pair for this session + * Generate X25519 ECDH key pair for this session. + * Also regenerates the per-session HKDF salt. 
*/ - async generateKeyPair(): Promise { + async generateKeyPair(): Promise<{ publicKey: JsonWebKey; sessionSalt: string }> { + this.sessionSalt = crypto.getRandomValues(new Uint8Array(32)); this.localKeyPair = await crypto.subtle.generateKey( { name: 'ECDH', namedCurve: 'P-256' }, // P-256 as WebCrypto X25519 fallback true, ['deriveBits'] ); - return await crypto.subtle.exportKey('jwk', this.localKeyPair.publicKey); + const publicKey = await crypto.subtle.exportKey('jwk', this.localKeyPair.publicKey); + return { + publicKey, + sessionSalt: btoa(String.fromCharCode(...this.sessionSalt)), + }; } /** - * Derive shared secret from remote public key + * Derive shared secret from remote public key and (optionally) remote session salt. + * If remoteSessionSalt is provided, it is XOR-combined with our local salt so that + * both sides contribute entropy to the HKDF salt. */ - async deriveSharedSecret(remotePublicKeyJwk: JsonWebKey): Promise { + async deriveSharedSecret(remotePublicKeyJwk: JsonWebKey, remoteSessionSaltB64?: string): Promise { + if (remoteSessionSaltB64) { + const remoteSalt = Uint8Array.from(atob(remoteSessionSaltB64), c => c.charCodeAt(0)); + // XOR local and remote salts so both sides contribute + for (let i = 0; i < this.sessionSalt.length; i++) { + this.sessionSalt[i] ^= remoteSalt[i % remoteSalt.length]; + } + } const remotePublicKey = await crypto.subtle.importKey( 'jwk', remotePublicKeyJwk, @@ -157,7 +174,7 @@ class E2ECrypto { { name: 'HKDF', hash: 'SHA-256', - salt: new Uint8Array(32), // In production: use session-specific salt + salt: this.sessionSalt, // Random per-session salt, included in key exchange info: new TextEncoder().encode('crowbyte-remote-control-v1'), }, keyMaterial, @@ -333,10 +350,10 @@ class RemoteControlService { this.emit('session:created', session); // Generate E2E encryption keys - const publicKey = await this.crypto.generateKeyPair(); + const { publicKey, sessionSalt } = await this.crypto.generateKeyPair(); - // Connect to 
relay server - await this.connectToRelay(session, publicKey); + // Connect to relay server (publicKey and sessionSalt are sent during handshake) + await this.connectToRelay(session, publicKey, sessionSalt); return session; } @@ -344,7 +361,7 @@ class RemoteControlService { /** * Connect to WebSocket relay server */ - private async connectToRelay(session: RemoteSession, publicKey: JsonWebKey): Promise { + private async connectToRelay(session: RemoteSession, publicKey: JsonWebKey, sessionSalt: string): Promise { return new Promise((resolve, reject) => { this.updateStatus('connecting'); @@ -355,7 +372,7 @@ class RemoteControlService { this.ws.onopen = () => { console.log('[RC] Connected to relay'); - // Send session init with our public key + // Send session init with our public key and session salt this.wsSend({ type: 'session_init', sessionId: session.id, @@ -363,6 +380,7 @@ class RemoteControlService { targetIp: session.targetIp, permission: session.permission, publicKey, + sessionSalt, config: { maxFrameRate: this.config.maxFrameRate, quality: this.config.quality, @@ -385,7 +403,7 @@ class RemoteControlService { if (this.currentSession?.status === 'connected' && this.reconnectAttempts < this.maxReconnectAttempts) { this.reconnectAttempts++; console.log(`[RC] Reconnecting... attempt ${this.reconnectAttempts}/${this.maxReconnectAttempts}`); - setTimeout(() => this.connectToRelay(session, publicKey), 1000 * this.reconnectAttempts); + setTimeout(() => this.connectToRelay(session, publicKey, sessionSalt), 1000 * this.reconnectAttempts); } else { this.updateStatus('disconnected'); } @@ -427,9 +445,9 @@ class RemoteControlService { this.currentSession!.consentStatus = msg.autoApproved ? 
'auto_approved' : 'approved'; this.currentSession!.consentGivenBy = msg.approvedBy; this.emit('consent:approved', msg); - // Perform key exchange + // Perform key exchange — include remote session salt if provided if (msg.publicKey) { - await this.crypto.deriveSharedSecret(msg.publicKey); + await this.crypto.deriveSharedSecret(msg.publicKey, msg.sessionSalt); this.updateStatus('connected'); this.currentSession!.startedAt = new Date().toISOString(); this.emit('session:connected', this.currentSession); diff --git a/apps/desktop/src/services/support-agent.ts b/apps/desktop/src/services/support-agent.ts new file mode 100644 index 0000000..c9931d8 --- /dev/null +++ b/apps/desktop/src/services/support-agent.ts @@ -0,0 +1,495 @@ +/** + * CrowByte Support Agent Service + * RAG-powered support chat with diagnostics, escalation, and push notifications. + */ + +import { supabase } from '@/lib/supabase'; +import { openClaw } from './openclaw'; +import { IS_ELECTRON, hasElectronAPI } from '@/lib/platform'; + +// ── Types ──────────────────────────────────────────────────────────────────── + +export type MessageRole = 'user' | 'agent' | 'system' | 'diagnostic' | 'notification'; +export type TicketStatus = 'open' | 'in_progress' | 'resolved' | 'closed'; +export type TicketPriority = 'low' | 'medium' | 'high' | 'critical'; +export type NotificationType = 'info' | 'warning' | 'alert' | 'critical' | 'update'; +export type IntentType = 'docs' | 'diagnostic' | 'escalation' | 'general'; + +export interface SupportMessage { + id: string; + role: MessageRole; + content: string; + timestamp: Date; + diagnostics?: DiagnosticResult; + ticketId?: string; + notification?: UserNotification; +} + +export interface HealthCheck { + name: string; + status: 'ok' | 'warning' | 'error'; + message: string; + latencyMs?: number; +} + +export interface DiagnosticResult { + checks: HealthCheck[]; + score: number; + timestamp: Date; + summary: string; +} + +export interface EscalationTicket { + id?: 
string; + subject: string; + priority: TicketPriority; + conversation: SupportMessage[]; + diagnostics?: DiagnosticResult; + userEmail?: string; + userId?: string; +} + +export interface UserNotification { + id: string; + type: NotificationType; + title: string; + message: string; + actionUrl?: string; + source: 'admin' | 'system' | 'monitoring'; + read: boolean; + dismissed: boolean; + createdAt: string; +} + +interface KnowledgeChunk { + id: string; + title: string; + section: string; + keywords: string[]; + content: string; +} + +// ── Stopwords for tokenization ─────────────────────────────────────────────── + +const STOPWORDS = new Set([ + 'a','an','the','is','are','was','were','be','been','being','have','has','had', + 'do','does','did','will','would','could','should','may','might','shall','can', + 'i','me','my','we','our','you','your','he','she','it','they','them','this', + 'that','what','which','who','whom','how','when','where','why','in','on','at', + 'to','for','of','with','by','from','and','or','but','not','no','so','if','then', +]); + +// ── Intent keyword maps ────────────────────────────────────────────────────── + +const INTENT_KEYWORDS: Record = { + diagnostic: [ + 'error','broken','not working','crash','fail','slow','stuck','bug', + 'issue','problem',"can't","doesn't","won't",'timeout','500','404', + ], + escalation: [ + 'human','person','agent','support','help me','talk to','someone', + 'real person','escalate','ticket', + ], + docs: [ + 'how do i','where is','what is','how to','guide','tutorial','explain', + 'documentation','feature','setting','page','navigate', + ], + general: [], +}; + +// ── Service ────────────────────────────────────────────────────────────────── + +class SupportAgentService { + private knowledge: KnowledgeChunk[] = []; + private knowledgeLoaded = false; + + constructor() { + this.loadKnowledge(); + } + + /** Load docs-knowledge.json into memory (non-blocking) */ + private async loadKnowledge(): Promise { + try { + const mod = 
await import('@/data/docs-knowledge.json'); + this.knowledge = (mod.default || mod) as KnowledgeChunk[]; + } catch { + this.knowledge = []; + } + this.knowledgeLoaded = true; + } + + // ── RAG Search ─────────────────────────────────────────────────────────── + + searchKnowledge(query: string, limit = 3): KnowledgeChunk[] { + if (!this.knowledge.length) return []; + + const tokens = query + .toLowerCase() + .replace(/[^\w\s]/g, '') + .split(/\s+/) + .filter((w) => w.length > 1 && !STOPWORDS.has(w)); + + if (!tokens.length) return []; + + const scored = this.knowledge.map((chunk) => { + let score = 0; + const contentLower = chunk.content.toLowerCase(); + for (const token of tokens) { + if (chunk.keywords.some((k) => k.toLowerCase().includes(token))) score += 3; + if (contentLower.includes(token)) score += 1; + if (chunk.title.toLowerCase().includes(token)) score += 2; + } + return { chunk, score }; + }); + + return scored + .filter((s) => s.score > 0) + .sort((a, b) => b.score - a.score) + .slice(0, limit) + .map((s) => s.chunk); + } + + // ── Intent Classification ──────────────────────────────────────────────── + + classifyIntent(message: string): IntentType { + const lower = message.toLowerCase(); + + const scores: Record = { diagnostic: 0, escalation: 0, docs: 0, general: 0 }; + + for (const [intent, keywords] of Object.entries(INTENT_KEYWORDS) as [IntentType, string[]][]) { + for (const kw of keywords) { + if (lower.includes(kw)) scores[intent]++; + } + } + + let best: IntentType = 'general'; + let max = 0; + for (const [intent, score] of Object.entries(scores) as [IntentType, number][]) { + if (score > max) { max = score; best = intent; } + } + return best; + } + + // ── Diagnostics ────────────────────────────────────────────────────────── + + async runDiagnostics(): Promise { + const checks = await Promise.all([ + this.checkSupabase(), + this.checkAuth(), + this.checkOpenClaw(), + this.checkElectron(), + this.checkStorage(), + this.checkErrorReporter(), + 
]); + + const okCount = checks.filter((c) => c.status === 'ok').length; + const warnCount = checks.filter((c) => c.status === 'warning').length; + const total = checks.length; + const score = Math.round((okCount / total) * 100) + (okCount === total ? 4 : 0); + const clampedScore = Math.min(score, 100); + + const issues = checks.filter((c) => c.status !== 'ok'); + const summary = issues.length === 0 + ? `${total}/${total} systems healthy. All green.` + : `${okCount}/${total} systems healthy.${warnCount ? ` ${warnCount} warning(s).` : ''} ${issues.map((i) => `${i.name}: ${i.message}`).join('. ')}.`; + + return { checks, score: clampedScore, timestamp: new Date(), summary }; + } + + private async checkSupabase(): Promise { + const start = Date.now(); + try { + const { error } = await supabase.from('profiles').select('id').limit(1); + const latencyMs = Date.now() - start; + if (error) return { name: 'Supabase', status: 'error', message: error.message, latencyMs }; + return { name: 'Supabase', status: latencyMs > 3000 ? 
'warning' : 'ok', message: `Connected (${latencyMs}ms)`, latencyMs }; + } catch (e: any) { + return { name: 'Supabase', status: 'error', message: e.message || 'Unreachable', latencyMs: Date.now() - start }; + } + } + + private async checkAuth(): Promise { + try { + const { data } = await supabase.auth.getSession(); + if (!data.session) return { name: 'Auth', status: 'warning', message: 'No active session' }; + const exp = data.session.expires_at; + if (exp && exp * 1000 < Date.now()) return { name: 'Auth', status: 'error', message: 'Token expired' }; + return { name: 'Auth', status: 'ok', message: 'Session active' }; + } catch (e: any) { + return { name: 'Auth', status: 'error', message: e.message || 'Auth check failed' }; + } + } + + private async checkOpenClaw(): Promise { + const host = import.meta.env.VITE_OPENCLAW_HOSTNAME; + if (!host) return { name: 'OpenClaw', status: 'warning', message: 'Not configured (VITE_OPENCLAW_HOSTNAME missing)' }; + const start = Date.now(); + try { + const res = await fetch(`https://${host}/nvidia/v1/models`, { signal: AbortSignal.timeout(5000) }); + const latencyMs = Date.now() - start; + return { name: 'OpenClaw', status: res.ok ? 'ok' : 'warning', message: res.ok ? `Reachable (${latencyMs}ms)` : `HTTP ${res.status}`, latencyMs }; + } catch { + return { name: 'OpenClaw', status: 'error', message: 'VPS unreachable (timeout)', latencyMs: Date.now() - start }; + } + } + + private async checkElectron(): Promise { + if (!IS_ELECTRON) return { name: 'Electron', status: 'ok', message: 'Web build — skipped' }; + return hasElectronAPI() + ? 
{ name: 'Electron', status: 'ok', message: 'IPC bridge available' } + : { name: 'Electron', status: 'error', message: 'IPC bridge missing — preload may have failed' }; + } + + private async checkStorage(): Promise { + try { + if (!navigator.storage?.estimate) return { name: 'Storage', status: 'ok', message: 'API unavailable — skipped' }; + const est = await navigator.storage.estimate(); + const usedMB = Math.round((est.usage || 0) / 1024 / 1024); + const quotaMB = Math.round((est.quota || 0) / 1024 / 1024); + const pct = quotaMB > 0 ? Math.round((usedMB / quotaMB) * 100) : 0; + const status = pct > 90 ? 'error' : pct > 70 ? 'warning' : 'ok'; + return { name: 'Storage', status, message: `${usedMB}MB / ${quotaMB}MB (${pct}%)` }; + } catch { + return { name: 'Storage', status: 'ok', message: 'Check skipped' }; + } + } + + private async checkErrorReporter(): Promise { + try { + // Check if GlitchTip / Sentry SDK is initialized on window + const sentry = (window as any).__SENTRY__; + if (sentry) return { name: 'ErrorReporter', status: 'ok', message: 'Sentry/GlitchTip active' }; + return { name: 'ErrorReporter', status: 'warning', message: 'No error reporter detected' }; + } catch { + return { name: 'ErrorReporter', status: 'ok', message: 'Check skipped' }; + } + } + + // ── Chat ───────────────────────────────────────────────────────────────── + + async chat(messages: SupportMessage[]): Promise { + const lastUser = [...messages].reverse().find((m) => m.role === 'user'); + if (!lastUser) return this.makeMessage('agent', 'I didn\'t catch that. 
Could you rephrase?'); + + const intent = this.classifyIntent(lastUser.content); + + let ragContext = ''; + let diagnosticContext = ''; + let diagnostics: DiagnosticResult | undefined; + + if (intent === 'diagnostic') { + diagnostics = await this.runDiagnostics(); + diagnosticContext = `\n\n## Diagnostic Results (score: ${diagnostics.score}/100)\n${diagnostics.checks.map((c) => `- **${c.name}**: ${c.status.toUpperCase()} — ${c.message}`).join('\n')}\n\nSummary: ${diagnostics.summary}`; + } + + if (intent === 'docs' || intent === 'general') { + const chunks = this.searchKnowledge(lastUser.content); + if (chunks.length) { + ragContext = `\n\n## Documentation Context\n${chunks.map((c) => `### ${c.title} (${c.section})\n${c.content}`).join('\n\n')}`; + } + } + + if (intent === 'escalation') { + return this.makeMessage('agent', + 'I can create a support ticket and escalate to a human. Would you like me to do that?\n\n' + + 'Just say **"yes, escalate"** and I\'ll create a ticket with our conversation and any diagnostics attached.'); + } + + const systemPrompt = `You are the CrowByte Support Agent — a helpful AI assistant built into the CrowByte offensive security platform. + +Your job: +- Help users navigate CrowByte features +- Diagnose technical issues using diagnostic results +- Explain how things work using documentation context +- Offer to escalate to human support when you can't resolve an issue + +Style: Concise, technical, friendly. Use markdown. Be direct. +Never make up features that don't exist. +When diagnostic results are provided, analyze them and suggest fixes.${ragContext}${diagnosticContext}`; + + // Build OpenClaw-compatible message array + const history = messages.slice(-10).map((m) => ({ + role: (m.role === 'agent' ? 
'assistant' : 'user') as 'user' | 'assistant' | 'system', + content: m.content, + })); + + try { + const reply = await openClaw.chat( + [{ role: 'system', content: systemPrompt }, ...history], + undefined, + 0.5, + ); + const msg = this.makeMessage('agent', reply || 'Sorry, I couldn\'t generate a response.'); + if (diagnostics) msg.diagnostics = diagnostics; + return msg; + } catch (e: any) { + return this.makeMessage('agent', `Support agent error: ${e.message || 'Failed to reach AI backend.'}\n\nYou can try running diagnostics or escalate to human support.`); + } + } + + // ── Escalation ─────────────────────────────────────────────────────────── + + async escalate(ticket: EscalationTicket): Promise { + const { data: { user } } = await supabase.auth.getUser(); + const row = { + subject: ticket.subject, + priority: ticket.priority, + status: 'open' as TicketStatus, + conversation: JSON.stringify(ticket.conversation.slice(-20)), + diagnostics: ticket.diagnostics ? JSON.stringify(ticket.diagnostics) : null, + user_email: ticket.userEmail || user?.email || null, + user_id: ticket.userId || user?.id || null, + created_at: new Date().toISOString(), + }; + + const { data, error } = await supabase.from('support_tickets').insert([row]).select('id').single(); + if (error) throw new Error(`Failed to create ticket: ${error.message}`); + + const ticketId = data.id as string; + await this.notifyDiscord(ticket, ticketId).catch(() => {}); + return ticketId; + } + + private async notifyDiscord(ticket: EscalationTicket, ticketId: string): Promise { + const webhookUrl = import.meta.env.VITE_DISCORD_SUPPORT_WEBHOOK; + if (!webhookUrl) return; + + const colorMap: Record = { + critical: 0xff0000, + high: 0xff8c00, + medium: 0xffd700, + low: 0x3b82f6, + }; + + const firstMsg = ticket.conversation.find((m) => m.role === 'user')?.content || '(no message)'; + + await fetch(webhookUrl, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + embeds: 
[{ + title: `Support Ticket: ${ticket.subject}`, + color: colorMap[ticket.priority], + fields: [ + { name: 'Ticket ID', value: ticketId, inline: true }, + { name: 'Priority', value: ticket.priority.toUpperCase(), inline: true }, + { name: 'User', value: ticket.userEmail || ticket.userId || 'Anonymous', inline: true }, + { name: 'Health Score', value: ticket.diagnostics ? `${ticket.diagnostics.score}/100` : 'N/A', inline: true }, + { name: 'First Message', value: firstMsg.slice(0, 200) }, + ], + timestamp: new Date().toISOString(), + }], + }), + }); + } + + // ── Notifications ──────────────────────────────────────────────────────── + + async getNotifications(): Promise { + const { data: { user } } = await supabase.auth.getUser(); + if (!user) return []; + + const { data, error } = await supabase + .from('user_notifications') + .select('*') + .eq('user_id', user.id) + .eq('dismissed', false) + .order('created_at', { ascending: false }) + .limit(50); + + if (error) return []; + return (data || []).map(this.mapNotification); + } + + async markNotificationRead(id: string): Promise { + await supabase.from('user_notifications').update({ read: true }).eq('id', id); + } + + async dismissNotification(id: string): Promise { + await supabase.from('user_notifications').update({ dismissed: true }).eq('id', id); + } + + subscribeToNotifications(callback: (notification: UserNotification) => void): () => void { + let userId: string | null = null; + + const setup = async () => { + const { data: { user } } = await supabase.auth.getUser(); + userId = user?.id || null; + }; + setup(); + + const channel = supabase + .channel('user-notifications') + .on('postgres_changes', { event: 'INSERT', schema: 'public', table: 'user_notifications' }, (payload) => { + const row = payload.new as any; + if (userId && row.user_id === userId) { + callback(this.mapNotification(row)); + } + }) + .subscribe(); + + return () => { supabase.removeChannel(channel); }; + } + + // ── Tickets 
────────────────────────────────────────────────────────────── + + async getTickets(): Promise { + const { data: { user } } = await supabase.auth.getUser(); + if (!user) return []; + + const { data } = await supabase + .from('support_tickets') + .select('*') + .eq('user_id', user.id) + .order('created_at', { ascending: false }); + + return (data || []).map((row: any) => ({ + id: row.id, + subject: row.subject, + priority: row.priority, + conversation: typeof row.conversation === 'string' ? JSON.parse(row.conversation) : row.conversation || [], + diagnostics: row.diagnostics ? (typeof row.diagnostics === 'string' ? JSON.parse(row.diagnostics) : row.diagnostics) : undefined, + userEmail: row.user_email, + userId: row.user_id, + })); + } + + async getTicketById(id: string): Promise { + const { data } = await supabase.from('support_tickets').select('*').eq('id', id).single(); + if (!data) return null; + return { + id: data.id, + subject: data.subject, + priority: data.priority, + conversation: typeof data.conversation === 'string' ? JSON.parse(data.conversation) : data.conversation || [], + diagnostics: data.diagnostics ? (typeof data.diagnostics === 'string' ? 
JSON.parse(data.diagnostics) : data.diagnostics) : undefined, + userEmail: data.user_email, + userId: data.user_id, + }; + } + + // ── Helpers ────────────────────────────────────────────────────────────── + + private makeMessage(role: MessageRole, content: string): SupportMessage { + return { id: crypto.randomUUID(), role, content, timestamp: new Date() }; + } + + private mapNotification(row: any): UserNotification { + return { + id: row.id, + type: row.type, + title: row.title, + message: row.message, + actionUrl: row.action_url, + source: row.source, + read: row.read, + dismissed: row.dismissed, + createdAt: row.created_at, + }; + } +} + +// ── Export singleton ────────────────────────────────────────────────────────── + +export const supportAgent = new SupportAgentService(); +export default supportAgent; diff --git a/apps/desktop/src/vite-env.d.ts b/apps/desktop/src/vite-env.d.ts index 11f02fe..5c8b79a 100644 --- a/apps/desktop/src/vite-env.d.ts +++ b/apps/desktop/src/vite-env.d.ts @@ -1 +1,4 @@ /// + +/** Compile-time build target injected by Vite `define` */ +declare const __BUILD_TARGET__: 'web' | 'electron'; diff --git a/apps/desktop/tsconfig.app.json b/apps/desktop/tsconfig.app.json index 0b0e43e..8d49b7b 100644 --- a/apps/desktop/tsconfig.app.json +++ b/apps/desktop/tsconfig.app.json @@ -19,7 +19,7 @@ "noUnusedLocals": false, "noUnusedParameters": false, "noImplicitAny": false, - "noFallthroughCasesInSwitch": false, + "noFallthroughCasesInSwitch": true, "baseUrl": ".", "paths": { diff --git a/apps/desktop/vite.config.ts b/apps/desktop/vite.config.ts index 3f64aae..e6d0c15 100644 --- a/apps/desktop/vite.config.ts +++ b/apps/desktop/vite.config.ts @@ -3,8 +3,12 @@ import react from "@vitejs/plugin-react-swc"; import path from "path"; // https://vitejs.dev/config/ -export default defineConfig(({ mode }) => ({ - base: './', +export default defineConfig(({ mode }) => { + const buildTarget = process.env.VITE_BUILD_TARGET || 'electron'; + const isWeb = 
buildTarget === 'web'; + + return { + base: isWeb ? '/' : './', server: { host: "::", port: 8081, @@ -24,10 +28,14 @@ export default defineConfig(({ mode }) => ({ }, define: { "process.env.NODE_ENV": JSON.stringify(mode), + "__BUILD_TARGET__": JSON.stringify(buildTarget), + // Strip service key from web builds — must never ship to browser + ...(isWeb ? { "import.meta.env.VITE_SUPABASE_SERVICE_KEY": "undefined" } : {}), }, build: { - outDir: "dist", + outDir: isWeb ? "dist/web" : "dist", chunkSizeWarningLimit: 1000, + sourcemap: 'hidden', // Generate .map files for debugging but don't reference them in bundles rollupOptions: { external: (id: string) => { if (id === 'electron' || id.startsWith('@modelcontextprotocol/')) { @@ -64,4 +72,5 @@ export default defineConfig(({ mode }) => ({ "@modelcontextprotocol/server-memory", ], }, -})); +}; +}); diff --git a/apps/server/src/index.ts b/apps/server/src/index.ts index 04b1683..e9fc656 100644 --- a/apps/server/src/index.ts +++ b/apps/server/src/index.ts @@ -29,9 +29,21 @@ const STATIC_DIR = resolve(new URL('.', import.meta.url).pathname, '../../deskto const app = express(); -// Security headers — relaxed CSP for SPA +// Security headers — relaxed CSP for SPA (allows inline styles/scripts needed by Vite-built app) app.use(helmet({ - contentSecurityPolicy: false, + contentSecurityPolicy: { + directives: { + defaultSrc: ["'self'"], + scriptSrc: ["'self'", "'unsafe-inline'", "'unsafe-eval'"], + styleSrc: ["'self'", "'unsafe-inline'"], + imgSrc: ["'self'", 'data:', 'blob:', 'https:'], + connectSrc: ["'self'", 'wss:', 'ws:', 'https:'], + fontSrc: ["'self'", 'data:', 'https:'], + objectSrc: ["'none'"], + mediaSrc: ["'self'", 'blob:'], + frameSrc: ["'none'"], + }, + }, crossOriginEmbedderPolicy: false, })); diff --git a/apps/server/src/middleware/auth.ts b/apps/server/src/middleware/auth.ts index c46c229..dd7d5ee 100644 --- a/apps/server/src/middleware/auth.ts +++ b/apps/server/src/middleware/auth.ts @@ -2,8 +2,13 @@ import { 
Request, Response, NextFunction } from 'express'; import jwt from 'jsonwebtoken'; import { randomBytes } from 'node:crypto'; -// Generate a stable secret on first boot if none provided -const JWT_SECRET: string = process.env.JWT_SECRET ?? randomBytes(64).toString('hex'); +// Generate a stable secret on first boot if none provided. +// WARNING: without JWT_SECRET in the environment all tokens are invalidated on every restart. +const JWT_SECRET: string = process.env.JWT_SECRET ?? (() => { + console.warn('[!] JWT_SECRET is not set. A random secret will be generated on each startup.'); + console.warn('[!] All existing tokens will be invalidated on server restart. Set JWT_SECRET in your .env to persist sessions.'); + return randomBytes(64).toString('hex'); +})(); const TOKEN_EXPIRY = '24h'; export { JWT_SECRET, TOKEN_EXPIRY }; @@ -27,7 +32,7 @@ declare global { const PUBLIC_PATHS = ['/api/auth/login']; // Prefixes that skip auth (read-only metrics, safe to expose behind nginx) -const PUBLIC_PREFIXES = ['/api/system/', '/api/docker/', '/api/tools/available', '/api/setup/', '/api/health', '/api/errors', '/api/memory/', '/api/fleet/register', '/api/fleet/heartbeat']; +const PUBLIC_PREFIXES = ['/api/system/', '/api/docker/', '/api/tools/available', '/api/setup/', '/api/health', '/api/memory/', '/api/fleet/register', '/api/fleet/heartbeat']; export function authMiddleware(req: Request, res: Response, next: NextFunction): void { // Skip auth for non-API routes (static files, SPA) diff --git a/apps/server/src/routes/auth.ts b/apps/server/src/routes/auth.ts index 39440be..8e44953 100644 --- a/apps/server/src/routes/auth.ts +++ b/apps/server/src/routes/auth.ts @@ -23,6 +23,10 @@ async function getAdminCredentials(): Promise<{ username: string; passwordHash: if (!adminPasswordHash) { const plaintext = process.env.CROWBYTE_PASS ?? 'crowbyte'; + if (!process.env.CROWBYTE_PASS && !process.env.CROWBYTE_PASS_HASH) { + console.warn('[!] CROWBYTE_PASS is not set. 
Using the default password "crowbyte".'); + console.warn('[!] Change it immediately by setting CROWBYTE_PASS or CROWBYTE_PASS_HASH in your .env file.'); + } adminPasswordHash = await bcrypt.hash(plaintext, 12); } diff --git a/apps/server/src/routes/memory.ts b/apps/server/src/routes/memory.ts index a2ab9b7..1aa8630 100644 --- a/apps/server/src/routes/memory.ts +++ b/apps/server/src/routes/memory.ts @@ -4,11 +4,11 @@ */ import { Router, Request, Response } from 'express'; -import { exec } from 'node:child_process'; +import { execFile } from 'node:child_process'; import { promisify } from 'node:util'; import { existsSync } from 'node:fs'; -const execAsync = promisify(exec); +const execFileAsync = promisify(execFile); const router = Router(); // Path to memory-engine bridge script @@ -17,30 +17,34 @@ const PYTHON = process.env.PYTHON_PATH || 'python3'; const EXEC_TIMEOUT = 30_000; // 30s max per call /** - * Execute a bridge command and return parsed JSON + * Execute a bridge command and return parsed JSON. + * Uses execFile (not exec) to avoid shell injection. 
*/ async function callBridge(command: string, args: Record = {}): Promise { - const jsonArgs = JSON.stringify(args).replace(/'/g, "'\\''"); // escape single quotes for shell - const cmd = `${PYTHON} "${BRIDGE_PATH}" ${command} '${jsonArgs}'`; + const jsonArgs = JSON.stringify(args); try { - const { stdout, stderr } = await execAsync(cmd, { - timeout: EXEC_TIMEOUT, - env: { ...process.env, PYTHONPATH: BRIDGE_PATH.replace(/\/bridge\.py$/, '') }, - }); + const { stdout, stderr } = await execFileAsync( + PYTHON, + [BRIDGE_PATH, command, jsonArgs], + { + timeout: EXEC_TIMEOUT, + env: { ...process.env, PYTHONPATH: BRIDGE_PATH.replace(/\/bridge\.py$/, '') }, + }, + ); if (stderr && !stdout) { - console.error(`[memory] bridge stderr: ${stderr}`); - return { error: stderr.trim() }; + console.error(`[memory] bridge stderr: ${stderr}`); // full detail logged server-side + return { error: 'Memory bridge returned an error' }; // sanitized message to client } return JSON.parse(stdout.trim()); } catch (err: any) { - console.error(`[memory] bridge error (${command}):`, err.message); + console.error(`[memory] bridge error (${command}):`, err.message); // full detail logged server-side if (err.stdout) { try { return JSON.parse(err.stdout.trim()); } catch {} } - throw new Error(`Memory bridge failed: ${err.message}`); + throw new Error('Memory bridge call failed'); // sanitized message to client } } diff --git a/apps/server/src/routes/tools.ts b/apps/server/src/routes/tools.ts index 6536a93..4b3be71 100644 --- a/apps/server/src/routes/tools.ts +++ b/apps/server/src/routes/tools.ts @@ -107,9 +107,9 @@ function validateCommand(command: string): boolean { } function sanitizeArgs(args: string[]): string[] { - // Block shell metacharacters in individual args + // Block shell metacharacters in all args — there is no safe exception for flag-looking args return args.map(arg => { - if (/[;&|`$(){}]/.test(arg) && !arg.startsWith('-')) { + if (/[;&|`$(){}]/.test(arg)) { throw new Error(`Unsafe 
argument rejected: ${arg}`); } return arg; @@ -237,6 +237,17 @@ router.get('/available', async (_req: Request, res: Response): Promise => } }); +// Validate that a scan target looks like a hostname, IP address, IP range, or URL. +// This prevents obvious shell injection targets and garbage input. +const TARGET_PATTERN = /^[a-zA-Z0-9._\-/:[\]]+$/; + +function validateTarget(target: string): boolean { + return typeof target === 'string' && + target.length > 0 && + target.length <= 500 && + TARGET_PATTERN.test(target); +} + // POST /api/tools/scan — quick scan presets router.post('/scan', async (req: Request, res: Response): Promise => { try { @@ -250,8 +261,8 @@ router.post('/scan', async (req: Request, res: Response): Promise => { return; } - if (typeof target !== 'string' || target.length > 500) { - res.status(400).json({ error: 'Invalid target' }); + if (!validateTarget(target)) { + res.status(400).json({ error: 'Invalid target: must be a valid hostname, IP address, CIDR range, or URL' }); return; } diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 86aa4aa..2bcd8fe 100644 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -50,13 +50,31 @@ echo "[*] Starting CrowByte Terminal..." cd /app # Pass through all VITE_* env vars to Electron +export NODE_ENV=production export ELECTRON_DISABLE_SECURITY_WARNINGS=true export ELECTRON_NO_ATTACH_CONSOLE=true +# Pre-seed onboarding config so Docker containers skip the wizard +# Electron uses package.json "name" (lowercase) for userData path +CROWBYTE_CONFIG_DIR="/root/.config/crowbyte" +mkdir -p "${CROWBYTE_CONFIG_DIR}" +if [ ! -f "${CROWBYTE_CONFIG_DIR}/crowbyte-config.json" ]; then + cat > "${CROWBYTE_CONFIG_DIR}/crowbyte-config.json" <&1 | sed 's/^/[electron] /' &