diff --git a/.gitignore b/.gitignore index d44f986f6..cbc487340 100644 --- a/.gitignore +++ b/.gitignore @@ -40,3 +40,6 @@ docs/.astro/ # Swift Package Manager metadata (leave sources tracked) # Packages/ # Package.resolved +._* +**/.DS_Store +**/._%2A diff --git a/Sources/CodexBar/PreferencesLMManagementPane.swift b/Sources/CodexBar/PreferencesLMManagementPane.swift new file mode 100644 index 000000000..6829c41f6 --- /dev/null +++ b/Sources/CodexBar/PreferencesLMManagementPane.swift @@ -0,0 +1,1169 @@ +import AppKit +import CodexBarCore +import SwiftUI + +// MARK: - LM Management Pane + +/// Unified LM (Language Model) management hub for CodexBar + OpenClaw. +/// Shows all configured providers, their models, health status, Ollama endpoints, +/// and the active fallback chain in one visual dashboard. +@MainActor +struct LMManagementPane: View { + @Bindable var settings: SettingsStore + @Bindable var store: UsageStore + @State private var ollamaEndpoints: [OllamaEndpointStatus] = [] + @State private var isProbing = false + @State private var lastProbeDate: Date? + @State private var exportOutput: String? + @State private var showExportSheet = false + @State private var injectStatus: String? 
+ @State private var injectSuccess = false + @State private var discoveredGateways: [DiscoveredGateway] = [] + @State private var isDiscovering = false + @State private var selectedGatewayPort: Int = 18789 + @State private var showGatewayPicker = false + @State private var ollamaLoadedModels: [OllamaRunningModel] = [] + @State private var ollamaAllModels: [OllamaInstalledModel] = [] + @State private var systemMemory: SystemMemoryInfo = .init() + @State private var isLoadingModel = false + @State private var loadingModelName: String = "" + @State private var fallbackProviders: [FallbackProvider] = FallbackProvider.loadFromDisk() + @State private var fallbackDirty = false + + var body: some View { + ScrollView(.vertical, showsIndicators: true) { + VStack(alignment: .leading, spacing: 16) { + // Provider overview + self.providerOverviewSection + + Divider() + + // Ollama Control Panel + self.ollamaControlSection + + Divider() + + // Ollama endpoints + self.ollamaEndpointsSection + + Divider() + + // HTTP Local LMs + self.httpLocalLMSection + + Divider() + + // Fallback chain + self.fallbackChainSection + + Divider() + + // OpenClaw export & inject + self.openClawExportSection + } + .frame(maxWidth: .infinity, alignment: .leading) + .padding(.horizontal, 20) + .padding(.vertical, 12) + } + .sheet(isPresented: self.$showExportSheet) { + self.exportSheet + } + .onAppear { + self.probeOllamaEndpoints() + Task { await self.discoverGateways() } + Task { await self.refreshOllamaState() } + } + } + + // MARK: - Provider Overview + + private var providerOverviewSection: some View { + SettingsSection(contentSpacing: 10) { + HStack { + Text("Provider Overview") + .font(.caption) + .foregroundStyle(.secondary) + .textCase(.uppercase) + Spacer() + Text("\(self.enabledProviders.count) active") + .font(.caption) + .foregroundStyle(.secondary) + } + + ForEach(self.enabledProviders, id: \.rawValue) { provider in + self.providerRow(provider) + } + } + } + + private func providerRow(_ 
provider: UsageProvider) -> some View { + let descriptor = ProviderDescriptorRegistry.descriptor(for: provider) + let meta = descriptor.metadata + + return HStack(spacing: 12) { + // Status indicator + Circle() + .fill(self.providerStatusColor(provider)) + .frame(width: 8, height: 8) + + VStack(alignment: .leading, spacing: 2) { + Text(meta.displayName) + .font(.body.weight(.medium)) + + HStack(spacing: 8) { + if meta.supportsOpus { + self.badge("Opus", color: .purple) + } + if meta.supportsCredits { + self.badge("Credits", color: .orange) + } + self.badge(meta.cliName, color: .gray) + } + } + + Spacer() + + // Usage summary + if let snapshot = self.store.snapshot(for: provider) { + VStack(alignment: .trailing, spacing: 2) { + if let primary = snapshot.primary { + Text("\(Int(100 - primary.usedPercent))% remaining") + .font(.footnote) + .foregroundStyle(.secondary) + } + Text(UsageFormatter.updatedString(from: snapshot.updatedAt)) + .font(.caption2) + .foregroundStyle(.tertiary) + } + } else { + Text("No data") + .font(.footnote) + .foregroundStyle(.tertiary) + } + } + .padding(.vertical, 6) + .padding(.horizontal, 10) + .background( + RoundedRectangle(cornerRadius: 8) + .fill(Color(nsColor: .controlBackgroundColor)) + ) + } + + // MARK: - Ollama Endpoints + + private var ollamaEndpointsSection: some View { + SettingsSection(contentSpacing: 10) { + HStack { + Text("Ollama Endpoints") + .font(.caption) + .foregroundStyle(.secondary) + .textCase(.uppercase) + Spacer() + if self.isProbing { + ProgressView() + .controlSize(.small) + .padding(.trailing, 4) + Text("Probing...") + .font(.caption) + .foregroundStyle(.secondary) + } else { + Button("Refresh") { + self.probeOllamaEndpoints() + } + .buttonStyle(.borderless) + .font(.caption) + } + } + + if self.ollamaEndpoints.isEmpty, !self.isProbing { + HStack(spacing: 8) { + Image(systemName: "server.rack") + .foregroundStyle(.tertiary) + Text("No Ollama endpoints detected. 
Start Ollama locally or configure a LAN endpoint.") + .font(.footnote) + .foregroundStyle(.tertiary) + } + .padding(.vertical, 8) + } else { + ForEach(self.ollamaEndpoints, id: \.url) { endpoint in + self.ollamaEndpointRow(endpoint) + } + } + + if let lastProbe = self.lastProbeDate { + Text("Last probed: \(UsageFormatter.updatedString(from: lastProbe))") + .font(.caption2) + .foregroundStyle(.tertiary) + } + } + } + + private func ollamaEndpointRow(_ endpoint: OllamaEndpointStatus) -> some View { + VStack(alignment: .leading, spacing: 6) { + HStack(spacing: 10) { + Circle() + .fill(endpoint.isOnline ? Color.green : Color.red) + .frame(width: 8, height: 8) + + VStack(alignment: .leading, spacing: 2) { + Text(endpoint.label) + .font(.body.weight(.medium)) + Text(endpoint.url) + .font(.caption) + .foregroundStyle(.tertiary) + .textSelection(.enabled) + } + + Spacer() + + if endpoint.isOnline { + VStack(alignment: .trailing, spacing: 2) { + if let version = endpoint.version { + Text("v\(version)") + .font(.caption) + .foregroundStyle(.secondary) + } + Text("\(endpoint.modelCount) models") + .font(.footnote) + .foregroundStyle(.secondary) + } + } else { + Text("Offline") + .font(.footnote) + .foregroundStyle(.red) + } + } + + // Model list (collapsed by default for online endpoints) + if endpoint.isOnline, !endpoint.models.isEmpty { + DisclosureGroup("Models") { + VStack(alignment: .leading, spacing: 4) { + ForEach(endpoint.models, id: \.name) { model in + HStack(spacing: 8) { + if model.isRunning { + Image(systemName: "bolt.fill") + .font(.caption2) + .foregroundStyle(.green) + } + Text(model.name) + .font(.footnote.monospaced()) + Spacer() + Text(model.sizeLabel) + .font(.caption2) + .foregroundStyle(.tertiary) + if model.isReasoning { + self.badge("reasoning", color: .blue) + } + } + } + } + .padding(.leading, 18) + } + .font(.footnote) + .foregroundStyle(.secondary) + } + } + .padding(.vertical, 6) + .padding(.horizontal, 10) + .background( + 
RoundedRectangle(cornerRadius: 8) + .fill(Color(nsColor: .controlBackgroundColor)) + ) + } + + // MARK: - Fallback Chain + + private var fallbackChainSection: some View { + SettingsSection(contentSpacing: 10) { + HStack { + Text("Fallback Order") + .font(.caption).foregroundStyle(.secondary).textCase(.uppercase) + Spacer() + if self.fallbackDirty { + Button("Save") { self.saveFallbackOrder() } + .buttonStyle(.borderedProminent).controlSize(.small) + } + } + + Text("Reorder providers with arrows. Accounts within each provider are tried in order before moving to the next provider.") + .font(.footnote).foregroundStyle(.tertiary) + .fixedSize(horizontal: false, vertical: true) + + ForEach(Array(self.fallbackProviders.enumerated()), id: \.element.id) { index, provider in + VStack(alignment: .leading, spacing: 4) { + HStack(spacing: 10) { + Circle().fill(Color.accentColor).frame(width: 20, height: 20) + .overlay { Text("\(index + 1)").font(.caption2.weight(.bold)).foregroundStyle(.white) } + + VStack(alignment: .leading, spacing: 2) { + Text(provider.displayName).font(.footnote.weight(.medium)) + Text(provider.detail).font(.caption2).foregroundStyle(.tertiary) + } + Spacer() + + if index > 0 { + Button { self.moveFallbackProvider(from: index, direction: -1) } label: { + Image(systemName: "chevron.up").font(.caption) + }.buttonStyle(.borderless) + } + if index < self.fallbackProviders.count - 1 { + Button { self.moveFallbackProvider(from: index, direction: 1) } label: { + Image(systemName: "chevron.down").font(.caption) + }.buttonStyle(.borderless) + } + } + + if !provider.accounts.isEmpty { + ForEach(Array(provider.accounts.enumerated()), id: \.element) { accIdx, account in + HStack(spacing: 8) { + Rectangle().fill(Color.accentColor.opacity(0.3)).frame(width: 2, height: 16) + .padding(.leading, 9) + Text(account).font(.caption2.monospaced()).foregroundStyle(.secondary).lineLimit(1) + Spacer() + if accIdx > 0 { + Button { self.moveAccount(providerIndex: index, from: 
accIdx, direction: -1) } label: { + Image(systemName: "chevron.up").font(.system(size: 8)) + }.buttonStyle(.borderless) + } + if accIdx < provider.accounts.count - 1 { + Button { self.moveAccount(providerIndex: index, from: accIdx, direction: 1) } label: { + Image(systemName: "chevron.down").font(.system(size: 8)) + }.buttonStyle(.borderless) + } + } + } + } + } + .padding(.vertical, 4).padding(.horizontal, 8) + .background(RoundedRectangle(cornerRadius: 8).fill(Color(nsColor: .controlBackgroundColor))) + } + } + } + + private func moveFallbackProvider(from index: Int, direction: Int) { + let newIndex = index + direction + guard newIndex >= 0, newIndex < self.fallbackProviders.count else { return } + self.fallbackProviders.swapAt(index, newIndex) + self.fallbackDirty = true + } + + private func moveAccount(providerIndex: Int, from accIndex: Int, direction: Int) { + let newIndex = accIndex + direction + guard newIndex >= 0, newIndex < self.fallbackProviders[providerIndex].accounts.count else { return } + self.fallbackProviders[providerIndex].accounts.swapAt(accIndex, newIndex) + self.fallbackDirty = true + } + + private func saveFallbackOrder() { + FallbackProvider.saveToDisk(self.fallbackProviders) + self.fallbackDirty = false + self.injectStatus = "Fallback order saved — press Inject to apply" + } + + // MARK: - HTTP Local LM + + private var httpLocalLMSection: some View { + SettingsSection(contentSpacing: 10) { + HStack { + Text("HTTP Local LMs") + .font(.caption) + .foregroundStyle(.secondary) + .textCase(.uppercase) + Spacer() + self.badge("auto-detected", color: .blue) + } + + Text("Any local LM server speaking the OpenAI-compatible API at a known endpoint. 
Ollama, LM Studio, llama.cpp, vLLM, and others are auto-probed.") + .font(.footnote) + .foregroundStyle(.tertiary) + .fixedSize(horizontal: false, vertical: true) + + // Show the detected local models across all endpoints + let onlineEndpoints = self.ollamaEndpoints.filter(\.isOnline) + let totalModels = onlineEndpoints.reduce(0) { $0 + $1.modelCount } + + if totalModels > 0 { + HStack(spacing: 12) { + Image(systemName: "cpu") + .font(.title2) + .foregroundStyle(.green) + VStack(alignment: .leading, spacing: 2) { + Text("\(totalModels) local models available") + .font(.body.weight(.medium)) + Text("across \(onlineEndpoints.count) endpoint\(onlineEndpoints.count == 1 ? "" : "s")") + .font(.footnote) + .foregroundStyle(.secondary) + } + Spacer() + } + .padding(.vertical, 6) + .padding(.horizontal, 10) + .background( + RoundedRectangle(cornerRadius: 8) + .fill(Color.green.opacity(0.08)) + ) + } else { + HStack(spacing: 8) { + Image(systemName: "cpu") + .foregroundStyle(.tertiary) + Text("No local LM servers detected. 
Start Ollama or another OpenAI-compatible server.") + .font(.footnote) + .foregroundStyle(.tertiary) + } + .padding(.vertical, 6) + } + } + } + + // MARK: - OpenClaw Export & Inject + + private var openClawExportSection: some View { + SettingsSection(contentSpacing: 10) { + Text("OpenClaw Integration") + .font(.caption) + .foregroundStyle(.secondary) + .textCase(.uppercase) + + Text("Push all providers, models, and fallback config directly into your OpenClaw gateway.") + .font(.footnote) + .foregroundStyle(.tertiary) + .fixedSize(horizontal: false, vertical: true) + + // Gateway discovery + VStack(alignment: .leading, spacing: 8) { + HStack { + Text("Detected Gateways") + .font(.caption2) + .foregroundStyle(.tertiary) + Spacer() + if self.isDiscovering { + ProgressView().controlSize(.small) + } else { + Button("Scan") { Task { await self.discoverGateways() } } + .buttonStyle(.borderless).font(.caption) + } + } + + if self.discoveredGateways.isEmpty { + HStack(spacing: 6) { + Image(systemName: "network.slash") + .foregroundStyle(.tertiary) + Text("No OpenClaw gateways detected. Start one and tap Scan.") + .font(.footnote).foregroundStyle(.tertiary) + }.padding(.vertical, 4) + } else { + ForEach(self.discoveredGateways, id: \.port) { gw in + HStack(spacing: 10) { + Image(systemName: self.selectedGatewayPort == gw.port ? "checkmark.circle.fill" : "circle") + .foregroundStyle(self.selectedGatewayPort == gw.port ? .green : .secondary) + .onTapGesture { self.selectedGatewayPort = gw.port } + VStack(alignment: .leading, spacing: 2) { + Text(gw.name).font(.footnote.weight(.medium)) + Text("127.0.0.1:\(gw.port)").font(.caption2).foregroundStyle(.tertiary) + } + Spacer() + Circle().fill(.green).frame(width: 6, height: 6) + } + .padding(.vertical, 2) + .padding(.horizontal, 8) + .background( + RoundedRectangle(cornerRadius: 6) + .fill(self.selectedGatewayPort == gw.port ? 
Color.accentColor.opacity(0.08) : Color.clear) + ) + .onTapGesture { self.selectedGatewayPort = gw.port } + } + } + } + + // Action buttons + VStack(alignment: .leading, spacing: 10) { + HStack(spacing: 12) { + Button("Inject to OpenClaw") { + Task { await self.injectToOpenClaw() } + } + .buttonStyle(.borderedProminent) + .controlSize(.regular) + .disabled(self.discoveredGateways.isEmpty) + + Button("Preview Export") { + Task { await self.generateExport() } + } + .buttonStyle(.bordered) + .controlSize(.regular) + + Button("Copy CLI Command") { + NSPasteboard.general.clearContents() + NSPasteboard.general.setString( + "codexbar-bridge --apply", + forType: .string) + } + .buttonStyle(.bordered) + .controlSize(.regular) + } + + if let injectStatus = self.injectStatus { + HStack(spacing: 6) { + Image(systemName: self.injectSuccess ? "checkmark.circle.fill" : "exclamationmark.triangle.fill") + .foregroundStyle(self.injectSuccess ? .green : .orange) + Text(injectStatus) + .font(.footnote) + .foregroundStyle(self.injectSuccess ? 
Color.secondary : Color.orange) + } + } + } + } + } + + private var exportSheet: some View { + VStack(alignment: .leading, spacing: 16) { + Text("OpenClaw Export") + .font(.headline) + + if let output = self.exportOutput { + ScrollView { + Text(output) + .font(.system(.caption, design: .monospaced)) + .textSelection(.enabled) + .frame(maxWidth: .infinity, alignment: .leading) + .padding(8) + } + .frame(maxHeight: 300) + .background(Color(nsColor: .textBackgroundColor)) + .cornerRadius(8) + } else { + ProgressView("Generating export...") + } + + HStack { + Spacer() + Button("Copy to Clipboard") { + if let output = self.exportOutput { + NSPasteboard.general.clearContents() + NSPasteboard.general.setString(output, forType: .string) + } + } + .disabled(self.exportOutput == nil) + + Button("Close") { + self.showExportSheet = false + } + .keyboardShortcut(.cancelAction) + } + } + .padding(20) + .frame(width: 560) + } + + // MARK: - Helpers + + private var enabledProviders: [UsageProvider] { + self.settings.orderedProviders() + } + + private func providerStatusColor(_ provider: UsageProvider) -> Color { + if self.store.refreshingProviders.contains(provider) { + return .yellow + } + if self.store.snapshot(for: provider) != nil { + return .green + } + if self.store.errors[provider] != nil { + return .red + } + return .gray + } + + private func badge(_ text: String, color: Color) -> some View { + Text(text) + .font(.caption2) + .padding(.horizontal, 6) + .padding(.vertical, 2) + .background(color.opacity(0.15)) + .foregroundStyle(color) + .clipShape(Capsule()) + } + + private func probeOllamaEndpoints() { + guard !self.isProbing else { return } + self.isProbing = true + + Task { + let fetcher = OllamaLocalFetcher() + let endpoints = [OllamaLocalEndpoint.macLocal, OllamaLocalEndpoint.windowsLAN] + let results = await fetcher.probeAll(endpoints: endpoints) + + self.ollamaEndpoints = results.map { result in + OllamaEndpointStatus( + url: result.endpoint.url, + label: 
result.endpoint.label, + isOnline: result.isOnline, + version: result.version, + modelCount: result.models.count, + models: result.models.map { model in + OllamaModelStatus( + name: model.name, + sizeLabel: model.sizeLabel, + isRunning: model.isRunning, + isReasoning: model.isReasoning) + }) + } + + self.lastProbeDate = Date() + self.isProbing = false + } + } + + /// Inject CodexBar config into OpenClaw via authenticated WebSocket RPC. + /// + /// Security: Uses gateway token auth over WebSocket — NO file writes, + /// NO shell scripts, NO kill -9. Everything goes through the gateway API. + /// + /// Falls back to legacy shell script if the gateway is not reachable. + private func injectToOpenClaw() async { + self.injectStatus = "Preparing export..." + self.injectSuccess = false + + // Build the export data + let exporter = OpenClawExporter() + let fetcher = OllamaLocalFetcher() + let endpoints = [OllamaLocalEndpoint.macLocal, OllamaLocalEndpoint.windowsLAN] + let ollamaResults = await fetcher.probeAll(endpoints: endpoints) + let codexAccounts = CodexAccountInfo.loadManagedAccounts() + let version = Bundle.main.infoDictionary?["CFBundleShortVersionString"] as? String ?? "dev" + + // Build fallback order from the user's UI arrangement + let userFallbackOrder: [String]? = self.fallbackProviders.isEmpty ? nil : { + var order: [String] = [] + for provider in self.fallbackProviders { + for account in provider.accounts { + order.append(account.modelRef) + } + } + return order.isEmpty ? nil : order + }() + + let export = exporter.export( + ollamaResults: ollamaResults, + codexAccounts: codexAccounts, + fallbackOrder: userFallbackOrder, + codexbarVersion: version) + + // Try WebSocket RPC first (secure path) + let port = self.selectedGatewayPort + do { + self.injectStatus = "Connecting to gateway (port \(port))..." 
+ + // Ensure pairing + let _ = try OpenClawPairing.ensurePaired(port: port) + + // Connect + let client = OpenClawGatewayClient(port: port) + try await client.connect() + + do { + // Get current config + base hash + self.injectStatus = "Reading gateway config..." + let snapshot = try await client.configGet() + + // Build merge-patch from export + let patch = OpenClawPatchBuilder.buildPatch(from: export) + + // Apply patch + self.injectStatus = "Applying config patch..." + let result = try await client.configPatch(patch: patch, baseHash: snapshot.baseHash) + + await client.disconnect() + + if result.ok { + self.injectSuccess = true + self.injectStatus = "Injected via gateway API (port \(port))" + } else { + self.injectSuccess = false + self.injectStatus = "Gateway rejected patch" + } + } catch { + await client.disconnect() + throw error + } + } catch { + // WebSocket failed — fall back to legacy shell script + self.injectStatus = "Gateway unavailable, falling back to legacy inject..." + await injectToOpenClawLegacy(export: export) + } + } + + /// Legacy fallback: file-write + shell script injection. + /// Used when the gateway is not running (offline mode). + private func injectToOpenClawLegacy(export: OpenClawExport) async { + let encoder = JSONEncoder() + encoder.outputFormatting = [.prettyPrinted, .sortedKeys] + guard let jsonData = try? 
encoder.encode(export), + let json = String(data: jsonData, encoding: .utf8) + else { + self.injectSuccess = false + self.injectStatus = "Failed to encode export JSON" + return + } + + let exportPath = FileManager.default.homeDirectoryForCurrentUser + .appendingPathComponent(".codexbar/openclaw-export.json") + + do { + try FileManager.default.createDirectory( + at: exportPath.deletingLastPathComponent(), + withIntermediateDirectories: true) + try json.data(using: .utf8)?.write(to: exportPath, options: .atomic) + + try FileManager.default.setAttributes( + [.posixPermissions: 0o600], + ofItemAtPath: exportPath.path) + + self.injectStatus = "Running legacy inject script..." + let injectScript = FileManager.default.homeDirectoryForCurrentUser + .appendingPathComponent(".openclaw/workspace/ops/codexbar-startup-inject.sh") + + if FileManager.default.isExecutableFile(atPath: injectScript.path) { + let process = Process() + process.executableURL = URL(fileURLWithPath: "/bin/bash") + process.arguments = [injectScript.path] + process.environment = ProcessInfo.processInfo.environment + let pipe = Pipe() + process.standardOutput = pipe + process.standardError = pipe + + do { + try process.run() + process.waitUntilExit() + + let output = String(data: pipe.fileHandleForReading.readDataToEndOfFile(), encoding: .utf8) ?? "" + + if process.terminationStatus == 0 { + self.injectSuccess = true + self.injectStatus = "Injected via legacy script (offline mode)" + } else { + self.injectSuccess = false + self.injectStatus = "Legacy inject failed: \(output.suffix(80))" + } + } catch { + self.injectSuccess = false + self.injectStatus = "Legacy script error: \(error.localizedDescription)" + } + } else { + self.injectSuccess = true + self.injectStatus = "Exported — run codexbar-inject manually to apply" + } + } catch { + self.injectSuccess = false + self.injectStatus = "Failed: \(error.localizedDescription)" + } + } + + /// Discover running OpenClaw gateways by scanning common ports. 
+ private func discoverGateways() async { + self.isDiscovering = true + var found: [DiscoveredGateway] = [] + + let ports = [18789, 19789, 20789, 21789, 22789] + let session = URLSession(configuration: { + let c = URLSessionConfiguration.ephemeral + c.timeoutIntervalForRequest = 2 + return c + }()) + + for port in ports { + guard let url = URL(string: "http://127.0.0.1:\(port)/health") else { continue } + do { + let (data, response) = try await session.data(from: url) + if let http = response as? HTTPURLResponse, http.statusCode == 200, + let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any], + json["ok"] as? Bool == true + { + let name = port == 18789 ? "OpenClaw (Live)" : "OpenClaw (Port \(port))" + found.append(DiscoveredGateway(port: port, name: name, status: "live")) + } + } catch { + // Port not responding — skip + } + } + + self.discoveredGateways = found + if let first = found.first { self.selectedGatewayPort = first.port } + self.isDiscovering = false + } + + // MARK: - Ollama Control Panel + + private var ollamaControlSection: some View { + SettingsSection(contentSpacing: 10) { + HStack { + Text("Ollama Control") + .font(.caption).foregroundStyle(.secondary).textCase(.uppercase) + Spacer() + Button("Refresh") { Task { await self.refreshOllamaState() } } + .buttonStyle(.borderless).font(.caption) + } + + // System resources + HStack(spacing: 16) { + VStack(alignment: .leading, spacing: 2) { + Text("RAM").font(.caption2).foregroundStyle(.tertiary) + Text("\(self.systemMemory.usedGB, specifier: "%.1f") / \(self.systemMemory.totalGB, specifier: "%.0f") GB") + .font(.footnote.weight(.medium)) + } + VStack(alignment: .leading, spacing: 2) { + Text("GPU").font(.caption2).foregroundStyle(.tertiary) + Text(self.systemMemory.gpuName).font(.footnote.weight(.medium)) + } + Spacer() + } + .padding(.vertical, 4).padding(.horizontal, 10) + .background(RoundedRectangle(cornerRadius: 8).fill(Color(nsColor: .controlBackgroundColor))) + + // Currently 
loaded models + VStack(alignment: .leading, spacing: 6) { + Text("Loaded Models").font(.caption2).foregroundStyle(.tertiary) + + if self.ollamaLoadedModels.isEmpty { + Text("No models loaded — select one below to load it") + .font(.footnote).foregroundStyle(.tertiary).padding(.vertical, 4) + } else { + ForEach(self.ollamaLoadedModels, id: \.name) { model in + HStack(spacing: 10) { + Image(systemName: "bolt.fill").foregroundStyle(.green).font(.caption) + VStack(alignment: .leading, spacing: 2) { + Text(model.name).font(.footnote.weight(.medium)) + Text("\(model.sizeGB, specifier: "%.1f")GB • \(model.processor) • ctx=\(model.contextLength)") + .font(.caption2).foregroundStyle(.secondary) + } + Spacer() + Text(model.expiresIn).font(.caption2).foregroundStyle(.tertiary) + Button("Eject") { + Task { await self.ejectModel(model.name) } + } + .buttonStyle(.bordered).controlSize(.small) + .tint(.red) + } + .padding(.vertical, 4).padding(.horizontal, 10) + .background(RoundedRectangle(cornerRadius: 8).fill(Color.green.opacity(0.06))) + } + } + } + + // All installed models + VStack(alignment: .leading, spacing: 6) { + Text("Installed Models").font(.caption2).foregroundStyle(.tertiary) + + ForEach(self.ollamaAllModels, id: \.name) { model in + HStack(spacing: 10) { + let isLoaded = self.ollamaLoadedModels.contains(where: { $0.name == model.name }) + Image(systemName: isLoaded ? "circle.fill" : "circle") + .foregroundStyle(isLoaded ? 
.green : .gray) + .font(.caption2) + VStack(alignment: .leading, spacing: 2) { + Text(model.name).font(.footnote.monospaced()) + HStack(spacing: 8) { + Text("\(model.sizeGB, specifier: "%.1f")GB").font(.caption2).foregroundStyle(.secondary) + if model.isEmbedding { + self.badge("embed", color: .gray) + } + if model.isReasoning { + self.badge("reasoning", color: .blue) + } + Text("ctx: \(model.contextLength / 1024)k").font(.caption2).foregroundStyle(.tertiary) + } + } + Spacer() + if !isLoaded && !model.isEmbedding { + if self.isLoadingModel && self.loadingModelName == model.name { + ProgressView().controlSize(.small) + } else { + Button("Load") { + Task { await self.loadModel(model.name) } + } + .buttonStyle(.bordered).controlSize(.small) + } + } + } + .padding(.vertical, 2).padding(.horizontal, 10) + .background(RoundedRectangle(cornerRadius: 6).fill(Color(nsColor: .controlBackgroundColor))) + } + } + } + } + + private func refreshOllamaState() async { + // System memory + let totalRAM = Double(ProcessInfo.processInfo.physicalMemory) / 1_073_741_824.0 + self.systemMemory = SystemMemoryInfo( + totalGB: totalRAM, + usedGB: totalRAM * 0.6, // Approximate — macOS doesn't expose this simply + gpuName: "Apple Silicon (Unified)") + + let session = URLSession(configuration: .ephemeral) + let baseURL = "http://127.0.0.1:11434" + + // Running models via /api/ps + do { + let (data, _) = try await session.data(from: URL(string: "\(baseURL)/api/ps")!) + if let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any], + let models = json["models"] as? [[String: Any]] + { + self.ollamaLoadedModels = models.map { m in + let sizeVRAM = (m["size_vram"] as? Int64 ?? 0) + let size = (m["size"] as? Int64 ?? 0) + let expiresAt = m["expires_at"] as? String ?? 
"" + let processor: String + if sizeVRAM > 0 && sizeVRAM >= size { + processor = "100% GPU" + } else if sizeVRAM > 0 { + let pct = Int(Double(sizeVRAM) / Double(max(size, 1)) * 100) + processor = "\(pct)% GPU" + } else { + processor = "CPU" + } + // Parse context from details + let details = m["details"] as? [String: Any] ?? [:] + let ctx = details["context_length"] as? Int ?? 0 + + return OllamaRunningModel( + name: m["name"] as? String ?? "?", + sizeGB: Double(size) / 1_073_741_824.0, + processor: processor, + contextLength: ctx, + expiresIn: Self.formatExpiry(expiresAt)) + } + } + } catch {} + + // All installed models via /api/tags + do { + let (data, _) = try await session.data(from: URL(string: "\(baseURL)/api/tags")!) + if let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any], + let models = json["models"] as? [[String: Any]] + { + var installed: [OllamaInstalledModel] = [] + for m in models { + let name = m["name"] as? String ?? "?" + let size = m["size"] as? Int64 ?? 0 + + // Get context window via /api/show + var ctx = 131072 + do { + var showReq = URLRequest(url: URL(string: "\(baseURL)/api/show")!) + showReq.httpMethod = "POST" + showReq.setValue("application/json", forHTTPHeaderField: "Content-Type") + showReq.httpBody = "{\"name\":\"\(name)\"}".data(using: .utf8) + let (showData, _) = try await session.data(for: showReq) + if let showJSON = try? JSONSerialization.jsonObject(with: showData) as? [String: Any], + let info = showJSON["model_info"] as? [String: Any] + { + for (key, value) in info { + let lower = key.lowercased() + if lower.contains("original") || lower.contains("rope") { continue } + if lower.hasSuffix("context_length") || lower.hasSuffix(".context_length") { + if let c = value as? 
Int { ctx = c; break } + } + } + } + } catch {} + + let isEmbed = name.contains("embed") || name.contains("nomic") || name.contains("bge") + let isReasoning = ["coder", "qwen3", "r1", "think"].contains(where: { name.lowercased().contains($0) }) + + installed.append(OllamaInstalledModel( + name: name, + sizeGB: Double(size) / 1_073_741_824.0, + contextLength: ctx, + isEmbedding: isEmbed, + isReasoning: isReasoning)) + } + self.ollamaAllModels = installed.sorted { $0.sizeGB < $1.sizeGB } + } + } catch {} + } + + private func loadModel(_ name: String) async { + self.isLoadingModel = true + self.loadingModelName = name + + let session = URLSession(configuration: .ephemeral) + var req = URLRequest(url: URL(string: "http://127.0.0.1:11434/api/chat")!) + req.httpMethod = "POST" + req.setValue("application/json", forHTTPHeaderField: "Content-Type") + // Send empty messages to load without generating + req.httpBody = "{\"model\":\"\(name)\",\"messages\":[],\"stream\":false}".data(using: .utf8) + req.timeoutInterval = 120 + + _ = try? await session.data(for: req) + + self.isLoadingModel = false + self.loadingModelName = "" + await self.refreshOllamaState() + } + + private func ejectModel(_ name: String) async { + let session = URLSession(configuration: .ephemeral) + var req = URLRequest(url: URL(string: "http://127.0.0.1:11434/api/chat")!) + req.httpMethod = "POST" + req.setValue("application/json", forHTTPHeaderField: "Content-Type") + req.httpBody = "{\"model\":\"\(name)\",\"keep_alive\":0}".data(using: .utf8) + + _ = try? await session.data(for: req) + await self.refreshOllamaState() + } + + private static func formatExpiry(_ iso: String) -> String { + let formatter = ISO8601DateFormatter() + formatter.formatOptions = [.withInternetDateTime, .withFractionalSeconds] + guard let date = formatter.date(from: iso) else { return "?" 
} + let remaining = date.timeIntervalSinceNow + if remaining <= 0 { return "expiring" } + let mins = Int(remaining / 60) + if mins < 1 { return "<1m" } + return "\(mins)m" + } + + private func generateExport() async { + self.exportOutput = nil + self.showExportSheet = true + + let exporter = OpenClawExporter() + let fetcher = OllamaLocalFetcher() + let endpoints = [OllamaLocalEndpoint.macLocal, OllamaLocalEndpoint.windowsLAN] + let ollamaResults = await fetcher.probeAll(endpoints: endpoints) + let codexAccounts = CodexAccountInfo.loadManagedAccounts() + let version = Bundle.main.infoDictionary?["CFBundleShortVersionString"] as? String ?? "dev" + + let json = exporter.exportJSON( + ollamaResults: ollamaResults, + codexAccounts: codexAccounts, + codexbarVersion: version) + + self.exportOutput = json + } +} + +// MARK: - View Model Types + +struct OllamaEndpointStatus: Identifiable { + let url: String + let label: String + let isOnline: Bool + let version: String? + let modelCount: Int + let models: [OllamaModelStatus] + + var id: String { self.url } +} + +struct OllamaModelStatus: Identifiable { + let name: String + let sizeLabel: String + let isRunning: Bool + let isReasoning: Bool + + var id: String { self.name } +} + +/// A discovered OpenClaw gateway instance. +struct DiscoveredGateway: Identifiable { + let port: Int + let name: String + let status: String + + var id: Int { self.port } +} + +/// A currently loaded/running Ollama model. +struct OllamaRunningModel { + let name: String + let sizeGB: Double + let processor: String // "100% GPU", "CPU", etc. + let contextLength: Int + let expiresIn: String // "4m", "<1m", "expiring" +} + +/// An installed Ollama model (may or may not be loaded). +struct OllamaInstalledModel { + let name: String + let sizeGB: Double + let contextLength: Int + let isEmbedding: Bool + let isReasoning: Bool +} + +/// System memory info for resource monitoring. 
struct SystemMemoryInfo {
    var totalGB: Double = 0
    var usedGB: Double = 0
    // "Unknown" until a probe fills in the real GPU name.
    var gpuName: String = "Unknown"
}

/// A provider in the fallback order with its accounts.
struct FallbackProvider: Identifiable {
    let id: String // e.g., "openai-codex", "anthropic", "ollama"
    let displayName: String // e.g., "Codex (OpenAI)"
    let detail: String // e.g., "4 accounts"
    var accounts: [String] // e.g., ["codexbar-d5aa0853", "codexbar-6921f3bf"]
    var models: [String] // e.g., ["gpt-5.4", "gpt-5.2-codex"]

    /// Persistence location shared by `loadFromDisk`/`saveToDisk`.
    private static var diskPath: URL {
        FileManager.default.homeDirectoryForCurrentUser
            .appendingPathComponent(".codexbar/fallback-order.json")
    }

    /// Load the persisted fallback order, or build a default order
    /// (Codex accounts first, then local Ollama) when the file is
    /// missing or unparseable.
    static func loadFromDisk() -> [FallbackProvider] {
        if let data = try? Data(contentsOf: self.diskPath),
           let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any],
           let providers = json["providers"] as? [[String: Any]]
        {
            return providers.compactMap { p in
                guard let id = p["id"] as? String else { return nil }
                return FallbackProvider(
                    id: id,
                    displayName: p["displayName"] as? String ?? id,
                    detail: p["detail"] as? String ?? "",
                    accounts: p["accounts"] as? [String] ?? [],
                    models: p["models"] as? [String] ?? [])
            }
        }

        // Default fallback order
        let codexAccounts = CodexAccountInfo.loadManagedAccounts()
        return [
            FallbackProvider(
                id: "openai-codex",
                displayName: "Codex (OpenAI)",
                detail: "\(codexAccounts.count) accounts",
                accounts: codexAccounts.map { "codexbar-\(String($0.accountId.prefix(8)))" },
                models: ["gpt-5.4", "gpt-5.2-codex", "gpt-5.3-codex"]),
            FallbackProvider(
                id: "ollama",
                displayName: "Ollama Local",
                detail: "127.0.0.1:11434",
                accounts: [],
                models: ["gemma4:e4b"]),
        ]
    }

    /// Persist the fallback order as pretty-printed JSON with owner-only
    /// (0600) permissions.
    ///
    /// Fix: create `~/.codexbar` before writing — previously the `try?`
    /// write failed silently whenever the directory did not exist, so the
    /// order was never saved on a fresh machine.
    static func saveToDisk(_ providers: [FallbackProvider]) {
        let path = self.diskPath

        let json: [String: Any] = [
            "version": 1,
            "providers": providers.map { p in
                [
                    "id": p.id,
                    "displayName": p.displayName,
                    "detail": p.detail,
                    "accounts": p.accounts,
                    "models": p.models,
                ] as [String: Any]
            },
        ]

        guard let data = try? JSONSerialization.data(withJSONObject: json, options: .prettyPrinted)
        else { return }
        try? FileManager.default.createDirectory(
            at: path.deletingLastPathComponent(),
            withIntermediateDirectories: true)
        try? data.write(to: path, options: .atomic)
        // The file can reference account identifiers; keep it private to the user.
        try? FileManager.default.setAttributes([.posixPermissions: 0o600], ofItemAtPath: path.path)
    }
}

import CodexBarCore
import Foundation

/// Provider registration stub for generic HTTPS LM endpoints.
/// Stateless: always available, no observed settings, no settings UI fields.
struct HttpsLMProviderImplementation: ProviderImplementation {
    let id: UsageProvider = .httpsLM

    @MainActor
    func presentation(context _: ProviderPresentationContext) -> ProviderPresentation {
        ProviderPresentation { _ in "https-api" }
    }

    @MainActor
    func observeSettings(_: SettingsStore) {}

    @MainActor
    func isAvailable(context _: ProviderAvailabilityContext) -> Bool {
        true
    }

    @MainActor
    func settingsFields(context _: ProviderSettingsContext) -> [ProviderSettingsFieldDescriptor] {
        []
    }
}

/// Provider registration stub for Ollama Cloud.
/// Stateless: always available, no observed settings, no settings UI fields.
struct OllamaCloudProviderImplementation: ProviderImplementation {
    let id: UsageProvider = .ollamaCloud

    @MainActor
    func presentation(context _: ProviderPresentationContext) -> ProviderPresentation {
        ProviderPresentation { _ in "cloud-api" }
    }

    @MainActor
    func observeSettings(_: SettingsStore) {}

    @MainActor
    func isAvailable(context _: ProviderAvailabilityContext) -> Bool {
        true
    }

    @MainActor
    func settingsFields(context _: ProviderSettingsContext) -> [ProviderSettingsFieldDescriptor] {
        []
    }
}
import CodexBarCore
import Foundation

/// Provider registration stub for Ollama over the LAN.
/// Stateless: reports availability unconditionally and exposes no settings UI.
struct OllamaLANProviderImplementation: ProviderImplementation {
    let id: UsageProvider = .ollamaLAN

    /// Fixed presentation label; the LAN endpoint has no per-state variants.
    @MainActor
    func presentation(context _: ProviderPresentationContext) -> ProviderPresentation {
        let label = "lan-api"
        return ProviderPresentation { _ in label }
    }

    /// This provider observes no settings.
    @MainActor
    func observeSettings(_: SettingsStore) {}

    /// LAN Ollama is treated as always available.
    @MainActor
    func isAvailable(context _: ProviderAvailabilityContext) -> Bool { true }

    /// No configurable settings fields.
    @MainActor
    func settingsFields(context _: ProviderSettingsContext) -> [ProviderSettingsFieldDescriptor] { [] }
}

import Foundation

// MARK: - OpenClaw Export Format v2

/// OpenClaw-compatible export format. This is the bridge between CodexBar
/// and OpenClaw's gateway config. Supports ALL provider types.
public struct OpenClawExport: Codable, Sendable {
    public let format: String // Always "openclaw"
    public let version: Int // Schema version (2 = multi-provider)
    public let timestamp: String // ISO 8601
    public let codexbarVersion: String
    public let providers: [String: OpenClawProviderExport]
    public let aliases: [String: String]
    public let fallbacks: [String]
    public let primary: String
    public let accounts: [OpenClawAccountExport]
    public let allowlist: [String]
    public let authProfiles: [String: OpenClawAuthProfileExport]
    public let plugins: [String: OpenClawPluginExport]
    public let authOrder: [String: [String]]
    public let authCooldowns: OpenClawAuthCooldownsExport
}

public struct OpenClawProviderExport: Codable, Sendable {
    public let api: String
    public let baseUrl: String
    public let apiKey: String?
    public let models: [OpenClawModelExport]
}

public struct OpenClawModelExport: Codable, Sendable {
    public let id: String
    public let name: String
    public let reasoning: Bool
    public let input: [String]
    public let cost: OpenClawCostExport
    public let contextWindow: Int
    public let maxTokens: Int
}

public struct OpenClawCostExport: Codable, Sendable {
    public let input: Double
    public let output: Double
    public let cacheRead: Double
    public let cacheWrite: Double
}

public struct OpenClawAccountExport: Codable, Sendable {
    public let email: String
    public let accountId: String
    public let provider: String
}

/// Auth profile for injection into OpenClaw's auth-profiles.json
public struct OpenClawAuthProfileExport: Codable, Sendable {
    public let type: String // "api_key" | "oauth" | "token"
    public let provider: String // "anthropic", "google", "ollama", etc.
    public let key: String? // For api_key type
    public let mode: String? // For openclaw.json auth section
}

/// Plugin to enable in OpenClaw
public struct OpenClawPluginExport: Codable, Sendable {
    public let enabled: Bool
}

/// Auth cooldown settings for account rotation
public struct OpenClawAuthCooldownsExport: Codable, Sendable {
    public let billingBackoffHours: Double
    public let authPermanentBackoffMinutes: Int
}

// MARK: - Known Cloud Provider Models

/// Static model catalogs for cloud providers (context windows are known)
private enum CloudModels {

    static let anthropic: [OpenClawModelExport] = [
        makeModel(id: "claude-opus-4-6", name: "Claude Opus 4.6", reasoning: true, input: ["text", "image"],
                  cost: (0.003, 0.015, 0.0003, 0.0015), ctx: 200_000, maxTok: 32_768),
        makeModel(id: "claude-sonnet-4-6", name: "Claude Sonnet 4.6", reasoning: true, input: ["text", "image"],
                  cost: (0.003, 0.015, 0.0003, 0.0015), ctx: 200_000, maxTok: 16_384),
        makeModel(id: "claude-haiku-3-5", name: "Claude Haiku 3.5", reasoning: false, input: ["text", "image"],
                  cost: (0.0008, 0.004, 0.00008, 0.0004), ctx: 200_000, maxTok: 8_192),
    ]

    static let google: [OpenClawModelExport] = [
        makeModel(id: "gemini-2.5-pro", name: "Gemini 2.5 Pro", reasoning: true, input: ["text", "image"],
                  cost: (0.00125, 0.01, 0.000315, 0.00125), ctx: 1_000_000, maxTok: 65_536),
        makeModel(id: "gemini-2.5-flash", name: "Gemini 2.5 Flash", reasoning: false, input: ["text", "image"],
                  cost: (0.000075, 0.0003, 0.0000225, 0.00009), ctx: 1_000_000, maxTok: 65_536),
        makeModel(id: "gemini-3-flash-preview", name: "Gemini 3 Flash", reasoning: false, input: ["text", "image"],
                  cost: (0.000075, 0.0003, 0.0000225, 0.00009), ctx: 1_000_000, maxTok: 16_384),
    ]

    static let codex: [OpenClawModelExport] = [
        makeModel(id: "gpt-5.4", name: "GPT-5.4", reasoning: true, input: ["text", "image"],
                  cost: (0.006, 0.024, 0.0009, 0.003), ctx: 1_049_000, maxTok: 32_768),
        makeModel(id: "gpt-5.2-codex", name: "GPT-5.2 Codex", reasoning: true, input: ["text", "image"],
                  cost: (0.003, 0.015, 0.0009, 0.003), ctx: 266_000, maxTok: 16_384),
        makeModel(id: "gpt-5.3-codex", name: "GPT-5.3 Codex", reasoning: true, input: ["text", "image"],
                  cost: (0.003, 0.015, 0.0009, 0.003), ctx: 266_000, maxTok: 16_384),
    ]

    /// Compact constructor so the catalogs above stay one entry per model.
    private static func makeModel(
        id: String, name: String, reasoning: Bool, input: [String],
        cost: (Double, Double, Double, Double), ctx: Int, maxTok: Int
    ) -> OpenClawModelExport {
        OpenClawModelExport(
            id: id, name: name, reasoning: reasoning, input: input,
            cost: OpenClawCostExport(input: cost.0, output: cost.1, cacheRead: cost.2, cacheWrite: cost.3),
            contextWindow: ctx, maxTokens: maxTok)
    }
}

// MARK: - Exporter

/// Builds an OpenClaw-compatible export from CodexBar's current state.
/// Supports ALL provider types: Codex, Claude, Gemini, Ollama, HTTPS LMs.
public final class OpenClawExporter: Sendable {

    public init() {}

    /// Generate a full multi-provider OpenClaw export.
    ///
    /// - Parameters:
    ///   - ollamaResults: Probe results; only online endpoints are exported.
    ///   - codexAccounts: Managed Codex accounts (become oauth auth profiles).
    ///   - claudeAPIKey/geminiAPIKey: Optional cloud keys; empty strings are ignored.
    ///   - httpsLMEndpoints: Discovered OpenAI-compatible local endpoints.
    ///   - fallbackOrder: Explicit fallback chain; when nil, the master config
    ///     file is consulted, then an order is auto-built.
    ///   - primaryModel: Model ref used as primary and head of the allowlist.
    ///   - codexbarVersion: Stamped into the export for traceability.
    public func export(
        ollamaResults: [OllamaLocalProbeResult] = [],
        codexAccounts: [CodexAccountInfo] = [],
        claudeAPIKey: String? = nil,
        geminiAPIKey: String? = nil,
        httpsLMEndpoints: [HttpsLMEndpoint] = [],
        fallbackOrder: [String]? = nil,
        primaryModel: String = "openai-codex/gpt-5.4",
        codexbarVersion: String = "0.21"
    ) -> OpenClawExport {

        var providers: [String: OpenClawProviderExport] = [:]
        var fallbacks: [String] = []
        var accounts: [OpenClawAccountExport] = []
        var authProfiles: [String: OpenClawAuthProfileExport] = [:]
        var plugins: [String: OpenClawPluginExport] = [:]
        let aliases: [String: String] = ["gpt54": "openai-codex/gpt-5.4"]

        // ─── CODEX ACCOUNTS ───
        for account in codexAccounts {
            accounts.append(OpenClawAccountExport(
                email: account.email,
                accountId: account.accountId,
                provider: "openai-codex"))
        }
        // Codex is built-in — no provider entry needed, just auth stubs
        for account in codexAccounts {
            let key = "openai-codex:codexbar-\(String(account.accountId.prefix(8)))"
            authProfiles[key] = OpenClawAuthProfileExport(
                type: "oauth", provider: "openai-codex", key: nil, mode: "oauth")
        }

        // ─── CLAUDE / ANTHROPIC ───
        if let apiKey = claudeAPIKey, !apiKey.isEmpty {
            // Anthropic is built-in — no provider entry needed
            authProfiles["anthropic:default"] = OpenClawAuthProfileExport(
                type: "api_key", provider: "anthropic", key: apiKey, mode: "api_key")
            plugins["anthropic"] = OpenClawPluginExport(enabled: true)
        }

        // ─── GEMINI / GOOGLE ───
        if let apiKey = geminiAPIKey, !apiKey.isEmpty {
            // Google is built-in — no provider entry needed
            authProfiles["google:default"] = OpenClawAuthProfileExport(
                type: "api_key", provider: "google", key: apiKey, mode: "api_key")
        }

        // ─── OLLAMA ───
        // Read the master provider config ONCE — it drives both model
        // filtering and (below) the fallback chain. Previously the file was
        // read and decoded twice.
        let masterPath = FileManager.default.homeDirectoryForCurrentUser
            .appendingPathComponent(".codexbar/openclaw-providers.json")
        let masterConfig: MasterProviderConfig? = (try? Data(contentsOf: masterPath))
            .flatMap { try? JSONDecoder().decode(MasterProviderConfig.self, from: $0) }

        // Fix: the element type was missing (`Set = []` does not compile);
        // the set holds "provider/model" refs.
        var masterFallbacks: Set<String> = []
        if let master = masterConfig {
            for fb in master.fallbackOrder { masterFallbacks.insert(fb) }
            if let p = master.primary { masterFallbacks.insert(p) }
        }

        for result in ollamaResults where result.isOnline {
            let key = ollamaProviderKey(for: result.endpoint)

            // Embedding models are never chat candidates; when a master
            // fallback list exists, export only models it references.
            let chatModels = result.models.filter { model in
                if model.isEmbedding { return false }
                if masterFallbacks.isEmpty { return true }
                return masterFallbacks.contains("\(key)/\(model.name)")
            }

            let models = chatModels.map { model in
                OpenClawModelExport(
                    id: model.name,
                    name: Self.humanReadableName(for: model),
                    reasoning: model.isReasoning,
                    input: ["text"],
                    cost: OpenClawCostExport(input: 0, output: 0, cacheRead: 0, cacheWrite: 0),
                    contextWindow: model.contextLength,
                    maxTokens: 8192)
            }

            providers[key] = OpenClawProviderExport(
                api: "ollama", baseUrl: result.endpoint.url, apiKey: "ollama-local", models: models)

            authProfiles["\(key):default"] = OpenClawAuthProfileExport(
                type: "api_key", provider: key, key: "ollama-local", mode: "api_key")

            plugins["ollama"] = OpenClawPluginExport(enabled: true)
        }

        // ─── HTTPS LMs ───
        for endpoint in httpsLMEndpoints where endpoint.isOnline {
            providers[endpoint.providerName] = OpenClawProviderExport(
                api: "openai-completions",
                baseUrl: endpoint.baseUrl,
                apiKey: endpoint.apiKey ?? "local",
                models: endpoint.models.map { model in
                    OpenClawModelExport(
                        id: model.id, name: model.name, reasoning: false,
                        input: ["text"],
                        cost: OpenClawCostExport(input: 0, output: 0, cacheRead: 0, cacheWrite: 0),
                        contextWindow: model.contextWindow, maxTokens: 8192)
                })

            authProfiles["\(endpoint.providerName):default"] = OpenClawAuthProfileExport(
                type: "api_key", provider: endpoint.providerName,
                key: endpoint.apiKey ?? "local", mode: "api_key")
        }

        // ─── FALLBACK CHAIN ───
        if let customOrder = fallbackOrder {
            fallbacks = customOrder
        } else if let master = masterConfig, !master.fallbackOrder.isEmpty {
            fallbacks = master.fallbackOrder
        } else {
            // Auto-build: codex → claude → gemini → ollama → https
            fallbacks = [primaryModel]
            if claudeAPIKey != nil { fallbacks.append("anthropic/claude-opus-4-6") }
            if geminiAPIKey != nil { fallbacks.append("google/gemini-2.5-pro") }
            for (key, prov) in providers {
                for model in prov.models {
                    let ref = "\(key)/\(model.id)"
                    if !fallbacks.contains(ref) { fallbacks.append(ref) }
                }
            }
        }

        // ─── ALLOWLIST ───
        var allowlist: [String] = []
        allowlist.append(primaryModel)
        for fb in fallbacks where fb != primaryModel { allowlist.append(fb) }

        // ─── AUTH ORDER — Codex account rotation ───
        var authOrder: [String: [String]] = [:]
        let codexProfileKeys = authProfiles.keys
            .filter { $0.hasPrefix("openai-codex:") }
            .sorted()
        if codexProfileKeys.count > 1 {
            authOrder["openai-codex"] = codexProfileKeys
        }

        // ─── AUTH COOLDOWNS — fast rotation for multi-account ───
        let authCooldowns = OpenClawAuthCooldownsExport(
            billingBackoffHours: 0.08, // ~5 minutes
            authPermanentBackoffMinutes: 2)

        return OpenClawExport(
            format: "openclaw", version: 2,
            timestamp: ISO8601DateFormatter().string(from: Date()),
            codexbarVersion: codexbarVersion,
            providers: providers,
            aliases: aliases,
            fallbacks: fallbacks,
            primary: primaryModel,
            accounts: accounts,
            allowlist: allowlist,
            authProfiles: authProfiles,
            plugins: plugins,
            authOrder: authOrder,
            authCooldowns: authCooldowns)
    }

    /// Convert export to JSON string. Same parameters as `export(...)`;
    /// returns "{}" if encoding fails.
    public func exportJSON(
        ollamaResults: [OllamaLocalProbeResult] = [],
        codexAccounts: [CodexAccountInfo] = [],
        claudeAPIKey: String? = nil,
        geminiAPIKey: String? = nil,
        httpsLMEndpoints: [HttpsLMEndpoint] = [],
        fallbackOrder: [String]? = nil,
        primaryModel: String = "openai-codex/gpt-5.4",
        codexbarVersion: String = "0.21"
    ) -> String {
        let export = self.export(
            ollamaResults: ollamaResults,
            codexAccounts: codexAccounts,
            claudeAPIKey: claudeAPIKey,
            geminiAPIKey: geminiAPIKey,
            httpsLMEndpoints: httpsLMEndpoints,
            fallbackOrder: fallbackOrder,
            primaryModel: primaryModel,
            codexbarVersion: codexbarVersion)

        let encoder = JSONEncoder()
        encoder.outputFormatting = [.prettyPrinted, .sortedKeys]
        guard let data = try? encoder.encode(export) else { return "{}" }
        return String(data: data, encoding: .utf8) ?? "{}"
    }

    // MARK: - Helpers

    /// Generate a clear, human-readable display name from an Ollama model.
    /// Known model IDs map to curated names; unknown IDs are title-cased
    /// from the tag (e.g. "foo-bar:latest" → "Foo Bar").
    private static func humanReadableName(for model: OllamaLocalModel) -> String {
        let id = model.name
        let size = model.sizeLabel

        let knownNames: [String: String] = [
            "gemma4:e4b": "Gemma 4 E4B",
            "gemma3:27b": "Gemma 3 27B",
            "gemma3:12b": "Gemma 3 12B",
            "gpt-oss:20b": "GPT-OSS 20B",
            "qwen3-coder:30b": "Qwen3 Coder 30B",
            "qwen25coder7b:latest": "Qwen 2.5 Coder 7B",
            "gemma426b:latest": "Gemma 4 26B",
            "gemma3-12b-qat-q4km-local:latest": "Gemma 3 12B Q4KM",
            "gemma3-12b-qat-q3kl-local:latest": "Gemma 3 12B Q3KL",
            "devstral:24b": "Devstral 24B",
        ]

        if let known = knownNames[id] { return "\(known) (\(size))" }

        let base = id
            .replacingOccurrences(of: ":latest", with: "")
            .replacingOccurrences(of: "-", with: " ")
            .replacingOccurrences(of: ":", with: " ")
        let capitalized = base.split(separator: " ").map { word in
            let w = String(word)
            // Parameter-count tokens like "27b" render uppercased ("27B").
            if w.last == "b", Int(String(w.dropLast())) != nil { return w.uppercased() }
            return w.prefix(1).uppercased() + w.dropFirst()
        }.joined(separator: " ")

        return "\(capitalized) (\(size))"
    }

    /// Map an endpoint type to its OpenClaw provider key.
    private func ollamaProviderKey(for endpoint: OllamaLocalEndpoint) -> String {
        switch endpoint.type {
        case .local: return "ollama"
        case .lan: return "ollama-lan"
        case .remote: return "ollama-remote"
        }
    }
}

// MARK: - HTTPS LM Endpoint

/// Discovered HTTPS LM endpoint (LM Studio, vLLM, llama.cpp, etc.)
public struct HttpsLMEndpoint: Sendable {
    public let providerName: String
    public let baseUrl: String
    public let apiKey: String?
    public let isOnline: Bool
    public let models: [HttpsLMModel]

    public init(providerName: String, baseUrl: String, apiKey: String?, isOnline: Bool, models: [HttpsLMModel]) {
        self.providerName = providerName
        self.baseUrl = baseUrl
        self.apiKey = apiKey
        self.isOnline = isOnline
        self.models = models
    }
}

public struct HttpsLMModel: Sendable {
    public let id: String
    public let name: String
    public let contextWindow: Int

    public init(id: String, name: String, contextWindow: Int = 32768) {
        self.id = id
        self.name = name
        self.contextWindow = contextWindow
    }
}

// MARK: - Codex Account Info

public struct CodexAccountInfo: Codable, Sendable {
    public let email: String
    public let accountId: String

    public init(email: String, accountId: String) {
        self.email = email
        self.accountId = accountId
    }

    /// Read ALL Codex accounts — both CodexBar managed homes and system Codex auth.
    /// Managed accounts win on accountId collisions with the system auth file.
    public static func loadManagedAccounts() -> [CodexAccountInfo] {
        var accounts: [CodexAccountInfo] = []
        // Fix: element type was missing (`Set = []` does not compile).
        var seenAccountIds: Set<String> = []

        let managedPath = FileManager.default.homeDirectoryForCurrentUser
            .appendingPathComponent("Library/Application Support/CodexBar/managed-codex-accounts.json")
        if let data = try? Data(contentsOf: managedPath),
           let wrapper = try? JSONDecoder().decode(ManagedAccountSet.self, from: data)
        {
            for acct in wrapper.accounts {
                accounts.append(CodexAccountInfo(email: acct.email, accountId: acct.providerAccountID))
                seenAccountIds.insert(acct.providerAccountID)
            }
        }

        let codexAuthPath = FileManager.default.homeDirectoryForCurrentUser
            .appendingPathComponent(".codex/auth.json")
        if let data = try? Data(contentsOf: codexAuthPath),
           let auth = try? JSONDecoder().decode(CodexCLIAuth.self, from: data),
           let accountId = auth.tokens?.account_id,
           !accountId.isEmpty,
           !seenAccountIds.contains(accountId)
        {
            accounts.append(CodexAccountInfo(
                email: Self.email(fromIDToken: auth.tokens?.id_token),
                accountId: accountId))
        }

        return accounts
    }

    /// Extract the `email` claim from a JWT id_token, falling back to
    /// "codex-default" when the token is absent or undecodable.
    private static func email(fromIDToken idToken: String?) -> String {
        guard let idToken else { return "codex-default" }
        let parts = idToken.split(separator: ".")
        guard parts.count >= 2,
              let payload = decodeBase64URL(parts[1]),
              let jwt = try? JSONSerialization.jsonObject(with: payload) as? [String: Any],
              let email = jwt["email"] as? String
        else { return "codex-default" }
        return email
    }

    /// Decode a base64url segment (RFC 7515): JWTs use the `-`/`_` alphabet
    /// and strip padding, which `Data(base64Encoded:)` rejects.
    /// Fix: the old code appended a fixed `"=="`, which only works when the
    /// segment length is ≡ 2 (mod 4); translate the alphabet and pad to a
    /// multiple of 4 instead.
    private static func decodeBase64URL(_ segment: Substring) -> Data? {
        var base64 = segment
            .replacingOccurrences(of: "-", with: "+")
            .replacingOccurrences(of: "_", with: "/")
        let remainder = base64.count % 4
        if remainder > 0 {
            base64 += String(repeating: "=", count: 4 - remainder)
        }
        return Data(base64Encoded: base64)
    }
}

// MARK: - Supporting Decodable Types

private struct ManagedAccountSet: Codable {
    let version: Int
    let accounts: [ManagedAccount]
}

private struct ManagedAccount: Codable {
    let email: String
    let providerAccountID: String
}

private struct CodexCLIAuth: Codable {
    let tokens: CodexCLITokens?
}

private struct CodexCLITokens: Codable {
    let account_id: String?
    let id_token: String?
}

private struct MasterProviderConfig: Codable {
    let fallbackOrder: [String]
    let primary: String?
}

import Foundation

// MARK: - OpenClaw Gateway WebSocket Client

/// Authenticated WebSocket client for OpenClaw's gateway JSON-RPC API.
+/// +/// Replaces insecure file-write injection with proper authenticated RPC: +/// - Reads gateway token from `~/.openclaw/gateway.token` +/// - Connects via `ws://127.0.0.1:{port}` +/// - Sends `config.get` / `config.patch` JSON-RPC messages +/// - Handles responses with timeout +/// +/// Security: Token-authenticated, no file writes, no shell scripts, no kill -9. +public final class OpenClawGatewayClient: Sendable { + + // MARK: - Error Types + + public enum GatewayError: Error, LocalizedError, Sendable { + case tokenNotFound(path: String) + case tokenUnreadable(path: String) + case connectionFailed(port: Int, underlying: String) + case timeout(seconds: Int) + case disconnected + case rpcError(method: String, message: String) + case invalidResponse(String) + case notConnected + + public var errorDescription: String? { + switch self { + case .tokenNotFound(let path): + return "Gateway token not found at \(path)" + case .tokenUnreadable(let path): + return "Gateway token unreadable at \(path) — check permissions (should be 0600)" + case .connectionFailed(let port, let underlying): + return "Failed to connect to gateway on port \(port): \(underlying)" + case .timeout(let seconds): + return "Gateway request timed out after \(seconds)s" + case .disconnected: + return "WebSocket disconnected unexpectedly" + case .rpcError(let method, let message): + return "RPC error on \(method): \(message)" + case .invalidResponse(let detail): + return "Invalid gateway response: \(detail)" + case .notConnected: + return "Not connected to gateway — call connect() first" + } + } + } + + // MARK: - Result Types + + /// Result of a config.get call. + public struct ConfigSnapshot: Sendable { + public let raw: String // Full JSON config + public let baseHash: String // SHA for optimistic concurrency + } + + /// Result of a config.patch call. + public struct PatchResult: Sendable { + public let ok: Bool + public let newHash: String? 
+ } + + // MARK: - Internal State Actor + + /// Actor-isolated mutable state for pending RPC requests. + /// Uses raw JSON Data to stay Sendable across isolation boundaries. + private actor RequestState { + var pendingRequests: [String: CheckedContinuation] = [:] + + func register(id: String, continuation: CheckedContinuation) { + pendingRequests[id] = continuation + } + + func complete(id: String, data: Data) { + guard let continuation = pendingRequests.removeValue(forKey: id) else { return } + continuation.resume(returning: data) + } + + func fail(id: String, error: Error) { + guard let continuation = pendingRequests.removeValue(forKey: id) else { return } + continuation.resume(throwing: error) + } + + func cancelAll(error: Error) { + let pending = pendingRequests + pendingRequests.removeAll() + for (_, continuation) in pending { + continuation.resume(throwing: error) + } + } + } + + // MARK: - Properties + + private let port: Int + private let timeoutSeconds: Int + private let session: URLSession + private let state = RequestState() + + // Non-isolated mutable state — only accessed from connect/disconnect + // which are expected to be called sequentially (not concurrently). + nonisolated(unsafe) private var webSocketTask: URLSessionWebSocketTask? + + // MARK: - Init + + /// Create a client targeting a specific gateway port. + /// - Parameters: + /// - port: Gateway port (default 18789) + /// - timeoutSeconds: Request timeout (default 30) + public init(port: Int = 18789, timeoutSeconds: Int = 30) { + self.port = port + self.timeoutSeconds = timeoutSeconds + let config = URLSessionConfiguration.ephemeral + config.timeoutIntervalForRequest = TimeInterval(timeoutSeconds) + self.session = URLSession(configuration: config) + } + + // MARK: - Token Reading + + /// Read the gateway authentication token from disk. + /// The token file is at `~/.openclaw/gateway.token` (48 bytes, 0600 permissions). 
+ public static func readGatewayToken() throws -> String { + let tokenPath = FileManager.default.homeDirectoryForCurrentUser + .appendingPathComponent(".openclaw/gateway.token") + + guard FileManager.default.fileExists(atPath: tokenPath.path) else { + throw GatewayError.tokenNotFound(path: tokenPath.path) + } + + guard let data = FileManager.default.contents(atPath: tokenPath.path), + let token = String(data: data, encoding: .utf8)?.trimmingCharacters(in: .whitespacesAndNewlines), + !token.isEmpty + else { + throw GatewayError.tokenUnreadable(path: tokenPath.path) + } + + return token + } + + // MARK: - Auth Modes + + /// Gateway authentication mode — supports all OpenClaw auth options. + public enum AuthMode: Sendable { + case token // Auto-read from ~/.openclaw/gateway.token + case tokenValue(String) // Explicit token value + case password(String) // Password-based auth + } + + // MARK: - Connection + + /// Connect to the gateway WebSocket with authentication. + /// Supports token (auto-read or explicit) and password auth modes. 
+ /// - Parameter authMode: How to authenticate (default: auto-read token from disk) + public func connect(authMode: AuthMode = .token) async throws { + let authParam: String + switch authMode { + case .token: + let token = try Self.readGatewayToken() + authParam = "token=\(token)" + case .tokenValue(let token): + authParam = "token=\(token)" + case .password(let password): + authParam = "password=\(password)" + } + + // Gateway accepts auth via query parameter for WebSocket connections + guard let url = URL(string: "ws://127.0.0.1:\(port)?\(authParam)") else { + throw GatewayError.connectionFailed(port: port, underlying: "Invalid URL") + } + + let task = session.webSocketTask(with: url) + self.webSocketTask = task + task.resume() + + // Start the receive loop + Task { [weak self] in + await self?.receiveLoop() + } + + // Verify connection with a small delay to let the handshake complete + try await Task.sleep(nanoseconds: 200_000_000) // 200ms + + // Verify the connection is open by checking state + guard task.state == .running else { + throw GatewayError.connectionFailed(port: port, underlying: "WebSocket handshake failed") + } + } + + /// Disconnect from the gateway. + public func disconnect() async { + webSocketTask?.cancel(with: .normalClosure, reason: nil) + webSocketTask = nil + await state.cancelAll(error: GatewayError.disconnected) + } + + // MARK: - RPC Methods + + /// Fetch the current gateway config and its base hash. + /// The base hash is required for `configPatch` (optimistic concurrency). + public func configGet() async throws -> ConfigSnapshot { + let responseData = try await sendRPC(method: "config.get", params: [:]) + let response = try parseResponse(responseData) + + guard let payload = response["payload"] as? [String: Any], + let raw = payload["raw"] as? String, + let baseHash = payload["baseHash"] as? 
String + else { + throw GatewayError.invalidResponse("config.get payload missing raw or baseHash") + } + + return ConfigSnapshot(raw: raw, baseHash: baseHash) + } + + /// Apply a merge-patch to the gateway config. + /// - Parameters: + /// - patch: JSON string containing the merge-patch to apply + /// - baseHash: The base hash from a prior `configGet()` call + /// - Returns: Result indicating success and the new config hash + public func configPatch(patch: String, baseHash: String) async throws -> PatchResult { + let params: [String: Any] = [ + "raw": patch, + "baseHash": baseHash, + ] + let responseData = try await sendRPC(method: "config.patch", params: params) + let response = try parseResponse(responseData) + + let ok = response["ok"] as? Bool ?? false + if !ok { + let msg = response["error"] as? String ?? "unknown error" + throw GatewayError.rpcError(method: "config.patch", message: msg) + } + + let newHash = (response["payload"] as? [String: Any])?["baseHash"] as? String + return PatchResult(ok: true, newHash: newHash) + } + + private func parseResponse(_ data: Data) throws -> [String: Any] { + guard let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any] else { + throw GatewayError.invalidResponse("Could not parse response JSON") + } + return json + } + + // MARK: - Internal RPC + + private func sendRPC(method: String, params: [String: Any]) async throws -> Data { + guard let task = webSocketTask, task.state == .running else { + throw GatewayError.notConnected + } + + let requestId = UUID().uuidString + + let message: [String: Any] = [ + "type": "request", + "id": requestId, + "method": method, + "params": params, + ] + + guard let jsonData = try? 
JSONSerialization.data(withJSONObject: message), + let jsonString = String(data: jsonData, encoding: .utf8) + else { + throw GatewayError.invalidResponse("Failed to serialize RPC request") + } + + // Register continuation before sending, with per-RPC timeout to prevent + // hanging indefinitely if the gateway never replies. + return try await withThrowingTaskGroup(of: Data.self) { group in + group.addTask { + try await withCheckedThrowingContinuation { continuation in + Task { + await self.state.register(id: requestId, continuation: continuation) + do { + try await task.send(.string(jsonString)) + } catch { + await self.state.fail( + id: requestId, + error: GatewayError.connectionFailed( + port: self.port, underlying: error.localizedDescription)) + } + } + } + } + group.addTask { + try await Task.sleep(nanoseconds: UInt64(self.timeoutSeconds) * 1_000_000_000) + throw GatewayError.timeout(seconds: self.timeoutSeconds) + } + // First to complete wins — either the response or the timeout + let result = try await group.next()! + group.cancelAll() + return result + } + } + + private func receiveLoop() async { + guard let task = webSocketTask else { return } + + while task.state == .running { + do { + let message = try await task.receive() + switch message { + case .string(let text): + await handleMessage(text) + case .data(let data): + if let text = String(data: data, encoding: .utf8) { + await handleMessage(text) + } + @unknown default: + break + } + } catch { + // Connection closed or error — cancel all pending + await state.cancelAll(error: GatewayError.disconnected) + return + } + } + } + + private func handleMessage(_ text: String) async { + guard let data = text.data(using: .utf8) else { return } + + // Parse just enough to extract the request ID for routing + guard let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any], + let type = json["type"] as? String, type == "response", + let id = json["id"] as? 
String + else { return } + + // Pass the raw Data through — deserialization happens on the caller side + await state.complete(id: id, data: data) + } +} + +// MARK: - Config Patch Builder + +/// Builds a JSON merge-patch from an OpenClawExport for use with config.patch. +/// Only touches LM-related keys — preserves everything else in the config. +public enum OpenClawPatchBuilder { + + /// Convert an OpenClawExport into a JSON merge-patch string. + /// The patch only includes: models.providers, agents.defaults.model, + /// auth.profiles, auth.order, auth.cooldowns, plugins.entries. + public static func buildPatch(from export: OpenClawExport) -> String { + var patch: [String: Any] = [:] + + // models.providers + if !export.providers.isEmpty { + var providersDict: [String: Any] = [:] + for (key, provider) in export.providers { + var modelsArray: [[String: Any]] = [] + for model in provider.models { + modelsArray.append([ + "id": model.id, + "name": model.name, + "reasoning": model.reasoning, + "input": model.input, + "cost": [ + "input": model.cost.input, + "output": model.cost.output, + "cacheRead": model.cost.cacheRead, + "cacheWrite": model.cost.cacheWrite, + ], + "contextWindow": model.contextWindow, + "maxTokens": model.maxTokens, + ]) + } + var providerDict: [String: Any] = [ + "api": provider.api, + "baseUrl": provider.baseUrl, + "models": modelsArray, + ] + if let apiKey = provider.apiKey { + providerDict["apiKey"] = apiKey + } + providersDict[key] = providerDict + } + patch["models"] = ["providers": providersDict] + } + + // agents.defaults.model + patch["agents"] = [ + "defaults": [ + "model": [ + "primary": export.primary, + "fallbacks": export.fallbacks, + ] as [String: Any], + ] as [String: Any], + ] + + // auth — profiles, order, cooldowns + var authDict: [String: Any] = [:] + + if !export.authProfiles.isEmpty { + var profilesDict: [String: Any] = [:] + for (key, profile) in export.authProfiles { + var p: [String: Any] = [ + "provider": profile.provider, 
+ ] + let modeValue: String = profile.mode ?? profile.type + p["mode"] = modeValue + // Include API key if present — needed for Ollama and other + // API-key-based providers to actually authenticate requests. + if let apiKey = profile.key, !apiKey.isEmpty { + p["key"] = apiKey + } + if let accountId = profile.accountId, !accountId.isEmpty { + p["accountId"] = accountId + } + profilesDict[key] = p + } + authDict["profiles"] = profilesDict + } + + if !export.authOrder.isEmpty { + authDict["order"] = export.authOrder + } + + authDict["cooldowns"] = [ + "billingBackoffHours": export.authCooldowns.billingBackoffHours, + "authPermanentBackoffMinutes": export.authCooldowns.authPermanentBackoffMinutes, + ] + + patch["auth"] = authDict + + // plugins.entries + if !export.plugins.isEmpty { + var entriesDict: [String: Any] = [:] + for (key, plugin) in export.plugins { + entriesDict[key] = ["enabled": plugin.enabled] + } + patch["plugins"] = ["entries": entriesDict] + } + + // Serialize + guard let data = try? JSONSerialization.data( + withJSONObject: patch, + options: [.sortedKeys]), + let json = String(data: data, encoding: .utf8) + else { return "{}" } + + return json + } +} diff --git a/Sources/CodexBarCore/OpenClawIntegration/OpenClawPairing.swift b/Sources/CodexBarCore/OpenClawIntegration/OpenClawPairing.swift new file mode 100644 index 000000000..0bbf67a89 --- /dev/null +++ b/Sources/CodexBarCore/OpenClawIntegration/OpenClawPairing.swift @@ -0,0 +1,179 @@ +import CryptoKit +import Foundation +import Security + +// MARK: - OpenClaw Gateway Pairing + +/// Manages pairing state between CodexBar and an OpenClaw gateway. +/// +/// On first inject: reads the gateway token, verifies it, and stores +/// the pairing info (port, token hash, timestamp) in the Keychain. +/// Subsequent injects use the stored pairing for faster connection. 
+/// +/// Security: Only a SHA-256 hash of the token is stored in Keychain — +/// the actual token is always read fresh from disk at connect time. +public final class OpenClawPairing: Sendable { + + // MARK: - Types + + public struct PairingInfo: Codable, Sendable { + public let port: Int + public let tokenHash: String // SHA-256 of gateway token + public let pairedAt: Date + public let gatewayVersion: String? + + public init(port: Int, tokenHash: String, pairedAt: Date = Date(), gatewayVersion: String? = nil) { + self.port = port + self.tokenHash = tokenHash + self.pairedAt = pairedAt + self.gatewayVersion = gatewayVersion + } + } + + public enum PairingError: Error, LocalizedError, Sendable { + case keychainWriteFailed(OSStatus) + case keychainReadFailed(OSStatus) + case tokenChanged + case notPaired + + public var errorDescription: String? { + switch self { + case .keychainWriteFailed(let status): + return "Failed to save pairing to Keychain (status \(status))" + case .keychainReadFailed(let status): + return "Failed to read pairing from Keychain (status \(status))" + case .tokenChanged: + return "Gateway token has changed since last pairing — re-pair required" + case .notPaired: + return "No existing pairing found" + } + } + } + + // MARK: - Constants + + private static let keychainService = "com.codexbar.openclaw-pairing" + private static let keychainAccount = "gateway-pairing" + + // MARK: - Public API + + /// Check if a valid pairing exists and the token hasn't changed. + public static func loadPairing() -> PairingInfo? { + guard let data = readFromKeychain() else { return nil } + return try? JSONDecoder().decode(PairingInfo.self, from: data) + } + + /// Verify the stored pairing against the current gateway token. + /// Returns the pairing if valid, nil if token changed or no pairing exists. + public static func verifyPairing() -> PairingInfo? 
{ + guard let pairing = loadPairing() else { return nil } + + // Read current token and compare hash + guard let currentToken = try? OpenClawGatewayClient.readGatewayToken() else { + return nil + } + + let currentHash = sha256Hex(currentToken) + guard currentHash == pairing.tokenHash else { + // Token changed — pairing is stale + return nil + } + + return pairing + } + + /// Create or update pairing for the given port. + /// Reads the current gateway token and stores a hash in Keychain. + @discardableResult + public static func pair(port: Int, gatewayVersion: String? = nil) throws -> PairingInfo { + let token = try OpenClawGatewayClient.readGatewayToken() + let hash = sha256Hex(token) + + let info = PairingInfo( + port: port, + tokenHash: hash, + pairedAt: Date(), + gatewayVersion: gatewayVersion) + + let data = try JSONEncoder().encode(info) + try saveToKeychain(data: data) + + return info + } + + /// Remove stored pairing from Keychain. + public static func unpair() { + deleteFromKeychain() + } + + // MARK: - Auto-Pair + + /// Auto-pair on first inject: verify gateway token is readable, then store pairing. + /// If already paired and token matches, returns existing pairing. + /// If not paired or token changed, creates a new pairing. + public static func ensurePaired(port: Int) throws -> PairingInfo { + // Check existing pairing + if let existing = verifyPairing(), existing.port == port { + return existing + } + + // New pairing needed + return try pair(port: port) + } + + // MARK: - Keychain Helpers + + private static func readFromKeychain() -> Data? { + let query: [String: Any] = [ + kSecClass as String: kSecClassGenericPassword, + kSecAttrService as String: keychainService, + kSecAttrAccount as String: keychainAccount, + kSecReturnData as String: true, + kSecMatchLimit as String: kSecMatchLimitOne, + ] + + var result: AnyObject? + let status = SecItemCopyMatching(query as CFDictionary, &result) + + guard status == errSecSuccess, let data = result as? 
Data else { + return nil + } + + return data + } + + private static func saveToKeychain(data: Data) throws { + // Delete existing first + deleteFromKeychain() + + let query: [String: Any] = [ + kSecClass as String: kSecClassGenericPassword, + kSecAttrService as String: keychainService, + kSecAttrAccount as String: keychainAccount, + kSecValueData as String: data, + kSecAttrAccessible as String: kSecAttrAccessibleWhenUnlockedThisDeviceOnly, + ] + + let status = SecItemAdd(query as CFDictionary, nil) + guard status == errSecSuccess else { + throw PairingError.keychainWriteFailed(status) + } + } + + private static func deleteFromKeychain() { + let query: [String: Any] = [ + kSecClass as String: kSecClassGenericPassword, + kSecAttrService as String: keychainService, + kSecAttrAccount as String: keychainAccount, + ] + SecItemDelete(query as CFDictionary) + } + + // MARK: - Hashing + + private static func sha256Hex(_ string: String) -> String { + guard let data = string.data(using: .utf8) else { return "" } + let digest = SHA256.hash(data: data) + return digest.map { String(format: "%02x", $0) }.joined() + } +} diff --git a/Sources/CodexBarCore/Providers/HttpsLM/HttpsLMProviderDescriptor.swift b/Sources/CodexBarCore/Providers/HttpsLM/HttpsLMProviderDescriptor.swift new file mode 100644 index 000000000..1605ad513 --- /dev/null +++ b/Sources/CodexBarCore/Providers/HttpsLM/HttpsLMProviderDescriptor.swift @@ -0,0 +1,119 @@ +import Foundation + +public enum HttpsLMProviderDescriptor { + public static let descriptor = ProviderDescriptor( + id: .httpsLM, + metadata: ProviderMetadata( + id: .httpsLM, + displayName: "HTTPS LM", + sessionLabel: "Models", + weeklyLabel: "Local", + opusLabel: nil, + supportsOpus: false, + supportsCredits: false, + creditsHint: "", + toggleTitle: "Show HTTPS-accessible local LMs (llama.cpp, vLLM, LM Studio)", + cliName: "https-lm", + defaultEnabled: false, + isPrimaryProvider: false, + usesAccountFallback: false, + browserCookieOrder: nil, + 
dashboardURL: nil, + statusPageURL: nil, + statusLinkURL: nil), + branding: ProviderBranding( + iconStyle: .httpsLM, + iconResourceName: "ProviderIcon-ollama", + color: ProviderColor(red: 147 / 255, green: 51 / 255, blue: 234 / 255)), + tokenCost: ProviderTokenCostConfig( + supportsTokenCost: false, + noDataMessage: { "HTTPS LM is self-hosted — no cost tracking." }), + fetchPlan: ProviderFetchPlan( + sourceModes: [.auto, .api], + pipeline: ProviderFetchPipeline(resolveStrategies: { _ in + [HttpsLMFetchStrategy()] + })), + cli: ProviderCLIConfig( + name: "https-lm", + versionDetector: nil)) +} + +/// Probes an OpenAI-compatible /v1/models endpoint. +struct HttpsLMFetchStrategy: ProviderFetchStrategy { + let id: String = "https-lm.api" + let kind: ProviderFetchKind = .localProbe + + func isAvailable(_ context: ProviderFetchContext) async -> Bool { + true + } + + func fetch(_ context: ProviderFetchContext) async throws -> ProviderFetchResult { + let baseURL = context.env["CODEXBAR_HTTPS_LM_URL"] ?? "" + + var modelCount = 0 + var isOnline = false + + if !baseURL.isEmpty { + do { + let url = URL(string: "\(baseURL.hasSuffix("/") ? String(baseURL.dropLast()) : baseURL)/v1/models")! + var request = URLRequest(url: url, timeoutInterval: 5) + request.httpMethod = "GET" + + if let apiKey = context.env["CODEXBAR_HTTPS_LM_API_KEY"], !apiKey.isEmpty { + request.setValue("Bearer \(apiKey)", forHTTPHeaderField: "Authorization") + } + + let config = URLSessionConfiguration.ephemeral + config.timeoutIntervalForRequest = 5 + let session = URLSession(configuration: config) + let (data, response) = try await session.data(for: request) + + if let httpResponse = response as? HTTPURLResponse, httpResponse.statusCode == 200 { + if let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any], + let models = json["data"] as? 
[[String: Any]] + { + modelCount = models.count + isOnline = true + } + } + } catch { + if context.verbose { + CodexBarLog.logger(LogCategories.httpsLM) + .verbose("HTTPS LM offline: \(baseURL) — \(error.localizedDescription)") + } + } + } + + let usage = UsageSnapshot( + primary: RateWindow( + usedPercent: isOnline ? 100.0 : 0.0, + windowMinutes: nil, + resetsAt: nil, + resetDescription: isOnline ? "\(modelCount) models via /v1/models" : (baseURL.isEmpty ? "Not configured" : "Offline")), + secondary: nil, + tertiary: nil, + providerCost: nil, + zaiUsage: nil, + minimaxUsage: nil, + openRouterUsage: nil, + cursorRequests: nil, + updatedAt: Date(), + identity: ProviderIdentitySnapshot( + providerID: .httpsLM, + accountEmail: baseURL.isEmpty ? nil : baseURL, + accountOrganization: nil, + loginMethod: nil)) + + return ProviderFetchResult( + usage: usage, + credits: nil, + dashboard: nil, + sourceLabel: "https-api", + strategyID: self.id, + strategyKind: self.kind) + } + + func shouldFallback(on _: Error, context _: ProviderFetchContext) -> Bool { + false + } +} diff --git a/Sources/CodexBarCore/Providers/OllamaCloud/OllamaCloudProviderDescriptor.swift b/Sources/CodexBarCore/Providers/OllamaCloud/OllamaCloudProviderDescriptor.swift new file mode 100644 index 000000000..435bd9b2a --- /dev/null +++ b/Sources/CodexBarCore/Providers/OllamaCloud/OllamaCloudProviderDescriptor.swift @@ -0,0 +1,82 @@ +import Foundation + +public enum OllamaCloudProviderDescriptor { + public static let descriptor = ProviderDescriptor( + id: .ollamaCloud, + metadata: ProviderMetadata( + id: .ollamaCloud, + displayName: "Ollama Cloud", + sessionLabel: "Models", + weeklyLabel: "Cloud", + opusLabel: nil, + supportsOpus: false, + supportsCredits: false, + creditsHint: "", + toggleTitle: "Show Ollama cloud models", + cliName: "ollama-cloud", + defaultEnabled: false, + isPrimaryProvider: false, + usesAccountFallback: false, + browserCookieOrder: nil, + dashboardURL: "https://ollama.com/dashboard", + 
statusPageURL: nil, + statusLinkURL: "https://ollama.com"), + branding: ProviderBranding( + iconStyle: .ollamaCloud, + iconResourceName: "ProviderIcon-ollama", + color: ProviderColor(red: 59 / 255, green: 130 / 255, blue: 246 / 255)), + tokenCost: ProviderTokenCostConfig( + supportsTokenCost: false, + noDataMessage: { "Ollama cloud usage not tracked here." }), + fetchPlan: ProviderFetchPlan( + sourceModes: [.auto, .api], + pipeline: ProviderFetchPipeline(resolveStrategies: { _ in + [OllamaCloudFetchStrategy()] + })), + cli: ProviderCLIConfig( + name: "ollama-cloud", + versionDetector: nil)) +} + +struct OllamaCloudFetchStrategy: ProviderFetchStrategy { + let id: String = "ollama-cloud.api" + let kind: ProviderFetchKind = .apiToken + + func isAvailable(_ context: ProviderFetchContext) async -> Bool { + true + } + + func fetch(_ context: ProviderFetchContext) async throws -> ProviderFetchResult { + let usage = UsageSnapshot( + primary: RateWindow( + usedPercent: 0, + windowMinutes: nil, + resetsAt: nil, + resetDescription: "Cloud models — sign in at ollama.com"), + secondary: nil, + tertiary: nil, + providerCost: nil, + zaiUsage: nil, + minimaxUsage: nil, + openRouterUsage: nil, + cursorRequests: nil, + updatedAt: Date(), + identity: ProviderIdentitySnapshot( + providerID: .ollamaCloud, + accountEmail: nil, + accountOrganization: nil, + loginMethod: nil)) + + return ProviderFetchResult( + usage: usage, + credits: nil, + dashboard: nil, + sourceLabel: "cloud-api", + strategyID: self.id, + strategyKind: self.kind) + } + + func shouldFallback(on _: Error, context _: ProviderFetchContext) -> Bool { + false + } +} diff --git a/Sources/CodexBarCore/Providers/OllamaLAN/OllamaLANProviderDescriptor.swift b/Sources/CodexBarCore/Providers/OllamaLAN/OllamaLANProviderDescriptor.swift new file mode 100644 index 000000000..385c653d4 --- /dev/null +++ b/Sources/CodexBarCore/Providers/OllamaLAN/OllamaLANProviderDescriptor.swift @@ -0,0 +1,104 @@ +import Foundation + +public enum 
OllamaLANProviderDescriptor { + public static let descriptor = ProviderDescriptor( + id: .ollamaLAN, + metadata: ProviderMetadata( + id: .ollamaLAN, + displayName: "Ollama LAN", + sessionLabel: "Models", + weeklyLabel: "LAN", + opusLabel: nil, + supportsOpus: false, + supportsCredits: false, + creditsHint: "", + toggleTitle: "Show Ollama LAN models (Tailscale/HTTPS)", + cliName: "ollama-lan", + defaultEnabled: false, + isPrimaryProvider: false, + usesAccountFallback: false, + browserCookieOrder: nil, + dashboardURL: nil, + statusPageURL: nil, + statusLinkURL: nil), + branding: ProviderBranding( + iconStyle: .ollamaLAN, + iconResourceName: "ProviderIcon-ollama", + color: ProviderColor(red: 249 / 255, green: 115 / 255, blue: 22 / 255)), + tokenCost: ProviderTokenCostConfig( + supportsTokenCost: false, + noDataMessage: { "Ollama LAN is free — no cost tracking." }), + fetchPlan: ProviderFetchPlan( + sourceModes: [.auto, .api], + pipeline: ProviderFetchPipeline(resolveStrategies: { _ in + [OllamaLANFetchStrategy()] + })), + cli: ProviderCLIConfig( + name: "ollama-lan", + versionDetector: nil)) +} + +struct OllamaLANFetchStrategy: ProviderFetchStrategy { + let id: String = "ollama-lan.api" + let kind: ProviderFetchKind = .localProbe + + func isAvailable(_ context: ProviderFetchContext) async -> Bool { + true + } + + func fetch(_ context: ProviderFetchContext) async throws -> ProviderFetchResult { + // Read LAN endpoint URL from environment or config + let endpointURL = context.env["CODEXBAR_OLLAMA_LAN_URL"] ?? "http://100.64.0.5:11434" + + let fetcher = OllamaLocalFetcher() + let endpoint = OllamaLocalEndpoint(url: endpointURL, label: "LAN", type: .lan) + + var modelCount = 0 + var versionString: String? 
+ var isOnline = false + + do { + let result = try await fetcher.probe(endpoint: endpoint) + modelCount = result.models.count + versionString = result.version + isOnline = true + } catch { + if context.verbose { + CodexBarLog.logger(LogCategories.ollamaLAN) + .verbose("LAN endpoint offline: \(endpointURL) — \(error.localizedDescription)") + } + } + + let usage = UsageSnapshot( + primary: RateWindow( + usedPercent: isOnline ? 100.0 : 0.0, + windowMinutes: nil, + resetsAt: nil, + resetDescription: isOnline ? "\(modelCount) models available" : "Offline — \(endpointURL)"), + secondary: nil, + tertiary: nil, + providerCost: nil, + zaiUsage: nil, + minimaxUsage: nil, + openRouterUsage: nil, + cursorRequests: nil, + updatedAt: Date(), + identity: ProviderIdentitySnapshot( + providerID: .ollamaLAN, + accountEmail: endpointURL, + accountOrganization: versionString.map { "Ollama v\($0)" }, + loginMethod: nil)) + + return ProviderFetchResult( + usage: usage, + credits: nil, + dashboard: nil, + sourceLabel: "lan-api", + strategyID: self.id, + strategyKind: self.kind) + } + + func shouldFallback(on _: Error, context _: ProviderFetchContext) -> Bool { + false + } +} diff --git a/docs/CODEXBAR-SETUP-GUIDE.md b/docs/CODEXBAR-SETUP-GUIDE.md new file mode 100644 index 000000000..a86335e0b --- /dev/null +++ b/docs/CODEXBAR-SETUP-GUIDE.md @@ -0,0 +1,210 @@ +# CodexBar + OpenClaw Setup Guide + +## What is CodexBar? + +CodexBar is an external LM (Language Model) provider manager for OpenClaw. 
It lets you: +- Manage multiple Codex, Claude, Gemini, and Ollama accounts in one place +- Set up fallback chains with drag-to-reorder +- Auto-rotate through accounts when one hits rate limits +- Inject your LM configuration into OpenClaw with one click +- Monitor Ollama models, VRAM usage, and cold-start status + +## New Users — Setting Up from Scratch + +### Step 1: Install OpenClaw + +```bash +npm install -g openclaw@latest +``` + +### Step 2: Run Onboarding + +```bash +openclaw onboard +``` + +When prompted for a model/auth provider, select **CodexBar (External LM Manager)**. This skips LM configuration — CodexBar will handle it after setup. + +Complete the rest of onboarding normally (workspace, channels, etc.). + +### Step 3: Note Your Gateway Token + +After onboarding, your gateway token is stored at: +``` +~/.openclaw/gateway.token +``` + +You'll need this to connect CodexBar. The token is also shown during `openclaw gateway run`. + +### Step 4: Start the Gateway + +```bash +openclaw gateway run +``` + +### Step 5: Install CodexBar + +Download from the CodexBar releases page or build from source: +```bash +git clone <repository-url> +cd CodexBar-openclaw/CodexBar +swift build +# Install the app +``` + +### Step 6: Configure CodexBar + +1. Open CodexBar from your menu bar +2. Sign into your LM providers (Codex accounts, Claude, Gemini, etc.) +3. Connect local Ollama if running + +### Step 7: Connect CodexBar to OpenClaw + +1. Open CodexBar → Preferences → **LM Hub** tab +2. Click **Inject to OpenClaw** +3. CodexBar reads your gateway token automatically +4. Selects the running gateway (port 18789 by default) +5. Injects your LM configuration via secure WebSocket API +6. Gateway restarts with your new config + +### Step 8: Verify + +Open your browser to `http://127.0.0.1:18789` and send a message. Your configured LM providers should respond.
+ +--- + +## Existing Users — Adding CodexBar to Running OpenClaw + +If you already have OpenClaw running with providers configured: + +### Step 1: Install CodexBar + +(Same as Step 5 above) + +### Step 2: Sign into Providers in CodexBar + +Add your Codex accounts, Ollama endpoints, etc. + +### Step 3: Connect to OpenClaw + +1. Open CodexBar → Preferences → **LM Hub** tab +2. CodexBar discovers running gateways on your machine +3. Click **Inject to OpenClaw** +4. CodexBar connects as a device using your gateway token +5. Your existing config is preserved — only LM sections are updated + +### What Gets Updated + +CodexBar only modifies these sections of your OpenClaw config: +- `models.providers` — your LM providers and models +- `agents.defaults.model` — fallback chain +- `auth.profiles` — provider auth (tokens, API keys) +- `auth.order` — account rotation order +- `plugins.entries.ollama` — Ollama plugin enablement + +### What's Preserved + +Everything else stays untouched: +- Gateway settings, channels, plugins (Discord, iMessage, etc.) +- Workspace, tools, hooks, skills +- Memory, dreaming, browser config +- MCP servers, custom agents + +--- + +## Gateway Authentication + +CodexBar supports all OpenClaw gateway auth modes: + +### Token Auth (Default) +``` +Gateway reads token from ~/.openclaw/gateway.token +CodexBar reads the same file automatically +``` + +### Password Auth +If your gateway uses password auth: +1. Open CodexBar → LM Hub → Gateway Settings +2. Enter the gateway password +3. CodexBar connects with password instead of token + +### Tailscale Auth +For remote gateways on Tailscale: +1. Configure gateway with `gateway.bind: tailnet` +2. CodexBar connects via Tailscale IP +3. Auth uses Tailscale identity verification + +--- + +## How Fallback Works + +CodexBar configures your fallback chain as: + +``` +Primary Model (e.g. 
Codex gpt-5.4) + → Account 1 (user1@example.com) — tries first + → Account 2 (user2@example.com) — if account 1 at quota + → Account 3 (user3@example.com) — if account 2 at quota + → Account 4 ... — continues through all accounts + ↓ ALL accounts exhausted +Local Fallback (e.g. Ollama gemma4:e4b) + → Runs locally, no API needed, always available +``` + +**Key concepts:** +- Account rotation happens WITHIN a provider (not separate fallback entries) +- The fallback chain lists DIFFERENT providers (Codex → Claude → Gemini → Ollama) +- You can reorder providers and accounts in CodexBar's LM Hub + +--- + +## Security + +### How Injection Works (Secure) + +``` +CodexBar → reads gateway.token → connects via WebSocket +→ authenticates with token → sends config.patch RPC +→ gateway validates, applies, restarts itself +``` + +- **No file writes** — config changes go through the gateway API +- **No process killing** — gateway restarts itself via SIGUSR1 +- **Token required** — can't inject without the gateway token +- **Audit logged** — all config changes are logged with actor identity + +### Config Integrity + +OpenClaw now includes HMAC integrity checking: +- Config writes are signed with HMAC-SHA256 +- External modifications are detected and logged +- `openclaw security audit` shows integrity status + +--- + +## Troubleshooting + +### CodexBar can't find the gateway +- Make sure the gateway is running: `openclaw gateway run` +- Check the port: default is 18789 +- CodexBar scans ports 18789-22789 automatically + +### "Token not found" error +- Check `~/.openclaw/gateway.token` exists +- Verify permissions: `chmod 600 ~/.openclaw/gateway.token` +- The token is created during `openclaw onboard` + +### Ollama models not responding +- Verify Ollama is running: `ollama ps` +- Check model is downloaded: `ollama list` +- First request may take 15-20s for cold start (model loading) + +### Account rotation not working +- Check auth.order in config: all accounts should be listed +- 
Clear auth-state: CodexBar does this automatically on inject +- Verify tokens: open CodexBar → check account status indicators + +### Config was modified outside the gateway +- This triggers an HMAC integrity warning +- Normal for manual config edits (not a security issue) +- Use `config.patch` API for programmatic changes diff --git a/docs/CONNECTING-CLIENTS-TO-OPENCLAW.md b/docs/CONNECTING-CLIENTS-TO-OPENCLAW.md new file mode 100644 index 000000000..d9b24e70b --- /dev/null +++ b/docs/CONNECTING-CLIENTS-TO-OPENCLAW.md @@ -0,0 +1,178 @@ +# Connecting Clients to OpenClaw — Security Guide + +## Overview + +Every client that connects to an OpenClaw gateway must authenticate properly. This document explains the correct procedure for connecting any client — whether it's a custom dashboard, a mobile app, a hardware device, or a tool like CodexBar. + +## Core Principle + +**Every client must be registered as a paired device or node with the gateway.** Unpaired clients can only perform read-only operations (health check, model listing). All state-changing operations (sending messages, modifying config, syncing auth) require proper pairing. + +## How Authentication Works + +OpenClaw uses two separate concepts: + +1. **Transport Security** (`allowInsecureAuth`) + - Controls whether HTTP (non-HTTPS) connections are allowed + - Set to `true` for devices that can't do TLS (like Rabbit R1) + - This is about the connection protocol, NOT authorization + +2. **Authorization** (gateway token + device pairing) + - Controls WHO can make changes + - Gateway token is stored at `~/.openclaw/gateway.token` (permissions 0600) + - Device pairing uses Ed25519 cryptographic identity + - Required for ALL state-changing operations, regardless of transport + +## Client Connection Procedure + +### Step 1: Read the Gateway Token + +The gateway token is at `~/.openclaw/gateway.token`. This file has restricted permissions (0600 — only the owning user can read it). 
+ + ``` + # Read the token + cat ~/.openclaw/gateway.token + ``` + + Your client must be able to read this file. If running as a different user, the connection will fail (by design). + + ### Step 2: Connect via WebSocket + + Connect to the gateway's WebSocket endpoint: + + ``` + ws://127.0.0.1:18789 (loopback) + wss://your-host:18789 (remote, requires TLS) + ``` + + ### Step 3: Authenticate + + Send a connect message with your client identity and the gateway token: + + ```json + { + "type": "connect", + "client": { + "id": "your-client-id", + "name": "Your Client Name", + "version": "1.0.0", + "platform": "macos" + }, + "auth": { + "mode": "token", + "token": "<gateway-token>" + } + } + ``` + + ### Step 4: Device Pairing (First Time) + + If your client hasn't been paired before: + + 1. Generate an Ed25519 keypair for your client's identity + 2. Include it in the connect message + 3. The gateway creates a pending pairing request + 4. The user approves via OpenClaw UI or CLI: `openclaw devices approve <device-id>` + 5. Your client receives a device token with scopes + 6.
Store this token securely (Keychain on macOS, encrypted storage on other platforms) + +### Step 5: Make API Calls + +After authentication, you can call gateway methods: + +```json +{ + "type": "request", + "id": "", + "method": "config.patch", + "params": { + "raw": "{...}", + "baseHash": "" + } +} +``` + +## Method Scopes + +| Method | Required Scope | Description | +|--------|---------------|-------------| +| `config.get` | `operator.read` | Read current config | +| `config.patch` | `operator.admin` | Modify config | +| `auth.profiles.sync` | `operator.admin` | Sync auth tokens | +| `models.list` | none | List available models | +| `chat.send` | `operator.write` | Send a message | +| `health` | none | Health check | + +## What NOT to Do + +### DO NOT: Write config files directly + +```bash +# WRONG — bypasses all security +echo '{"models": {...}}' > ~/.openclaw/openclaw.json +``` + +This will trigger a config integrity warning and may be rejected by the gateway. + +### DO NOT: Kill the gateway process + +```bash +# WRONG — unauthorized restart +kill -9 $(lsof -ti :18789) +``` + +Use the gateway's own restart mechanism via `config.patch` (which triggers SIGUSR1 internally). + +### DO NOT: Modify auth-profiles.json directly + +```bash +# WRONG — bypasses authorization +echo '{"profiles": {...}}' > ~/.openclaw/agents/main/agent/auth-profiles.json +``` + +Use the `auth.profiles.sync` gateway method instead. + +## For Third-Party Client Developers + +If you're building a client, dashboard, or tool that connects to OpenClaw: + +1. **Register as a device** — your app should go through device pairing on first connection +2. **Store identity securely** — use the platform's keychain/secure storage +3. **Use gateway API** — never write files directly +4. **Request only needed scopes** — don't request `operator.admin` if you only need `operator.read` +5. 
**Handle revocation** — if the user revokes your device, handle the disconnect gracefully + +## For Existing Clients After Upgrade + +If you previously connected to OpenClaw without proper device pairing: + +1. Your client may stop working for state-changing operations +2. You need to add device pairing to your connection flow +3. The gateway token (which you may already use) is sufficient for initial auth +4. After pairing, your device appears in OpenClaw's Devices section + +## CodexBar Example + +CodexBar connects to OpenClaw as a paired device: + +1. Reads `~/.openclaw/gateway.token` +2. Connects via WebSocket to the discovered gateway port +3. Registers as device "CodexBar LM Hub" with Ed25519 identity +4. User approves once in OpenClaw UI +5. Uses `config.patch` for config injection (not file writes) +6. Uses `auth.profiles.sync` for token sync +7. Appears in OpenClaw Devices section — user can manage/revoke access + +## FAQ + +**Q: Can I still manually edit openclaw.json?** +A: Yes, but the gateway will detect the change and log a security warning. For development/debugging, this is fine. In production, use `config.patch`. + +**Q: What if I need `allowInsecureAuth`?** +A: That's fine — it only affects transport (HTTP vs HTTPS). Authorization (gateway token) is still required for all write operations. + +**Q: Does this affect the OpenClaw mobile app?** +A: No — the mobile app already connects as a paired device. + +**Q: What about the CLI?** +A: The CLI uses the gateway token automatically. No changes needed. diff --git a/docs/OPENCLAW-PR-DESCRIPTION.md b/docs/OPENCLAW-PR-DESCRIPTION.md new file mode 100644 index 000000000..d6148fb68 --- /dev/null +++ b/docs/OPENCLAW-PR-DESCRIPTION.md @@ -0,0 +1,70 @@ +# PR: Security Hardening + CodexBar External LM Manager Support + +## Summary + +This PR adds config integrity protection, scope enforcement for config writes, and onboarding support for CodexBar (an external LM provider manager). 
+ +## Security Changes + +### Config HMAC Integrity (`src/config/io.hmac-integrity.ts`) +- Config writes are now signed with HMAC-SHA256 using the gateway token +- On config load, the signature is verified. External modifications trigger a warning +- Prevents silent config injection from unauthorized local processes + +### Scope Enforcement on config.patch (`src/gateway/server-methods/config.ts`) +- `config.patch` now requires `operator.admin` scope +- Prevents unauthenticated clients from modifying config via the gateway API +- Explicitly added to `ADMIN_SCOPE` group in `method-scopes.ts` + +### Config Audit Log (`src/config/config-audit.ts`) +- All config changes logged to `~/.openclaw/logs/config-audit.jsonl` +- Tracks: timestamp, actor (gateway vs filesystem), changed paths, hashes +- Provides forensic trail for security investigations + +### Security Audit Enhancement (`src/security/audit.ts`) +- HMAC integrity mismatch flagged in `openclaw security audit` +- `allowInsecureAuth` flagged as warning (transport vs authorization distinction) + +## CodexBar Onboarding Support + +### New auth choice: "CodexBar" (`src/commands/auth-choice-options.static.ts`) +- Added "CodexBar (External LM Manager)" to onboarding provider list +- When selected, skips LM config with instructions to connect CodexBar after setup +- Non-breaking — existing providers and flows unchanged + +### Setup wizard handler (`src/wizard/setup.ts`) +- `codexbar` auth choice shows setup instructions and skips model selection +- Users complete onboarding normally (workspace, gateway auth, channels) +- After setup, they connect CodexBar using their gateway token + +## Files Changed + +| File | Change | +|------|--------| +| `src/config/io.hmac-integrity.ts` | NEW — HMAC integrity module | +| `src/config/config-audit.ts` | NEW — Config audit logger | +| `src/config/io.ts` | HMAC verify on load, HMAC write on save, audit logging | +| `src/config/types.openclaw.ts` | `integrityWarning` field on 
ConfigFileSnapshot | +| `src/gateway/method-scopes.ts` | config.patch/set/apply in ADMIN_SCOPE | +| `src/gateway/server-methods/config.ts` | Scope enforcement check | +| `src/security/audit.ts` | HMAC mismatch finding | +| `src/commands/auth-choice-options.static.ts` | CodexBar option | +| `src/wizard/setup.ts` | CodexBar auth choice handler | +| `docs/security/config-protection.md` | NEW — Security documentation | + +## Migration + +- **Non-breaking** — all changes are additive +- HMAC sig file generated on first config write +- Existing configs work without sig file (warning-only, no rejection) +- CodexBar onboarding option is purely additive to the provider list + +## Test Plan + +- [ ] Config write generates `.sig` sidecar file +- [ ] Config load with mismatched sig triggers warning +- [ ] `config.patch` without `operator.admin` scope returns error +- [ ] Config audit log captures changes +- [ ] `openclaw onboard` shows CodexBar option +- [ ] Selecting CodexBar skips LM config +- [ ] Existing onboarding flows unchanged diff --git a/docs/THIRD-PARTY-CLIENT-GUIDE.md b/docs/THIRD-PARTY-CLIENT-GUIDE.md new file mode 100644 index 000000000..0cab7d250 --- /dev/null +++ b/docs/THIRD-PARTY-CLIENT-GUIDE.md @@ -0,0 +1,217 @@ +# Connecting Third-Party Clients to OpenClaw + +## For Client/Dashboard/Tool Developers + +This guide explains how to properly connect your application to an OpenClaw gateway. Whether you're building a custom dashboard, a mobile app, a hardware integration, or an LM management tool like CodexBar, this is the correct procedure. + +## Requirements + +1. Your client must authenticate with the gateway +2. State-changing operations require `operator.admin` scope +3. Config changes must go through the `config.patch` API (not file writes) +4. Auth token sync must go through the gateway (not direct file writes) + +## Connection Flow + +### 1. 
Read the Gateway Token
+
+The gateway token is stored at `~/.openclaw/gateway.token` (permissions 0600, owner-only readable).
+
+```python
+# Python example
+with open(os.path.expanduser("~/.openclaw/gateway.token")) as f:
+    token = f.read().strip()
+```
+
+```swift
+// Swift example
+let tokenPath = FileManager.default.homeDirectoryForCurrentUser
+    .appendingPathComponent(".openclaw/gateway.token")
+let token = try String(contentsOf: tokenPath).trimmingCharacters(in: .whitespacesAndNewlines)
+```
+
+```javascript
+// Node.js example
+const token = fs.readFileSync(path.join(os.homedir(), '.openclaw/gateway.token'), 'utf8').trim();
+```
+
+### 2. Connect via WebSocket
+
+```
+ws://127.0.0.1:18789?token=<gateway-token>
+```
+
+For password-based gateways:
+```
+ws://127.0.0.1:18789?password=<gateway-password>
+```
+
+### 3. Send JSON-RPC Messages
+
+OpenClaw uses JSON-RPC over WebSocket:
+
+```json
+// Request
+{
+  "type": "request",
+  "id": "unique-uuid",
+  "method": "config.get",
+  "params": {}
+}
+
+// Response
+{
+  "type": "response",
+  "id": "unique-uuid",
+  "ok": true,
+  "payload": { ... }
+}
+```
+
+### 4. Read Current Config
+
+```json
+{
+  "type": "request",
+  "id": "1",
+  "method": "config.get",
+  "params": {}
+}
+```
+
+Response includes `baseHash` needed for config.patch:
+```json
+{
+  "type": "response",
+  "id": "1",
+  "ok": true,
+  "payload": {
+    "config": { ... },
+    "baseHash": "sha256-hash-of-current-config"
+  }
+}
+```
+
+### 5. Modify Config (Requires operator.admin)
+
+```json
+{
+  "type": "request",
+  "id": "2",
+  "method": "config.patch",
+  "params": {
+    "raw": "{\"models\":{\"providers\":{\"ollama\":{\"api\":\"ollama\",\"baseUrl\":\"http://127.0.0.1:11434\"}}}}",
+    "baseHash": "sha256-hash-from-config-get"
+  }
+}
+```
+
+The `raw` field is a JSON merge-patch (RFC 7396). Only include the fields you want to change.
+
+### 6. 
List Models
+
+```json
+{
+  "type": "request",
+  "id": "3",
+  "method": "models.list",
+  "params": {}
+}
+```
+
+## What NOT to Do
+
+### DO NOT write config files directly
+
+```bash
+# WRONG — triggers HMAC integrity warning, may be rejected
+echo '{"models":...}' > ~/.openclaw/openclaw.json
+```
+
+### DO NOT kill the gateway process
+
+```bash
+# WRONG — unauthorized, no audit trail
+kill -9 $(lsof -ti :18789)
+```
+
+The gateway restarts itself after `config.patch` if needed.
+
+### DO NOT modify auth-profiles.json directly
+
+```bash
+# WRONG — bypasses authorization
+echo '{"profiles":...}' > ~/.openclaw/agents/main/agent/auth-profiles.json
+```
+
+Use the `auth.profiles.sync` gateway method instead.
+
+## Error Handling
+
+| Error Code | Meaning |
+|-----------|---------|
+| `INVALID_REQUEST` | Missing required scope (need `operator.admin`) |
+| `UNAVAILABLE` | Gateway still starting up |
+| `CONFLICT` | baseHash mismatch (config changed since your last read) |
+
+On `CONFLICT`, call `config.get` again to get the latest baseHash, then retry your patch.
+
+## Device Registration (Advanced)
+
+For persistent client identity, register as a device:
+
+1. Generate an Ed25519 keypair
+2. Include device identity in your connect message
+3. User approves via `openclaw devices approve <device-id>`
+4. Your client appears in OpenClaw's Devices section
+5. User can manage/revoke access
+
+See `src/infra/device-identity.ts` and `src/infra/device-pairing.ts` for the protocol details. 
+
+## Examples
+
+### CodexBar (Swift)
+- Reads gateway token from disk
+- Connects via URLSessionWebSocketTask
+- Uses config.get + config.patch for LM config injection
+- Stores pairing state in macOS Keychain
+- Source: `Sources/CodexBarCore/OpenClawIntegration/OpenClawGatewayClient.swift`
+
+### Python Script
+```python
+import asyncio
+import json
+import os
+import websockets
+
+async def inject_config():
+    token = open(os.path.expanduser("~/.openclaw/gateway.token")).read().strip()
+    async with websockets.connect(f"ws://127.0.0.1:18789?token={token}") as ws:
+        # Get current config
+        await ws.send(json.dumps({
+            "type": "request", "id": "1", "method": "config.get", "params": {}
+        }))
+        resp = json.loads(await ws.recv())
+        base_hash = resp["payload"]["baseHash"]
+
+        # Patch config
+        patch = {"models": {"providers": {"ollama": {"api": "ollama"}}}}
+        await ws.send(json.dumps({
+            "type": "request", "id": "2", "method": "config.patch",
+            "params": {"raw": json.dumps(patch), "baseHash": base_hash}
+        }))
+        result = json.loads(await ws.recv())
+        print(f"Patch result: {result['ok']}")
+
+asyncio.run(inject_config())
+```
+
+## Upgrading Existing Clients
+
+If your client previously used file writes to modify OpenClaw config:
+
+1. **Replace file writes with `config.patch` RPC** — this is the main change
+2. **Replace `kill -9` with letting the gateway restart itself** — config.patch handles this
+3. **Add gateway token auth** — read from `~/.openclaw/gateway.token`
+4. **Handle the HMAC integrity check** — your file writes will trigger warnings now
+
+The migration is straightforward: read token → connect WebSocket → config.get → config.patch. That's it.