diff --git a/Makefile b/Makefile index b7b2101..5521972 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ .PHONY: help init plan apply destroy cluster-setup deploy-all deploy-infra deploy-services test clean .PHONY: k8s-status k8s-start k8s-stop k8s-restart -.PHONY: build start stop status logs +.PHONY: build start stop status logs restart-agent TERRAFORM_DIR := infrastructure/terraform KUBECONFIG := $(shell pwd)/.kube/config @@ -202,6 +202,12 @@ start: echo "Required sibling repos: vcli, verifier, feeplugin, app-recurring"; \ exit 1; \ fi + @if [ ! -d "../agent-backend" ]; then \ + echo "WARNING: ../agent-backend directory not found — agent-backend will be skipped"; \ + fi + @if [ ! -d "../mcp" ]; then \ + echo "WARNING: ../mcp directory not found — mcp server will be skipped"; \ + fi @echo "Starting infrastructure (postgres, redis, minio)..." @docker compose -f $(COMPOSE_FILE) down -v --remove-orphans 2>/dev/null || true docker compose -f $(COMPOSE_FILE) up -d @@ -219,6 +225,10 @@ stop: @-pkill -9 -f "go run.*cmd/server" 2>/dev/null || true @-pkill -9 -f "go run.*cmd/scheduler" 2>/dev/null || true @-pkill -9 -f "go run.*cmd/tx_indexer" 2>/dev/null || true + @-pkill -9 -f "go run.*agent-backend.*cmd/server" 2>/dev/null || true + @-pkill -9 -f "agent-backend-server" 2>/dev/null || true + @-pkill -9 -f "go run.*mcp.*cmd/mcp-server" 2>/dev/null || true + @-pkill -9 -f "mcp-server.*-http" 2>/dev/null || true @-pkill -9 -f "go-build.*/verifier$$" 2>/dev/null || true @-pkill -9 -f "go-build.*/worker$$" 2>/dev/null || true @-pkill -9 -f "go-build.*/server$$" 2>/dev/null || true @@ -231,6 +241,31 @@ stop: @rm -rf ~/.vultisig/vaults/ 2>/dev/null || true @echo "Stopped and cleaned." +restart-agent: + @echo "Restarting agent-backend..." + @-lsof -ti :8084 | xargs kill -9 2>/dev/null || true + @sleep 1 + @if [ ! -d "../agent-backend" ]; then \ + echo "ERROR: ../agent-backend directory not found"; \ + exit 1; \ + fi + @cd ../agent-backend && \ + set -a && . 
./.env && set +a && \ + export DATABASE_DSN="postgres://vultisig:vultisig@localhost:5432/vultisig-agent?sslmode=disable" && \ + export REDIS_URI="redis://:vultisig@localhost:6379" && \ + export VERIFIER_URL="http://localhost:8080" && \ + export LOG_FORMAT="text" && \ + export SERVER_PORT="8084" && \ + go build -o /tmp/agent-backend-server ./cmd/server && \ + /tmp/agent-backend-server > $(CURDIR)/local/logs/agent-backend.log 2>&1 & + @for i in 1 2 3 4 5 6 7 8 9 10; do \ + if curl -s http://localhost:8084/healthz > /dev/null 2>&1; then \ + echo "Agent Backend restarted → localhost:8084"; \ + break; \ + fi; \ + sleep 1; \ + done + status: @docker compose -f $(COMPOSE_FILE) ps @@ -244,6 +279,8 @@ logs: @echo " tail -f local/logs/dca-scheduler.log" @echo " tail -f local/logs/fee-server.log" @echo " tail -f local/logs/fee-worker.log" + @echo " tail -f local/logs/agent-backend.log" + @echo " tail -f local/logs/mcp-server.log" @echo "" @echo "All logs: tail -f local/logs/*.log" diff --git a/README.md b/README.md index 19c9502..e91060b 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,7 @@ The Vultisig stack is tightly coupled: - **vcli** depends on verifier (TSS protocols) - **verifier** depends on recipes (chain abstraction) - **app-recurring** depends on recipes + verifier (policy execution) +- **agent-backend** depends on verifier (plugin specs, policy suggestions) - All depend on **go-wrappers** (cryptographic primitives) Changes in one repo often require changes in others. Docker images create version drift - the local vcli binary may be incompatible with pre-built Docker images due to protocol or signature changes. 
@@ -39,6 +40,7 @@ mkdir vultisig && cd vultisig git clone https://github.com/vultisig/vcli.git git clone https://github.com/vultisig/verifier.git git clone https://github.com/vultisig/app-recurring.git +git clone https://github.com/vultisig/agent-backend.git git clone https://github.com/vultisig/recipes.git git clone https://github.com/vultisig/go-wrappers.git ``` @@ -46,11 +48,12 @@ git clone https://github.com/vultisig/go-wrappers.git Directory structure: ``` vultisig/ -├── vcli/ # This tool -├── verifier/ # Policy verification + TSS -├── app-recurring/ # DCA plugin -├── recipes/ # Chain abstraction layer -└── go-wrappers/ # Rust crypto (auto-downloaded, but useful to have) +├── vcli/ # This tool +├── verifier/ # Policy verification + TSS +├── app-recurring/ # DCA + Sends plugins +├── agent-backend/ # AI agent backend (optional) +├── recipes/ # Chain abstraction layer +└── go-wrappers/ # Rust crypto (auto-downloaded, but useful to have) ``` --- @@ -76,7 +79,8 @@ make start # Starts postgres/redis/minio in Docker, services run natively `make start`: 1. Starts infrastructure in Docker (postgres, redis, minio) 2. Runs verifier (API + worker) natively with `go run` -3. Runs app-recurring (server + worker + scheduler) natively with `go run` +3. Runs app-recurring (DCA + Sends: server + worker + scheduler) natively with `go run` +4. 
Runs agent-backend natively with `go run` (if repo present) Logs: `tail -f local/logs/*.log` @@ -149,7 +153,7 @@ make start This starts: - Infrastructure in Docker: PostgreSQL, Redis, MinIO -- Services natively: Verifier API/worker, DCA plugin server/worker/scheduler +- Services natively: Verifier API/worker, DCA plugin server/worker/scheduler, Sends plugin server/worker/scheduler, Agent Backend **Validation:** ```bash @@ -437,6 +441,14 @@ make status ## vcli Commands Reference ```bash +# Use production verifier/plugin endpoints for any command +./local/vcli.sh --prod [flags] + +# Examples +./local/vcli.sh --prod status +./local/vcli.sh --prod plugin list +./local/vcli.sh --prod policy generate --from eth --to usdc --amount 0.01 --output $(pwd)/local/policies/prod-policy.json + # Vault management (put .vult file in local/keyshares/ first) ./local/vcli.sh vault import --password "password" ./local/vcli.sh vault list @@ -461,6 +473,12 @@ make status ./local/vcli.sh status ``` +When `--prod` is set, vcli uses: +- Verifier: `https://verifier.vultisig.com` +- DCA (Recurring Swaps): `https://plugin-dca-swap.prod.plugins.vultisig.com` +- Fees: `https://plugin-fees.prod.plugins.vultisig.com` +- Recurring Sends: `https://plugin-dca-send.prod.plugins.vultisig.com` + ## Services & Ports | Service | Port | Notes | @@ -475,6 +493,24 @@ make status | DCA Worker | - | Native (go run) | | DCA Scheduler | - | Native (go run) | | DCA TX Indexer | - | Native (go run) | +| Sends Server | 8083 | Native (go run) | +| Sends Worker | - | Native (go run) | +| Sends Scheduler | - | Native (go run) | +| Sends TX Indexer | - | Native (go run) | +| Agent Backend | 8084 | Native (go run), optional | + +## Agent Backend Setup + +The agent-backend requires an `ANTHROPIC_API_KEY` in its `.env` file. If present, `make start` sources `../agent-backend/.env` and runs it automatically against the shared infrastructure. 
+ +```bash +# One-time: copy and fill in your API key +cd ../agent-backend +cp .env.example .env +# Edit .env and set ANTHROPIC_API_KEY +``` + +If the `agent-backend/` repo is not present, `make start` skips it with a warning. ## Queue Isolation (4-Party TSS) @@ -515,12 +551,13 @@ vcli/ └── README.md ``` -**Sibling repos (required):** +**Sibling repos:** ``` vultisig/ ├── vcli/ -├── verifier/ -├── app-recurring/ +├── verifier/ # Required +├── app-recurring/ # Required +├── agent-backend/ # Optional (skipped if missing) ├── recipes/ └── go-wrappers/ ``` @@ -572,6 +609,8 @@ tail -f local/logs/worker.log # Verifier worker tail -f local/logs/dca-server.log # DCA plugin server tail -f local/logs/dca-worker.log # DCA plugin worker tail -f local/logs/dca-scheduler.log # DCA scheduler +tail -f local/logs/sends-server.log # Sends plugin server +tail -f local/logs/agent-backend.log # Agent backend # View all logs tail -f local/logs/*.log diff --git a/local/cmd/vcli/cmd/auth.go b/local/cmd/vcli/cmd/auth.go index 388d4c1..e0b0eb1 100644 --- a/local/cmd/vcli/cmd/auth.go +++ b/local/cmd/vcli/cmd/auth.go @@ -10,8 +10,10 @@ import ( "io" "net/http" "os" + "strings" "time" + "github.com/ethereum/go-ethereum/crypto" "github.com/spf13/cobra" ) @@ -84,6 +86,44 @@ type AuthToken struct { ExpiresAt time.Time `json:"expires_at"` } +type authMessagePayload struct { + Message string `json:"message"` + Nonce string `json:"nonce"` + ExpiresAt string `json:"expiresAt"` + Address string `json:"address"` +} + +func extractAuthTokenFromResponse(body []byte) (string, error) { + var payload map[string]any + if err := json.Unmarshal(body, &payload); err != nil { + return "", err + } + + if data, ok := payload["data"].(map[string]any); ok { + if token, ok := data["token"].(string); ok && token != "" { + return token, nil + } + if token, ok := data["access_token"].(string); ok && token != "" { + return token, nil + } + if token, ok := data["jwt"].(string); ok && token != "" { + return token, nil + } 
+ } + + if token, ok := payload["token"].(string); ok && token != "" { + return token, nil + } + if token, ok := payload["access_token"].(string); ok && token != "" { + return token, nil + } + if token, ok := payload["jwt"].(string); ok && token != "" { + return token, nil + } + + return "", fmt.Errorf("auth response missing token") +} + func runAuthLogin(vaultID, password string) error { cfg, err := LoadConfig() if err != nil { @@ -118,8 +158,23 @@ func runAuthLogin(vaultID, password string) error { } nonce := hex.EncodeToString(nonceBytes) - expiryTime := time.Now().Add(5 * time.Minute) - message := fmt.Sprintf("%s:%d", nonce, expiryTime.Unix()) + address, err := deriveEthereumAddressFromPubKey(vault.PublicKeyECDSA) + if err != nil { + return fmt.Errorf("derive address from vault public key: %w", err) + } + + expiryTime := time.Now().Add(15 * time.Minute).UTC() + messagePayload := authMessagePayload{ + Message: "Sign into Vultisig App Store", + Nonce: nonce, + ExpiresAt: expiryTime.Format(time.RFC3339), + Address: strings.ToLower(address), + } + messageBytes, err := json.Marshal(messagePayload) + if err != nil { + return fmt.Errorf("marshal auth message: %w", err) + } + message := string(messageBytes) fmt.Printf("Authenticating with verifier...\n") fmt.Printf(" Vault: %s\n", vault.Name) @@ -133,7 +188,10 @@ func runAuthLogin(vaultID, password string) error { fmt.Println("\nPerforming TSS keysign for authentication...") derivePath := "m/44'/60'/0'/0/0" - results, err := tss.Keysign(ctx, vault, []string{message}, derivePath, false, password) + ethPrefixedMessage := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(message), message) + messageHash := crypto.Keccak256([]byte(ethPrefixedMessage)) + hexMessage := hex.EncodeToString(messageHash) + results, err := tss.KeysignWithFastVault(ctx, vault, []string{hexMessage}, derivePath, password) if err != nil { return fmt.Errorf("TSS keysign failed: %w", err) } @@ -142,7 +200,7 @@ func runAuthLogin(vaultID, password 
string) error { return fmt.Errorf("no signature result") } - signature := results[0].DerSignature + signature := "0x" + results[0].R + results[0].S + results[0].RecoveryID authReq := map[string]string{ "message": message, @@ -175,18 +233,13 @@ func runAuthLogin(vaultID, password string) error { return fmt.Errorf("authentication failed (%d): %s", resp.StatusCode, string(body)) } - var authResp struct { - Data struct { - Token string `json:"token"` - } `json:"data"` - } - err = json.Unmarshal(body, &authResp) + tokenValue, err := extractAuthTokenFromResponse(body) if err != nil { return fmt.Errorf("parse auth response: %w", err) } authToken := AuthToken{ - Token: authResp.Data.Token, + Token: tokenValue, PublicKey: vault.PublicKeyECDSA, ExpiresAt: time.Now().Add(7 * 24 * time.Hour), } @@ -202,6 +255,18 @@ func runAuthLogin(vaultID, password string) error { return nil } +func deriveEthereumAddressFromPubKey(publicKeyHex string) (string, error) { + keyBytes, err := hex.DecodeString(strings.TrimPrefix(strings.TrimPrefix(publicKeyHex, "0x"), "0X")) + if err != nil { + return "", fmt.Errorf("decode public key: %w", err) + } + pubKey, err := crypto.DecompressPubkey(keyBytes) + if err != nil { + return "", fmt.Errorf("decompress pubkey: %w", err) + } + return crypto.PubkeyToAddress(*pubKey).Hex(), nil +} + func runAuthStatus() error { token, err := LoadAuthToken() if err != nil { diff --git a/local/cmd/vcli/cmd/config.go b/local/cmd/vcli/cmd/config.go index a8f86e1..c60fe31 100644 --- a/local/cmd/vcli/cmd/config.go +++ b/local/cmd/vcli/cmd/config.go @@ -18,6 +18,7 @@ type DevConfig struct { Verifier string `json:"verifier_url"` FeePlugin string `json:"fee_plugin_url"` DCAPlugin string `json:"dca_plugin_url"` + SendsPlugin string `json:"sends_plugin_url"` RelayServer string `json:"relay_server"` DatabaseDSN string `json:"database_dsn"` RedisURI string `json:"redis_uri"` @@ -33,25 +34,19 @@ type DevConfig struct { AuthExpiresAt string `json:"auth_expires_at,omitempty"` } 
-func getEnvOrDefault(key, defaultVal string) string { - if val := os.Getenv(key); val != "" { - return val - } - return defaultVal -} - func DefaultConfig() *DevConfig { return &DevConfig{ - Verifier: getEnvOrDefault("VCLI_VERIFIER_URL", "http://localhost:8080"), - FeePlugin: getEnvOrDefault("VCLI_FEE_PLUGIN_URL", "http://localhost:8085"), - DCAPlugin: getEnvOrDefault("VCLI_DCA_PLUGIN_URL", "http://localhost:8082"), - RelayServer: getEnvOrDefault("VCLI_RELAY_URL", "https://api.vultisig.com/router"), - DatabaseDSN: getEnvOrDefault("VCLI_DATABASE_DSN", "postgres://vultisig:vultisig@localhost:5432/vultisig-verifier?sslmode=disable"), - RedisURI: getEnvOrDefault("VCLI_REDIS_URI", "redis://:vultisig@localhost:6379"), - MinioHost: getEnvOrDefault("VCLI_MINIO_HOST", "http://localhost:9000"), - MinioAccess: getEnvOrDefault("VCLI_MINIO_ACCESS_KEY", "minioadmin"), - MinioSecret: getEnvOrDefault("VCLI_MINIO_SECRET_KEY", "minioadmin"), - Encryption: getEnvOrDefault("VCLI_ENCRYPTION_SECRET", "dev-encryption-secret-32b"), + Verifier: "http://localhost:8080", + FeePlugin: "http://localhost:8085", + DCAPlugin: "http://localhost:8082", + SendsPlugin: "http://localhost:8083", + RelayServer: "https://api.vultisig.com/router", + DatabaseDSN: "postgres://vultisig:vultisig@localhost:5432/vultisig-verifier?sslmode=disable", + RedisURI: "redis://:vultisig@localhost:6379", + MinioHost: "http://localhost:9000", + MinioAccess: "minioadmin", + MinioSecret: "minioadmin", + Encryption: "dev-encryption-secret-32b", } } @@ -65,7 +60,9 @@ func LoadConfig() (*DevConfig, error) { data, err := os.ReadFile(path) if err != nil { if os.IsNotExist(err) { - return DefaultConfig(), nil + cfg := DefaultConfig() + applyEnvironmentOverrides(cfg) + return cfg, nil } return nil, fmt.Errorf("read config: %w", err) } @@ -75,9 +72,46 @@ func LoadConfig() (*DevConfig, error) { if err != nil { return nil, fmt.Errorf("parse config: %w", err) } + applyEnvironmentOverrides(cfg) return cfg, nil } +func 
applyEnvironmentOverrides(cfg *DevConfig) { + if val := os.Getenv("VCLI_VERIFIER_URL"); val != "" { + cfg.Verifier = val + } + if val := os.Getenv("VCLI_FEE_PLUGIN_URL"); val != "" { + cfg.FeePlugin = val + } + if val := os.Getenv("VCLI_DCA_PLUGIN_URL"); val != "" { + cfg.DCAPlugin = val + } + if val := os.Getenv("VCLI_SENDS_PLUGIN_URL"); val != "" { + cfg.SendsPlugin = val + } + if val := os.Getenv("VCLI_RELAY_URL"); val != "" { + cfg.RelayServer = val + } + if val := os.Getenv("VCLI_DATABASE_DSN"); val != "" { + cfg.DatabaseDSN = val + } + if val := os.Getenv("VCLI_REDIS_URI"); val != "" { + cfg.RedisURI = val + } + if val := os.Getenv("VCLI_MINIO_HOST"); val != "" { + cfg.MinioHost = val + } + if val := os.Getenv("VCLI_MINIO_ACCESS_KEY"); val != "" { + cfg.MinioAccess = val + } + if val := os.Getenv("VCLI_MINIO_SECRET_KEY"); val != "" { + cfg.MinioSecret = val + } + if val := os.Getenv("VCLI_ENCRYPTION_SECRET"); val != "" { + cfg.Encryption = val + } +} + func SaveConfig(cfg *DevConfig) error { path := ConfigPath() dir := filepath.Dir(path) @@ -167,6 +201,10 @@ func GetPluginServerURL(pluginIDOrAlias string) (string, error) { if cfg.FeePlugin != "" && cfg.FeePlugin != "http://localhost:8085" { return cfg.FeePlugin, nil } + case "vultisig-recurring-sends-0000": + if cfg.SendsPlugin != "" && cfg.SendsPlugin != "http://localhost:8083" { + return cfg.SendsPlugin, nil + } } } diff --git a/local/cmd/vcli/cmd/plugin.go b/local/cmd/vcli/cmd/plugin.go index d0c063e..45b8aab 100644 --- a/local/cmd/vcli/cmd/plugin.go +++ b/local/cmd/vcli/cmd/plugin.go @@ -27,6 +27,7 @@ func NewPluginCmd() *cobra.Command { cmd.AddCommand(newPluginInstallCmd()) cmd.AddCommand(newPluginUninstallCmd()) cmd.AddCommand(newPluginSpecCmd()) + cmd.AddCommand(newPluginInstalledCmd()) return cmd } @@ -245,17 +246,27 @@ func runPluginInstall(pluginIDOrAlias string, password string) error { return fmt.Errorf("load config: %w", err) } - authHeader, err := GetAuthHeader() - if err != nil { - return 
fmt.Errorf("authentication required: %w\n\nRun 'vcli vault import --password xxx' to authenticate first", err) - } - vaults, err := ListVaults() if err != nil || len(vaults) == 0 { return fmt.Errorf("no vaults found. Import a vault first: vcli vault import") } vault := vaults[0] + authHeader, err := GetAuthHeader() + if err != nil { + if password == "" { + return fmt.Errorf("authentication required: %w\n\nRun 'vcli vault import --password xxx' to authenticate first", err) + } + fmt.Println("No valid auth token found; authenticating with Fast Vault...") + if authErr := authenticateVault(vault, password); authErr != nil { + return fmt.Errorf("authentication required: %w\n\nautomatic authentication failed: %v", err, authErr) + } + authHeader, err = GetAuthHeader() + if err != nil { + return fmt.Errorf("authentication required after re-auth: %w", err) + } + } + fmt.Printf("Installing plugin %s...\n", pluginID) fmt.Printf(" Vault: %s (%s...)\n", vault.Name, vault.PublicKeyECDSA[:16]) fmt.Printf(" Verifier: %s\n", cfg.Verifier) @@ -273,11 +284,26 @@ func runPluginInstall(pluginIDOrAlias string, password string) error { return fmt.Errorf("password is required for Fast Vault reshare. 
Use --password flag") } - // Check if plugin is already installed - dbRecord := checkPluginInstallation(pluginID, vault.PublicKeyECDSA) - if dbRecord != "" { + isProduction := strings.Contains(cfg.Verifier, "vultisig.com") + + var isInstalled bool + var dbRecord string + if isProduction { + installed, err := checkPluginInstallationProduction(cfg, pluginID, vault.PublicKeyECDSA) + if err != nil { + fmt.Printf(" Warning: Could not check installation status: %v\n", err) + } + isInstalled = installed + } else { + dbRecord = checkPluginInstallation(pluginID, vault.PublicKeyECDSA) + isInstalled = dbRecord != "" + } + + if isInstalled { fmt.Printf("\n Plugin %s is already installed for this vault.\n", pluginID) - fmt.Printf(" Installed at: %s\n", dbRecord) + if dbRecord != "" { + fmt.Printf(" Installed at: %s\n", dbRecord) + } fmt.Println("\n To reinstall, first run: vcli plugin uninstall", pluginID) return nil } @@ -307,7 +333,7 @@ func runPluginInstall(pluginIDOrAlias string, password string) error { tss := NewTSSService(vault.LocalPartyID) reshareStart := time.Now() - reshareCtx, reshareCancel := context.WithTimeout(context.Background(), 3*time.Minute) + reshareCtx, reshareCancel := context.WithTimeout(context.Background(), 2*time.Minute) defer reshareCancel() newVault, err := tss.ReshareWithDKLS(reshareCtx, vault, pluginID, cfg.Verifier, authHeader, password) @@ -316,10 +342,12 @@ func runPluginInstall(pluginIDOrAlias string, password string) error { } reshareDuration := time.Since(reshareStart) - err = SaveVault(newVault) - if err != nil { - return fmt.Errorf("save vault: %w", err) - } + // NOTE: We intentionally do NOT save the 4-party vault locally. + // The plugin's 2-of-4 keyshares are stored in MinIO by verifier and worker. + // The local vault remains at 2-of-2 so users can: + // 1. Sign transactions directly with CLI + Fast Vault Server + // 2. 
Install additional plugins from the same original vault + _ = newVault // Keyshare is uploaded to MinIO, not saved locally totalDuration := time.Since(startTime) @@ -329,7 +357,8 @@ func runPluginInstall(pluginIDOrAlias string, password string) error { // Validate storage - check MinIO buckets (with retry) verifierFile, verifierSize := checkMinioFileWithRetry("vultisig-verifier", pluginID, vault.PublicKeyECDSA, 3) - dcaFile, dcaSize := checkMinioFileWithRetry("vultisig-dca", pluginID, vault.PublicKeyECDSA, 3) + pluginBucket := getPluginBucket(pluginID) + pluginFile, pluginSize := checkMinioFileWithRetry(pluginBucket, pluginID, vault.PublicKeyECDSA, 3) // Check database record dbRecord = checkPluginInstallation(pluginID, vault.PublicKeyECDSA) @@ -364,10 +393,11 @@ func runPluginInstall(pluginIDOrAlias string, password string) error { } else { fmt.Printf("│ Verifier (MinIO): ✗ %-41s │\n", "Not found") } - if dcaFile != "" { - fmt.Printf("│ DCA Plugin (MinIO): ✓ %-39s │\n", dcaSize) + pluginLabel := getPluginLabel(pluginID) + if pluginFile != "" { + fmt.Printf("│ %s (MinIO): ✓ %-38s │\n", pluginLabel, pluginSize) } else { - fmt.Printf("│ DCA Plugin (MinIO): ✗ %-39s │\n", "Not found") + fmt.Printf("│ %s (MinIO): ✗ %-38s │\n", pluginLabel, "Not found") } fmt.Println("│ │") fmt.Println("│ Database: │") @@ -400,9 +430,26 @@ func getSignerRole(signer, localPartyID string) string { if strings.HasPrefix(signer, "dca-worker-") { return "(DCA Plugin)" } + if strings.HasPrefix(signer, "sends-worker-") { + return "(Sends Plugin)" + } return "" } +func getPluginBucket(pluginID string) string { + if strings.Contains(pluginID, "send") { + return "vultisig-sends" + } + return "vultisig-dca" +} + +func getPluginLabel(pluginID string) string { + if strings.Contains(pluginID, "send") { + return "Sends Plugin" + } + return "DCA Plugin " +} + func checkMinioFileWithRetry(bucket, pluginID, publicKey string, maxRetries int) (string, string) { for i := 0; i < maxRetries; i++ { file, size := 
checkMinioFile(bucket, pluginID, publicKey) @@ -473,6 +520,30 @@ func checkPluginInstallation(pluginID, publicKey string) string { return t.Format("2006-01-02 15:04:05") } +func checkPluginInstallationProduction(cfg *DevConfig, pluginID, publicKey string) (bool, error) { + url := fmt.Sprintf("%s/vault/exist/%s/%s", cfg.Verifier, pluginID, publicKey) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return false, err + } + + if cfg.AuthToken != "" { + req.Header.Set("Authorization", "Bearer "+cfg.AuthToken) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return false, err + } + defer resp.Body.Close() + + return resp.StatusCode == http.StatusOK, nil +} + func runPluginUninstall(pluginID string) error { startTime := time.Now() @@ -488,12 +559,87 @@ func runPluginUninstall(pluginID string) error { fmt.Printf("Uninstalling plugin %s...\n", pluginID) fmt.Printf(" Vault: %s\n", cfg.PublicKeyECDSA[:16]+"...") + isProduction := strings.Contains(cfg.Verifier, "vultisig.com") + + if isProduction { + return runPluginUninstallProduction(cfg, pluginID, startTime) + } + + return runPluginUninstallLocal(cfg, pluginID, startTime) +} + +func runPluginUninstallProduction(cfg *DevConfig, pluginID string, startTime time.Time) error { + if cfg.AuthToken == "" { + return fmt.Errorf("not authenticated. 
Run 'vcli auth login' first") + } + + fmt.Printf(" Verifier: %s\n", cfg.Verifier) + + fmt.Println("\nRemoving plugin via verifier API...") + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + url := fmt.Sprintf("%s/plugin/%s", cfg.Verifier, pluginID) + req, err := http.NewRequestWithContext(ctx, "DELETE", url, nil) + if err != nil { + return fmt.Errorf("create request: %w", err) + } + req.Header.Set("Authorization", "Bearer "+cfg.AuthToken) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("send request: %w", err) + } + defer resp.Body.Close() + + body, _ := io.ReadAll(resp.Body) + + totalDuration := time.Since(startTime) + + fmt.Println() + fmt.Println("┌─────────────────────────────────────────────────────────────────┐") + fmt.Println("│ PLUGIN UNINSTALL │") + fmt.Println("├─────────────────────────────────────────────────────────────────┤") + fmt.Println("│ │") + fmt.Printf("│ Plugin: %-52s │\n", pluginID) + fmt.Printf("│ Vault: %-52s │\n", cfg.PublicKeyECDSA[:16]+"...") + fmt.Println("│ │") + + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNoContent { + fmt.Printf("│ Status: ✓ %-50s │\n", "Uninstalled successfully") + } else if resp.StatusCode == http.StatusNotFound { + fmt.Printf("│ Status: - %-50s │\n", "Plugin was not installed") + } else { + fmt.Printf("│ Status: ✗ %-50s │\n", fmt.Sprintf("Failed (%d)", resp.StatusCode)) + fmt.Printf("│ Response: %-52s │\n", truncateString(string(body), 50)) + } + + fmt.Println("│ │") + fmt.Printf("│ Total Time: %-51s │\n", totalDuration.Round(time.Millisecond).String()) + fmt.Println("│ │") + fmt.Println("└─────────────────────────────────────────────────────────────────┘") + fmt.Println() + fmt.Println("Next: vcli plugin install", pluginID, "--password ") + + return nil +} + +func truncateString(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + return s[:maxLen-3] + "..." 
+} + +func runPluginUninstallLocal(cfg *DevConfig, pluginID string, startTime time.Time) error { // Check current installation status + pluginBucket := getPluginBucket(pluginID) dbRecord := checkPluginInstallation(pluginID, cfg.PublicKeyECDSA) verifierFile, _ := checkMinioFile("vultisig-verifier", pluginID, cfg.PublicKeyECDSA) - dcaFile, _ := checkMinioFile("vultisig-dca", pluginID, cfg.PublicKeyECDSA) + pluginFile, _ := checkMinioFile(pluginBucket, pluginID, cfg.PublicKeyECDSA) - if dbRecord == "" && verifierFile == "" && dcaFile == "" { + if dbRecord == "" && verifierFile == "" && pluginFile == "" { fmt.Println("\n Plugin is not installed for this vault.") return nil } @@ -502,7 +648,7 @@ func runPluginUninstall(pluginID string) error { // Remove MinIO files (verifier + plugin 2-of-4 shares) verifierRemoved := removeMinioFile("vultisig-verifier", pluginID, cfg.PublicKeyECDSA) - dcaRemoved := removeMinioFile("vultisig-dca", pluginID, cfg.PublicKeyECDSA) + pluginRemoved := removeMinioFile(pluginBucket, pluginID, cfg.PublicKeyECDSA) // Remove database record dbRemoved := removePluginInstallation(pluginID, cfg.PublicKeyECDSA) @@ -526,12 +672,13 @@ func runPluginUninstall(pluginID string) error { } else { fmt.Printf("│ Verifier keyshare (MinIO): - %-32s │\n", "Not found") } - if dcaRemoved { - fmt.Printf("│ DCA Plugin keyshare (MinIO): ✓ %-30s │\n", "Deleted") - } else if dcaFile != "" { - fmt.Printf("│ DCA Plugin keyshare (MinIO): ✗ %-30s │\n", "Failed to delete") + pluginLabel := getPluginLabel(pluginID) + if pluginRemoved { + fmt.Printf("│ %s keyshare (MinIO): ✓ %-29s │\n", pluginLabel, "Deleted") + } else if pluginFile != "" { + fmt.Printf("│ %s keyshare (MinIO): ✗ %-29s │\n", pluginLabel, "Failed to delete") } else { - fmt.Printf("│ DCA Plugin keyshare (MinIO): - %-30s │\n", "Not found") + fmt.Printf("│ %s keyshare (MinIO): - %-29s │\n", pluginLabel, "Not found") } if dbRemoved { fmt.Printf("│ Database record: ✓ %-42s │\n", "Deleted") @@ -634,3 +781,102 @@ func 
doRequest(method, url string, body interface{}) ([]byte, int, error) { respBody, _ := io.ReadAll(resp.Body) return respBody, resp.StatusCode, nil } + +func newPluginInstalledCmd() *cobra.Command { + return &cobra.Command{ + Use: "installed", + Short: "List installed plugins for current vault", + Long: `List all plugins that have been installed for the current vault. + +This queries the verifier to show which plugins have keyshares registered +for your vault's public key.`, + RunE: func(cmd *cobra.Command, args []string) error { + return runPluginInstalled() + }, + } +} + +func runPluginInstalled() error { + cfg, err := LoadConfig() + if err != nil { + return fmt.Errorf("load config: %w", err) + } + + if cfg.PublicKeyECDSA == "" { + return fmt.Errorf("no vault configured. Run 'vcli vault import' first") + } + + fmt.Printf("Fetching installed plugins for vault %s...\n\n", cfg.PublicKeyECDSA[:16]+"...") + + url := fmt.Sprintf("%s/plugins/installed?public_key=%s", cfg.Verifier, cfg.PublicKeyECDSA) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return fmt.Errorf("create request: %w", err) + } + + if cfg.AuthToken != "" { + req.Header.Set("Authorization", "Bearer "+cfg.AuthToken) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("request failed: %w", err) + } + defer resp.Body.Close() + + body, _ := io.ReadAll(resp.Body) + + var result struct { + Data struct { + Plugins []struct { + ID string `json:"id"` + Title string `json:"title"` + Description string `json:"description"` + } `json:"plugins"` + TotalCount int `json:"total_count"` + } `json:"data"` + Error struct { + Message string `json:"message"` + } `json:"error"` + Status int `json:"status"` + } + + err = json.Unmarshal(body, &result) + if err != nil { + return fmt.Errorf("parse response: %w", err) + } + + if result.Error.Message != "" { + return 
fmt.Errorf("verifier error: %s", result.Error.Message) + } + + if result.Data.TotalCount == 0 { + fmt.Println("No plugins installed for this vault.") + fmt.Println("\nTo install a plugin: vcli plugin install --password ") + return nil + } + + fmt.Printf("Installed Plugins (%d):\n\n", result.Data.TotalCount) + fmt.Println("┌─────────────────────────────────────┬────────────────────────────────┐") + fmt.Println("│ Plugin ID │ Name │") + fmt.Println("├─────────────────────────────────────┼────────────────────────────────┤") + for _, p := range result.Data.Plugins { + id := p.ID + if len(id) > 35 { + id = id[:32] + "..." + } + title := p.Title + if len(title) > 30 { + title = title[:27] + "..." + } + fmt.Printf("│ %-35s │ %-30s │\n", id, title) + } + fmt.Println("└─────────────────────────────────────┴────────────────────────────────┘") + + fmt.Println("\nTo view policies: vcli policy list --plugin ") + + return nil +} diff --git a/local/cmd/vcli/cmd/policy.go b/local/cmd/vcli/cmd/policy.go index 9708cac..3394a33 100644 --- a/local/cmd/vcli/cmd/policy.go +++ b/local/cmd/vcli/cmd/policy.go @@ -223,30 +223,65 @@ func runPolicyList(pluginID string) error { return fmt.Errorf("request failed (%d): %s", resp.StatusCode, string(body)) } - var policies []map[string]interface{} - err = json.Unmarshal(body, &policies) + var result struct { + Data struct { + Policies []struct { + ID string `json:"id"` + Active bool `json:"active"` + CreatedAt string `json:"created_at"` + Recipe struct { + SourceAsset string `json:"source_asset"` + DestAsset string `json:"dest_asset"` + Amount string `json:"amount"` + Schedule string `json:"schedule"` + } `json:"recipe"` + } `json:"policies"` + TotalCount int `json:"total_count"` + } `json:"data"` + Error struct { + Message string `json:"message"` + } `json:"error"` + } + + err = json.Unmarshal(body, &result) if err != nil { - var result map[string]interface{} - json.Unmarshal(body, &result) - prettyJSON, _ := json.MarshalIndent(result, "", " ") + 
var rawResult map[string]any + json.Unmarshal(body, &rawResult) + prettyJSON, _ := json.MarshalIndent(rawResult, "", " ") fmt.Println(string(prettyJSON)) return nil } - if len(policies) == 0 { + if result.Error.Message != "" { + return fmt.Errorf("verifier error: %s", result.Error.Message) + } + + if result.Data.TotalCount == 0 || len(result.Data.Policies) == 0 { fmt.Println("No policies found for this plugin.") + fmt.Printf("\nTo create a policy: vcli policy add --plugin %s --policy-file <config-file> --password <password>\n", pluginID) return nil } - fmt.Printf("Found %d policies:\n\n", len(policies)) - for i, p := range policies { - policyID := p["id"] - active := p["active"] - createdAt := p["created_at"] - fmt.Printf(" %d. Policy ID: %v\n", i+1, policyID) - fmt.Printf(" Active: %v\n", active) - fmt.Printf(" Created: %v\n\n", createdAt) + fmt.Printf("Found %d policies:\n\n", result.Data.TotalCount) + fmt.Println("┌──────────────────────────────────────┬────────┬─────────────────────┐") + fmt.Println("│ Policy ID │ Active │ Created │") + fmt.Println("├──────────────────────────────────────┼────────┼─────────────────────┤") + for _, p := range result.Data.Policies { + id := p.ID + if len(id) > 36 { + id = id[:33] + "..." 
+ } + active := "No" + if p.Active { + active = "Yes" + } + created := p.CreatedAt + if len(created) > 19 { + created = created[:19] + } + fmt.Printf("│ %-36s │ %-6s │ %-19s │\n", id, active, created) } + fmt.Println("└──────────────────────────────────────┴────────┴─────────────────────┘") return nil } @@ -327,8 +362,11 @@ func runPolicyAdd(pluginID, configFile string, password string) error { } recipeBase64 := base64.StdEncoding.EncodeToString(policyBytes) - policyVersion := 1 - pluginVersion := "1.0.0" + policyVersion := 0 + pluginVersion, err := getVerifierPluginVersion(cfg.Verifier, pluginID) + if err != nil { + return fmt.Errorf("get plugin version: %w", err) + } // Step 5: Create signature message and sign // Message format: {recipe}*#*{public_key}*#*{policy_version}*#*{plugin_version} @@ -505,6 +543,53 @@ func getPluginServerURL(verifierURL, pluginID string) (string, error) { return GetPluginServerURL(pluginID) } +func getVerifierPluginVersion(verifierURL, pluginID string) (string, error) { + const fallbackVersion = "1.0.0" + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + url := fmt.Sprintf("%s/plugins/%s", verifierURL, pluginID) + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return "", fmt.Errorf("create request: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return "", fmt.Errorf("request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return "", fmt.Errorf("plugin request failed (%d): %s", resp.StatusCode, strings.TrimSpace(string(body))) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("read response: %w", err) + } + + var payload struct { + Data struct { + Version string `json:"version"` + PluginVersion string `json:"plugin_version"` + } `json:"data"` + } + if err := json.Unmarshal(body, &payload); err != nil { + return 
fallbackVersion, nil + } + if payload.Data.Version != "" { + return payload.Data.Version, nil + } + if payload.Data.PluginVersion != "" { + return payload.Data.PluginVersion, nil + } + + return fallbackVersion, nil +} + func getPluginPolicySuggest(pluginServerURL string, recipeConfig map[string]interface{}) (*rtypes.PolicySuggest, error) { reqBody, err := json.Marshal(map[string]interface{}{ "configuration": recipeConfig, diff --git a/local/cmd/vcli/cmd/status.go b/local/cmd/vcli/cmd/status.go index 7f5f56d..b7cf80e 100644 --- a/local/cmd/vcli/cmd/status.go +++ b/local/cmd/vcli/cmd/status.go @@ -35,6 +35,7 @@ func runStatus() error { {"Verifier", cfg.Verifier + "/healthz"}, {"Fee Plugin", cfg.FeePlugin + "/healthz"}, {"DCA Plugin", cfg.DCAPlugin + "/healthz"}, + {"Sends Plugin", cfg.SendsPlugin + "/healthz"}, } for _, svc := range services { diff --git a/local/cmd/vcli/cmd/tss.go b/local/cmd/vcli/cmd/tss.go index dc26c04..f6fe1e4 100644 --- a/local/cmd/vcli/cmd/tss.go +++ b/local/cmd/vcli/cmd/tss.go @@ -35,16 +35,16 @@ type KeyShare struct { } type LocalVault struct { - Name string `json:"name"` - PublicKeyECDSA string `json:"pubKeyECDSA"` - PublicKeyEdDSA string `json:"pubKeyEdDSA"` - HexChainCode string `json:"hexChainCode"` - LocalPartyID string `json:"localPartyID"` - Signers []string `json:"signers"` - KeyShares []KeyShare `json:"keyshares"` - ResharePrefix string `json:"resharePrefix,omitempty"` - CreatedAt string `json:"createdAt"` - LibType int `json:"libType"` // 0 = GG20, 1 = DKLS + Name string `json:"name"` + PublicKeyECDSA string `json:"pubKeyECDSA"` + PublicKeyEdDSA string `json:"pubKeyEdDSA"` + HexChainCode string `json:"hexChainCode"` + LocalPartyID string `json:"localPartyID"` + Signers []string `json:"signers"` + KeyShares []KeyShare `json:"keyshares"` + ResharePrefix string `json:"resharePrefix,omitempty"` + CreatedAt string `json:"createdAt"` + LibType int `json:"libType"` // 0 = GG20, 1 = DKLS } type BackupVault struct { @@ -250,9 +250,16 @@ 
func (t *TSSService) Reshare(ctx context.Context, vault *LocalVault, pluginID, v t.logger.Info("Requesting Fast Vault Server to join reshare...") err = t.requestFastVaultReshare(ctx, vault, sessionID, hexEncryptionKey, vaultPassword) if err != nil { - t.logger.WithError(err).Warn("Failed to request Fast Vault Server - continuing anyway") + return nil, fmt.Errorf("request fast vault reshare: %w", err) } + t.logger.Info("Waiting for Fast Vault Server to join before requesting Verifier...") + _, err = t.waitForParties(ctx, sessionID, 2) + if err != nil { + return nil, fmt.Errorf("fast vault did not join: %w", err) + } + t.logger.Info("Fast Vault Server joined successfully") + t.logger.Info("Requesting Verifier to join reshare...") err = t.requestVerifierReshare(ctx, vault, sessionID, hexEncryptionKey, pluginID, verifierURL, authHeader) if err != nil { @@ -351,8 +358,14 @@ func (t *TSSService) requestVerifierReshare(ctx context.Context, vault *LocalVau HexChainCode string `json:"hex_chain_code"` LocalPartyId string `json:"local_party_id"` OldParties []string `json:"old_parties"` + OldResharePrefix string `json:"old_reshare_prefix"` Email string `json:"email"` PluginID string `json:"plugin_id"` + ReshareType int `json:"reshare_type"` + LibType int `json:"lib_type"` + UseVultisigRelay bool `json:"use_vultisig_relay"` + RelayURL string `json:"relay_url"` + RelayServer string `json:"relay_server"` } req := VerifierReshareRequest{ @@ -363,8 +376,14 @@ func (t *TSSService) requestVerifierReshare(ctx context.Context, vault *LocalVau HexChainCode: vault.HexChainCode, LocalPartyId: "verifier-" + sessionID[:8], OldParties: vault.Signers, + OldResharePrefix: vault.ResharePrefix, Email: "", PluginID: pluginID, + ReshareType: 1, + LibType: vault.LibType, + UseVultisigRelay: true, + RelayURL: RelayServer, + RelayServer: RelayServer, } reqJSON, err := json.Marshal(req) @@ -421,8 +440,15 @@ func (t *TSSService) ReshareWithPlugin(ctx context.Context, vault *LocalVault, p 
t.logger.Info("Requesting Fast Vault Server to join reshare...") err = t.requestFastVaultReshare(ctx, vault, sessionID, hexEncryptionKey, vaultPassword) if err != nil { - t.logger.WithError(err).Warn("Failed to request Fast Vault Server - continuing anyway") + return nil, fmt.Errorf("request fast vault reshare: %w", err) + } + + t.logger.Info("Waiting for Fast Vault Server to join before requesting Verifier...") + _, err = t.waitForParties(ctx, sessionID, 2) + if err != nil { + return nil, fmt.Errorf("fast vault did not join: %w", err) } + t.logger.Info("Fast Vault Server joined successfully") t.logger.Info("Requesting Verifier to join reshare (with plugin)...") err = t.requestVerifierReshare(ctx, vault, sessionID, hexEncryptionKey, pluginID, verifierURL, authHeader) @@ -666,6 +692,7 @@ func (t *TSSService) requestFastVaultKeysign(ctx context.Context, vault *LocalVa HexEncryptionKey string `json:"hex_encryption_key"` DerivePath string `json:"derive_path"` IsECDSA bool `json:"is_ecdsa"` + DoSetupMsg bool `json:"do_setup_msg"` VaultPassword string `json:"vault_password"` } @@ -681,6 +708,7 @@ func (t *TSSService) requestFastVaultKeysign(ctx context.Context, vault *LocalVa HexEncryptionKey: hexEncKey, DerivePath: derivePath, IsECDSA: !isEdDSA, + DoSetupMsg: false, VaultPassword: vaultPassword, } @@ -689,7 +717,7 @@ func (t *TSSService) requestFastVaultKeysign(ctx context.Context, vault *LocalVa return fmt.Errorf("marshal request: %w", err) } - url := FastVaultServer + "/sign" + url := FastVaultServer + "/vault/sign" httpReq, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(reqJSON)) if err != nil { return fmt.Errorf("create request: %w", err) diff --git a/local/cmd/vcli/cmd/tss_keysign.go b/local/cmd/vcli/cmd/tss_keysign.go index 529253f..92e76d7 100644 --- a/local/cmd/vcli/cmd/tss_keysign.go +++ b/local/cmd/vcli/cmd/tss_keysign.go @@ -12,14 +12,13 @@ import ( "io" "net/http" "strings" - "sync" "time" "github.com/google/uuid" 
"github.com/sirupsen/logrus" + "github.com/vultisig/vultiserver/relay" vgcommon "github.com/vultisig/vultisig-go/common" vgrelay "github.com/vultisig/vultisig-go/relay" - "github.com/vultisig/vultiserver/relay" "github.com/vultisig/verifier/vault" "github.com/vultisig/verifier/vault_config" @@ -91,7 +90,7 @@ func (t *TSSService) KeysignWithFastVault(ctx context.Context, v *LocalVault, me for i, msg := range messages { t.logger.WithField("message_index", i).Info("Running DKLS keysign protocol...") - result, err := t.runKeysignAsInitiator(mpcWrapper, v, sessionID, hexEncryptionKey, parties, msg, derivePath, i) + result, err := t.runKeysignAsInitiator(ctx, mpcWrapper, v, sessionID, hexEncryptionKey, parties, msg, derivePath, i) if err != nil { return nil, fmt.Errorf("keysign message %d failed: %w", i, err) } @@ -115,6 +114,7 @@ func (t *TSSService) requestFastVaultKeysignDKLS(ctx context.Context, v *LocalVa HexEncryptionKey string `json:"hex_encryption_key"` DerivePath string `json:"derive_path"` IsECDSA bool `json:"is_ecdsa"` + DoSetupMsg bool `json:"do_setup_msg"` VaultPassword string `json:"vault_password"` } @@ -125,6 +125,7 @@ func (t *TSSService) requestFastVaultKeysignDKLS(ctx context.Context, v *LocalVa HexEncryptionKey: hexEncKey, DerivePath: derivePath, IsECDSA: true, + DoSetupMsg: false, VaultPassword: vaultPassword, } @@ -156,7 +157,7 @@ func (t *TSSService) requestFastVaultKeysignDKLS(ctx context.Context, v *LocalVa return nil } -func (t *TSSService) runKeysignAsInitiator(mpcWrapper *vault.MPCWrapperImp, v *LocalVault, sessionID, hexEncryptionKey string, parties []string, message, derivePath string, msgIndex int) (*KeysignResult, error) { +func (t *TSSService) runKeysignAsInitiator(ctx context.Context, mpcWrapper *vault.MPCWrapperImp, v *LocalVault, sessionID, hexEncryptionKey string, parties []string, message, derivePath string, msgIndex int) (*KeysignResult, error) { relayClient := vgrelay.NewRelayClient(RelayServer) publicKey := v.PublicKeyECDSA @@ 
-234,7 +235,7 @@ func (t *TSSService) runKeysignAsInitiator(mpcWrapper *vault.MPCWrapperImp, v *L return nil, fmt.Errorf("create session from setup: %w", err) } - return t.processKeysignProtocol(mpcWrapper, sessionHandle, sessionID, hexEncryptionKey, parties, messageID) + return t.processKeysignProtocol(ctx, mpcWrapper, sessionHandle, sessionID, hexEncryptionKey, parties, messageID) } func fmtDerivePath(path string) []byte { @@ -248,41 +249,24 @@ func fmtIdsSlice(ids []string) []byte { return []byte(strings.Join(ids, "\x00")) } -func (t *TSSService) processKeysignProtocol(mpcWrapper *vault.MPCWrapperImp, sessionHandle vault.Handle, sessionID, hexEncryptionKey string, parties []string, messageID string) (*KeysignResult, error) { +func (t *TSSService) processKeysignProtocol(ctx context.Context, mpcWrapper *vault.MPCWrapperImp, sessionHandle vault.Handle, sessionID, hexEncryptionKey string, parties []string, messageID string) (*KeysignResult, error) { messenger := relay.NewMessenger(RelayServer, sessionID, hexEncryptionKey, true, messageID) relayClient := vgrelay.NewRelayClient(RelayServer) - var messageCache sync.Map + messageCache := map[string]struct{}{} - go func() { - for { - outbound, err := mpcWrapper.SignSessionOutputMessage(sessionHandle) - if err != nil { - t.logger.WithError(err).Debug("Failed to get output message") - return - } - if len(outbound) == 0 { - return - } - - encodedOutbound := base64.StdEncoding.EncodeToString(outbound) - for i := 0; i < len(parties); i++ { - receiver, err := mpcWrapper.SignSessionMessageReceiver(sessionHandle, outbound, i) - if err != nil { - t.logger.WithError(err).Debug("Failed to get receiver") - continue - } - if len(receiver) == 0 { - break - } - - t.logger.WithField("receiver", string(receiver)).Debug("Sending message") - _ = messenger.Send(t.localPartyID, string(receiver), encodedOutbound) - } - } - }() + // Drain initial outbound messages before reading inbound messages. 
+ if err := t.sendAllKeysignOutputMessages(mpcWrapper, sessionHandle, messenger, parties); err != nil { + return nil, fmt.Errorf("send initial keysign messages: %w", err) + } start := time.Now() for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + if time.Since(start) > 2*time.Minute { return nil, fmt.Errorf("keysign timeout") } @@ -300,7 +284,7 @@ func (t *TSSService) processKeysignProtocol(mpcWrapper *vault.MPCWrapperImp, ses } cacheKey := fmt.Sprintf("%s-%s", sessionID, msg.Hash) - if _, found := messageCache.Load(cacheKey); found { + if _, found := messageCache[cacheKey]; found { continue } @@ -323,7 +307,7 @@ func (t *TSSService) processKeysignProtocol(mpcWrapper *vault.MPCWrapperImp, ses continue } - messageCache.Store(cacheKey, true) + messageCache[cacheKey] = struct{}{} t.logger.WithFields(logrus.Fields{ "from": msg.From, "hash": msg.Hash[:8], @@ -331,19 +315,9 @@ func (t *TSSService) processKeysignProtocol(mpcWrapper *vault.MPCWrapperImp, ses _ = relayClient.DeleteMessageFromServer(sessionID, t.localPartyID, msg.Hash, messageID) - for { - outbound, err := mpcWrapper.SignSessionOutputMessage(sessionHandle) - if err != nil || len(outbound) == 0 { - break - } - encodedOutbound := base64.StdEncoding.EncodeToString(outbound) - for i := 0; i < len(parties); i++ { - receiver, _ := mpcWrapper.SignSessionMessageReceiver(sessionHandle, outbound, i) - if len(receiver) == 0 { - break - } - _ = messenger.Send(t.localPartyID, string(receiver), encodedOutbound) - } + if err := t.sendAllKeysignOutputMessages(mpcWrapper, sessionHandle, messenger, parties); err != nil { + t.logger.WithError(err).Debug("Failed to send keysign output messages") + continue } if isFinished { @@ -378,3 +352,30 @@ func (t *TSSService) processKeysignProtocol(mpcWrapper *vault.MPCWrapperImp, ses time.Sleep(100 * time.Millisecond) } } + +func (t *TSSService) sendAllKeysignOutputMessages(mpcWrapper *vault.MPCWrapperImp, sessionHandle vault.Handle, messenger 
*relay.MessengerImp, parties []string) error { + for { + outbound, err := mpcWrapper.SignSessionOutputMessage(sessionHandle) + if err != nil { + return err + } + if len(outbound) == 0 { + return nil + } + + encodedOutbound := base64.StdEncoding.EncodeToString(outbound) + for i := 0; i < len(parties); i++ { + receiver, err := mpcWrapper.SignSessionMessageReceiver(sessionHandle, outbound, i) + if err != nil { + t.logger.WithError(err).Debug("Failed to get receiver") + continue + } + if len(receiver) == 0 { + break + } + if err := messenger.Send(t.localPartyID, string(receiver), encodedOutbound); err != nil { + t.logger.WithError(err).Debug("Failed to send message") + } + } + } +} diff --git a/local/cmd/vcli/cmd/tss_reshare.go b/local/cmd/vcli/cmd/tss_reshare.go index a6662e1..94bb9c1 100644 --- a/local/cmd/vcli/cmd/tss_reshare.go +++ b/local/cmd/vcli/cmd/tss_reshare.go @@ -8,14 +8,13 @@ import ( "fmt" "math" "slices" - "sync" "time" "github.com/google/uuid" "github.com/sirupsen/logrus" + "github.com/vultisig/vultiserver/relay" vgcommon "github.com/vultisig/vultisig-go/common" vgrelay "github.com/vultisig/vultisig-go/relay" - "github.com/vultisig/vultiserver/relay" "github.com/vultisig/verifier/vault" "github.com/vultisig/verifier/vault_config" @@ -51,8 +50,15 @@ func (t *TSSService) ReshareWithDKLS(ctx context.Context, v *LocalVault, pluginI t.logger.Info("Requesting Fast Vault Server to join reshare...") err = t.requestFastVaultReshare(ctx, v, sessionID, hexEncryptionKey, vaultPassword) if err != nil { - t.logger.WithError(err).Warn("Failed to request Fast Vault Server - continuing anyway") + return nil, fmt.Errorf("request fast vault reshare: %w", err) + } + + t.logger.Info("Waiting for Fast Vault Server to join before requesting Verifier...") + _, err = t.waitForParties(ctx, sessionID, 2) + if err != nil { + return nil, fmt.Errorf("fast vault did not join: %w", err) } + t.logger.Info("Fast Vault Server joined successfully") t.logger.Info("Requesting Verifier to 
join reshare (with plugin)...") err = t.requestVerifierReshare(ctx, v, sessionID, hexEncryptionKey, pluginID, verifierURL, authHeader) @@ -91,13 +97,13 @@ func (t *TSSService) ReshareWithDKLS(ctx context.Context, v *LocalVault, pluginI } t.logger.Info("Running DKLS reshare protocol (ECDSA)...") - ecdsaPubkey, chainCode, err := t.runReshareAsInitiator(dklsService, v, sessionID, hexEncryptionKey, parties, false) + ecdsaPubkey, chainCode, err := t.runReshareAsInitiator(ctx, dklsService, v, sessionID, hexEncryptionKey, parties, false) if err != nil { return nil, fmt.Errorf("reshare ECDSA failed: %w", err) } t.logger.Info("Running DKLS reshare protocol (EdDSA)...") - eddsaPubkey, _, err := t.runReshareAsInitiator(dklsService, v, sessionID, hexEncryptionKey, parties, true) + eddsaPubkey, _, err := t.runReshareAsInitiator(ctx, dklsService, v, sessionID, hexEncryptionKey, parties, true) if err != nil { return nil, fmt.Errorf("reshare EdDSA failed: %w", err) } @@ -128,7 +134,7 @@ func (t *TSSService) ReshareWithDKLS(ctx context.Context, v *LocalVault, pluginI return newVault, nil } -func (t *TSSService) runReshareAsInitiator(dklsService *vault.DKLSTssService, v *LocalVault, sessionID, hexEncryptionKey string, parties []string, isEdDSA bool) (string, string, error) { +func (t *TSSService) runReshareAsInitiator(ctx context.Context, dklsService *vault.DKLSTssService, v *LocalVault, sessionID, hexEncryptionKey string, parties []string, isEdDSA bool) (string, string, error) { mpcWrapper := dklsService.GetMPCKeygenWrapper(isEdDSA) relayClient := vgrelay.NewRelayClient(RelayServer) @@ -210,47 +216,27 @@ func (t *TSSService) runReshareAsInitiator(dklsService *vault.DKLSTssService, v return "", "", fmt.Errorf("create session from setup: %w", err) } - return t.processReshareProtocol(mpcWrapper, sessionHandle, sessionID, hexEncryptionKey, parties, isEdDSA) + return t.processReshareProtocol(ctx, mpcWrapper, sessionHandle, sessionID, hexEncryptionKey, parties, isEdDSA) } -func (t 
*TSSService) processReshareProtocol(mpcWrapper *vault.MPCWrapperImp, sessionHandle vault.Handle, sessionID, hexEncryptionKey string, parties []string, isEdDSA bool) (string, string, error) { +func (t *TSSService) processReshareProtocol(ctx context.Context, mpcWrapper *vault.MPCWrapperImp, sessionHandle vault.Handle, sessionID, hexEncryptionKey string, parties []string, isEdDSA bool) (string, string, error) { messenger := relay.NewMessenger(RelayServer, sessionID, hexEncryptionKey, true, "") relayClient := vgrelay.NewRelayClient(RelayServer) - var messageCache sync.Map - - go func() { - for { - outbound, err := mpcWrapper.QcSessionOutputMessage(sessionHandle) - if err != nil { - t.logger.WithError(err).Debug("Failed to get output message") - return - } - if len(outbound) == 0 { - return - } - - encodedOutbound := base64.StdEncoding.EncodeToString(outbound) - for i := 0; i < len(parties); i++ { - receiver, err := mpcWrapper.QcSessionMessageReceiver(sessionHandle, outbound, i) - if err != nil { - t.logger.WithError(err).Debug("Failed to get receiver") - continue - } - if len(receiver) == 0 { - break - } + messageCache := map[string]struct{}{} - t.logger.WithField("receiver", receiver).Debug("Sending message") - err = messenger.Send(t.localPartyID, receiver, encodedOutbound) - if err != nil { - t.logger.WithError(err).Debug("Failed to send message") - } - } - } - }() + // Drain initial outbound messages before processing inbound relay traffic. 
+ if err := t.sendAllReshareOutputMessages(mpcWrapper, sessionHandle, messenger, parties); err != nil { + return "", "", fmt.Errorf("send initial reshare messages: %w", err) + } start := time.Now() for { + select { + case <-ctx.Done(): + return "", "", ctx.Err() + default: + } + if time.Since(start) > 2*time.Minute { return "", "", fmt.Errorf("reshare timeout") } @@ -268,7 +254,7 @@ func (t *TSSService) processReshareProtocol(mpcWrapper *vault.MPCWrapperImp, ses } cacheKey := fmt.Sprintf("%s-%s", sessionID, msg.Hash) - if _, found := messageCache.Load(cacheKey); found { + if _, found := messageCache[cacheKey]; found { continue } @@ -291,7 +277,7 @@ func (t *TSSService) processReshareProtocol(mpcWrapper *vault.MPCWrapperImp, ses continue } - messageCache.Store(cacheKey, true) + messageCache[cacheKey] = struct{}{} t.logger.WithFields(logrus.Fields{ "from": msg.From, "hash": msg.Hash[:8], @@ -299,19 +285,9 @@ func (t *TSSService) processReshareProtocol(mpcWrapper *vault.MPCWrapperImp, ses _ = relayClient.DeleteMessageFromServer(sessionID, t.localPartyID, msg.Hash, "") - for { - outbound, err := mpcWrapper.QcSessionOutputMessage(sessionHandle) - if err != nil || len(outbound) == 0 { - break - } - encodedOutbound := base64.StdEncoding.EncodeToString(outbound) - for i := 0; i < len(parties); i++ { - receiver, _ := mpcWrapper.QcSessionMessageReceiver(sessionHandle, outbound, i) - if len(receiver) == 0 { - break - } - _ = messenger.Send(t.localPartyID, receiver, encodedOutbound) - } + if err := t.sendAllReshareOutputMessages(mpcWrapper, sessionHandle, messenger, parties); err != nil { + t.logger.WithError(err).Debug("Failed to send reshare output messages") + continue } if isFinished { @@ -355,3 +331,30 @@ func (t *TSSService) processReshareProtocol(mpcWrapper *vault.MPCWrapperImp, ses time.Sleep(100 * time.Millisecond) } } + +func (t *TSSService) sendAllReshareOutputMessages(mpcWrapper *vault.MPCWrapperImp, sessionHandle vault.Handle, messenger *relay.MessengerImp, 
parties []string) error { + for { + outbound, err := mpcWrapper.QcSessionOutputMessage(sessionHandle) + if err != nil { + return err + } + if len(outbound) == 0 { + return nil + } + + encodedOutbound := base64.StdEncoding.EncodeToString(outbound) + for i := 0; i < len(parties); i++ { + receiver, err := mpcWrapper.QcSessionMessageReceiver(sessionHandle, outbound, i) + if err != nil { + t.logger.WithError(err).Debug("Failed to get receiver") + continue + } + if len(receiver) == 0 { + break + } + if err := messenger.Send(t.localPartyID, receiver, encodedOutbound); err != nil { + t.logger.WithError(err).Debug("Failed to send message") + } + } + } +} diff --git a/local/cmd/vcli/cmd/vault.go b/local/cmd/vcli/cmd/vault.go index 0cb4bfd..8675b97 100644 --- a/local/cmd/vcli/cmd/vault.go +++ b/local/cmd/vcli/cmd/vault.go @@ -108,12 +108,19 @@ Example: if envPass := os.Getenv("VAULT_PASSWORD"); envPass != "" { actualPassword = envPass } + if strings.TrimSpace(verifierURL) == "" { + cfg, err := LoadConfig() + if err != nil { + return fmt.Errorf("load config: %w", err) + } + verifierURL = cfg.Verifier + } return runVaultReshare(ResolvePluginID(pluginID), verifierURL, actualPassword) }, } cmd.Flags().StringVar(&pluginID, "plugin", "", "Plugin ID or alias (required)") - cmd.Flags().StringVarP(&verifierURL, "verifier", "v", "http://localhost:8080", "Verifier server URL") + cmd.Flags().StringVarP(&verifierURL, "verifier", "v", "", "Verifier server URL (defaults to configured verifier)") cmd.Flags().StringVar(&password, "password", "", "Fast Vault password (or set VAULT_PASSWORD)") cmd.MarkFlagRequired("plugin") @@ -617,20 +624,29 @@ func runVaultImport(file, password string) error { format = ".vult (protobuf)" fmt.Println("Detected .vult protobuf format") } else { - // Fall back to JSON format - var backup BackupVault - jsonErr := json.Unmarshal(data, &backup) - if jsonErr == nil && backup.Version != "" { - localVault = backup.Vault - format = fmt.Sprintf("iOS backup (v%s)", 
backup.Version) - fmt.Printf("Detected iOS backup format (version: %s)\n", backup.Version) + // DecryptVaultFromBackup has a bug: it doesn't base64-decode the Vault + // field when IsEncrypted=false. Try parseVultFile which handles this. + pbVault, parseErr := parseVultFile(data, password) + if parseErr == nil { + localVault = convertProtoVaultToLocal(pbVault) + format = ".vult (protobuf)" + fmt.Println("Detected .vult protobuf format") } else { - jsonErr = json.Unmarshal(data, &localVault) - if jsonErr != nil { - return fmt.Errorf("parse vault file: protobuf error: %v, json error: %v", err, jsonErr) + // Fall back to JSON format + var backup BackupVault + jsonErr := json.Unmarshal(data, &backup) + if jsonErr == nil && backup.Version != "" { + localVault = backup.Vault + format = fmt.Sprintf("iOS backup (v%s)", backup.Version) + fmt.Printf("Detected iOS backup format (version: %s)\n", backup.Version) + } else { + jsonErr = json.Unmarshal(data, &localVault) + if jsonErr != nil { + return fmt.Errorf("parse vault file: protobuf error: %v, json error: %v", err, jsonErr) + } + format = "JSON" + fmt.Println("Detected JSON format") } - format = "JSON" - fmt.Println("Detected JSON format") } } @@ -845,19 +861,14 @@ func authenticateVault(vault *LocalVault, password string) error { return fmt.Errorf("authentication failed (%d): %s", resp.StatusCode, string(body)) } - var authResp struct { - Data struct { - Token string `json:"token"` - } `json:"data"` - } - err = json.Unmarshal(body, &authResp) + tokenValue, err := extractAuthTokenFromResponse(body) if err != nil { return fmt.Errorf("parse auth response: %w", err) } // Save token authToken := AuthToken{ - Token: authResp.Data.Token, + Token: tokenValue, PublicKey: vault.PublicKeyECDSA, ExpiresAt: time.Now().Add(7 * 24 * time.Hour), } diff --git a/local/cmd/vcli/main.go b/local/cmd/vcli/main.go index def932a..f4fc702 100644 --- a/local/cmd/vcli/main.go +++ b/local/cmd/vcli/main.go @@ -9,6 +9,20 @@ import ( 
"github.com/vultisig/vcli/local/cmd/vcli/cmd" ) +const ( + prodVerifierURL = "https://verifier.vultisig.com" + prodDCAURL = "https://plugin-dca-swap.prod.plugins.vultisig.com" + prodFeesURL = "https://plugin-fees.prod.plugins.vultisig.com" + prodSendsURL = "https://plugin-dca-send.prod.plugins.vultisig.com" +) + +func applyProdEndpointEnv() { + _ = os.Setenv("VCLI_VERIFIER_URL", prodVerifierURL) + _ = os.Setenv("VCLI_DCA_PLUGIN_URL", prodDCAURL) + _ = os.Setenv("VCLI_FEE_PLUGIN_URL", prodFeesURL) + _ = os.Setenv("VCLI_SENDS_PLUGIN_URL", prodSendsURL) +} + func main() { // Ensure go-wrappers CGO libraries are downloaded and library path is set. // This must happen before any CGO code is loaded. @@ -17,6 +31,8 @@ func main() { fmt.Fprintf(os.Stderr, "TSS operations may not work. Run 'vcli start' to retry download.\n") } + var useProd bool + rootCmd := &cobra.Command{ Use: "vcli", Short: "Vultisig CLI for local plugin development and testing", @@ -50,6 +66,7 @@ FLAG CONVENTIONS: --password = Vault/Fast Vault password (all commands) --plugin = Plugin ID or alias -c, --policy-file = Config file path + --prod = Use production verifier/plugin endpoints Commands: start - Start all local development services @@ -62,7 +79,14 @@ Commands: report - Show comprehensive validation report status - Show quick service status `, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if useProd { + applyProdEndpointEnv() + } + return nil + }, } + rootCmd.PersistentFlags().BoolVar(&useProd, "prod", false, "Use production verifier/plugin endpoints") rootCmd.AddCommand(cmd.NewStartCmd()) rootCmd.AddCommand(cmd.NewStopCmd()) diff --git a/local/docker-compose.yaml b/local/docker-compose.yaml index ca72f6f..bc8ce80 100644 --- a/local/docker-compose.yaml +++ b/local/docker-compose.yaml @@ -88,6 +88,7 @@ services: mc mb --ignore-existing myminio/vultisig-fee; mc mb --ignore-existing myminio/vultisig-dca; mc mb --ignore-existing myminio/vultisig-vultiserver; + mc mb 
--ignore-existing myminio/vultisig-sends; mc anonymous set download myminio/vultisig-plugin-assets; exit 0; " diff --git a/local/init-db.sql b/local/init-db.sql index b55bf69..e7e3fa1 100644 --- a/local/init-db.sql +++ b/local/init-db.sql @@ -1,9 +1,13 @@ -- Create databases for all services CREATE DATABASE "vultisig-verifier"; CREATE DATABASE "vultisig-dca"; +CREATE DATABASE "vultisig-sends"; CREATE DATABASE "vultisig-fee"; +CREATE DATABASE "vultisig-agent"; -- Grant all privileges to vultisig user GRANT ALL PRIVILEGES ON DATABASE "vultisig-verifier" TO vultisig; GRANT ALL PRIVILEGES ON DATABASE "vultisig-dca" TO vultisig; +GRANT ALL PRIVILEGES ON DATABASE "vultisig-sends" TO vultisig; GRANT ALL PRIVILEGES ON DATABASE "vultisig-fee" TO vultisig; +GRANT ALL PRIVILEGES ON DATABASE "vultisig-agent" TO vultisig; diff --git a/local/scripts/run-services.sh b/local/scripts/run-services.sh index 0891c6c..e6da487 100755 --- a/local/scripts/run-services.sh +++ b/local/scripts/run-services.sh @@ -6,7 +6,7 @@ # Prerequisites: # - Docker running with postgres, redis, minio (via docker-compose.yaml) # - Go installed -# - Sibling repos: ../verifier, ../app-recurring +# - Sibling repos: ../verifier, ../app-recurring, ../agent-backend, ../mcp set -e @@ -43,6 +43,14 @@ if [ ! -d "$ROOT_DIR/app-recurring" ]; then exit 1 fi +if [ ! -d "$ROOT_DIR/agent-backend" ]; then + echo -e "${YELLOW}WARNING: agent-backend repo not found at $ROOT_DIR/agent-backend — skipping${NC}" +fi + +if [ ! 
-d "$ROOT_DIR/mcp" ]; then + echo -e "${YELLOW}WARNING: mcp repo not found at $ROOT_DIR/mcp — skipping${NC}" +fi + # Create logs directory LOG_DIR="$VCLI_DIR/logs" mkdir -p "$LOG_DIR" @@ -76,6 +84,10 @@ pkill -9 -f "go run.*cmd/worker" 2>/dev/null || true pkill -9 -f "go run.*cmd/server" 2>/dev/null || true pkill -9 -f "go run.*cmd/scheduler" 2>/dev/null || true pkill -9 -f "go run.*cmd/tx_indexer" 2>/dev/null || true +pkill -9 -f "go run.*agent-backend.*cmd/server" 2>/dev/null || true +pkill -9 -f "agent-backend-server" 2>/dev/null || true +pkill -9 -f "go run.*mcp.*cmd/mcp-server" 2>/dev/null || true +pkill -9 -f "mcp-server.*-http" 2>/dev/null || true # Also kill compiled binaries in go-build cache pkill -9 -f "go-build.*/verifier$" 2>/dev/null || true pkill -9 -f "go-build.*/worker$" 2>/dev/null || true @@ -116,6 +128,12 @@ export ENCRYPTION_SECRET="dev-encryption-secret-32b" export METRICS_ENABLED="true" export METRICS_HOST="0.0.0.0" export METRICS_PORT="8088" +export PLUGIN_ASSETS_HOST="http://localhost:9000" +export PLUGIN_ASSETS_REGION="us-east-1" +export PLUGIN_ASSETS_BUCKET="vultisig-plugin-assets" +export PLUGIN_ASSETS_ACCESS_KEY="minioadmin" +export PLUGIN_ASSETS_SECRET="minioadmin" +export PLUGIN_ASSETS_PUBLIC_BASE_URL="http://localhost:9000/vultisig-plugin-assets" go run ./cmd/verifier > "$LOG_DIR/verifier.log" 2>&1 & VERIFIER_PID=$! @@ -267,6 +285,160 @@ go run ./cmd/tx_indexer > "$LOG_DIR/dca-tx-indexer.log" 2>&1 & DCA_TX_INDEXER_PID=$! 
echo -e " ${GREEN}✓${NC} DCA TX Indexer (PID: $DCA_TX_INDEXER_PID)" +# ============================================ +# APP-RECURRING (SENDS) SERVICES +# ============================================ + +echo -e "${CYAN}Starting Sends Plugin Server...${NC}" +cd "$ROOT_DIR/app-recurring" + +# Sends environment +export MODE="send" +export SERVER_PORT="8083" +export SERVER_HOST="0.0.0.0" +export TASK_QUEUE_NAME="sends_plugin_queue" +export SERVER_ENCRYPTIONSECRET="dev-encryption-secret-32b" +export POSTGRES_DSN="postgres://vultisig:vultisig@localhost:5432/vultisig-sends?sslmode=disable" +export REDIS_URI="redis://:vultisig@localhost:6379" +export BLOCKSTORAGE_HOST="http://localhost:9000" +export BLOCKSTORAGE_REGION="us-east-1" +export BLOCKSTORAGE_ACCESSKEY="minioadmin" +export BLOCKSTORAGE_SECRETKEY="minioadmin" +export BLOCKSTORAGE_BUCKET="vultisig-sends" +export VERIFIER_URL="http://localhost:8080" +export METRICS_ENABLED="true" +export METRICS_HOST="0.0.0.0" +export METRICS_PORT="8191" + +go run ./cmd/server > "$LOG_DIR/sends-server.log" 2>&1 & +SENDS_SERVER_PID=$! +echo -e " ${GREEN}✓${NC} Sends Server (PID: $SENDS_SERVER_PID) → localhost:8083" + +echo -e " ${YELLOW}⏳${NC} Waiting for Sends server migrations..." +for i in {1..30}; do + if curl -s http://localhost:8083/health > /dev/null 2>&1; then + echo -e " ${GREEN}✓${NC} Sends server ready" + break + fi + if grep -q "FATA" "$LOG_DIR/sends-server.log" 2>/dev/null; then + echo -e " ${RED}✗${NC} Sends server failed to start. 
Check logs/sends-server.log" + exit 1 + fi + sleep 1 +done + +echo -e "${CYAN}Starting Sends Worker...${NC}" +export TASK_QUEUE_NAME="sends_plugin_queue" +export VERIFIER_PARTYPREFIX="verifier" +export VERIFIER_SENDTOKEN="local-dev-send-apikey" +export VERIFIER_SWAPTOKEN="local-dev-send-apikey" +export VAULTSERVICE_LOCALPARTYPREFIX="sends-worker" +export VAULTSERVICE_RELAY_SERVER="https://api.vultisig.com/router" +export VAULTSERVICE_ENCRYPTIONSECRET="dev-encryption-secret-32b" +export VAULTSERVICE_DOSETUPMSG="true" +export METRICS_PORT="8193" + +go run ./cmd/worker > "$LOG_DIR/sends-worker.log" 2>&1 & +SENDS_WORKER_PID=$! +echo -e " ${GREEN}✓${NC} Sends Worker (PID: $SENDS_WORKER_PID)" + +echo -e "${CYAN}Starting Sends Scheduler...${NC}" +export HEALTHPORT="8194" +export METRICS_PORT="8195" + +go run ./cmd/scheduler > "$LOG_DIR/sends-scheduler.log" 2>&1 & +SENDS_SCHEDULER_PID=$! +echo -e " ${GREEN}✓${NC} Sends Scheduler (PID: $SENDS_SCHEDULER_PID)" + +echo -e "${CYAN}Starting Sends TX Indexer...${NC}" +export HEALTHPORT="8196" +export METRICS_PORT="8197" +export BASE_DATABASE_DSN="postgres://vultisig:vultisig@localhost:5432/vultisig-sends?sslmode=disable" + +go run ./cmd/tx_indexer > "$LOG_DIR/sends-tx-indexer.log" 2>&1 & +SENDS_TX_INDEXER_PID=$! +echo -e " ${GREEN}✓${NC} Sends TX Indexer (PID: $SENDS_TX_INDEXER_PID)" + +# ============================================ +# MCP SERVER (must start before agent-backend) +# ============================================ + +if [ -d "$ROOT_DIR/mcp" ]; then + echo -e "${CYAN}Starting MCP Server...${NC}" + cd "$ROOT_DIR/mcp" + + export ETH_RPC_URL="https://ethereum-rpc.publicnode.com" + export BLOCKCHAIR_API_URL="https://api.vultisig.com/blockchair" + + go build -o /tmp/mcp-server ./cmd/mcp-server + /tmp/mcp-server -http :8086 > "$LOG_DIR/mcp-server.log" 2>&1 & + MCP_SERVER_PID=$! + echo -e " ${GREEN}✓${NC} MCP Server (PID: $MCP_SERVER_PID) → localhost:8086/mcp" + + echo -e " ${YELLOW}⏳${NC} Waiting for MCP Server..." 
+ for i in {1..15}; do + if curl -s --max-time 2 -X POST -H "Content-Type: application/json" \ + -d '{"jsonrpc":"2.0","id":1,"method":"tools/list"}' \ + http://localhost:8086/mcp > /dev/null 2>&1; then + echo -e " ${GREEN}✓${NC} MCP Server ready" + break + fi + if grep -q "Fatalf\|fatal\|FATA" "$LOG_DIR/mcp-server.log" 2>/dev/null; then + echo -e " ${RED}✗${NC} MCP Server failed to start. Check logs/mcp-server.log" + break + fi + sleep 1 + done +else + echo -e "${YELLOW}Skipping MCP Server (repo not found)${NC}" +fi + +# ============================================ +# AGENT BACKEND +# ============================================ + +if [ -d "$ROOT_DIR/agent-backend" ]; then + echo -e "${CYAN}Starting Agent Backend...${NC}" + cd "$ROOT_DIR/agent-backend" + + # Source .env file for ANTHROPIC_API_KEY and other secrets + if [ -f "$ROOT_DIR/agent-backend/.env" ]; then + set -a + source "$ROOT_DIR/agent-backend/.env" + set +a + fi + + # Override infra to use shared vcli services + export SERVER_HOST="0.0.0.0" + export SERVER_PORT="8084" + export DATABASE_DSN="postgres://vultisig:vultisig@localhost:5432/vultisig-agent?sslmode=disable" + export REDIS_URI="redis://:vultisig@localhost:6379" + export VERIFIER_URL="http://localhost:8080" + export DCA_PLUGIN_URL="http://localhost:8082" + export AUTH_CACHE_KEY_SECRET="local-dev-secret-key" + export LOG_FORMAT="text" + + go build -o /tmp/agent-backend-server ./cmd/server + /tmp/agent-backend-server > "$LOG_DIR/agent-backend.log" 2>&1 & + AGENT_BACKEND_PID=$! + echo -e " ${GREEN}✓${NC} Agent Backend (PID: $AGENT_BACKEND_PID) → localhost:8084" + + echo -e " ${YELLOW}⏳${NC} Waiting for Agent Backend..." + for i in {1..30}; do + if curl -s http://localhost:8084/healthz > /dev/null 2>&1; then + echo -e " ${GREEN}✓${NC} Agent Backend ready" + break + fi + if grep -q "Fatal\|fatal\|FATA" "$LOG_DIR/agent-backend.log" 2>/dev/null; then + echo -e " ${RED}✗${NC} Agent Backend failed to start. 
Check logs/agent-backend.log" + break + fi + sleep 1 + done +else + echo -e "${YELLOW}Skipping Agent Backend (repo not found)${NC}" +fi + # ============================================ # SEED DATABASE # ============================================ @@ -298,6 +470,16 @@ echo -e " DCA Plugin API localhost:8082" echo -e " DCA Plugin Worker (background)" echo -e " DCA Scheduler (background)" echo -e " DCA TX Indexer (background)" +echo -e " Sends Plugin API localhost:8083" +echo -e " Sends Plugin Worker (background)" +echo -e " Sends Scheduler (background)" +echo -e " Sends TX Indexer (background)" +if [ -d "$ROOT_DIR/agent-backend" ]; then +echo -e " Agent Backend API localhost:8084" +fi +if [ -d "$ROOT_DIR/mcp" ]; then +echo -e " MCP Server localhost:8086/mcp" +fi echo "" echo -e " ${CYAN}Infrastructure (Docker):${NC}" echo -e " PostgreSQL localhost:5432" @@ -307,11 +489,14 @@ echo "" echo -e " ${CYAN}Logs:${NC}" echo -e " tail -f $LOG_DIR/verifier.log" echo -e " tail -f $LOG_DIR/dca-server.log" +echo -e " tail -f $LOG_DIR/sends-server.log" +echo -e " tail -f $LOG_DIR/agent-backend.log" +echo -e " tail -f $LOG_DIR/mcp-server.log" echo -e " (or any file in $LOG_DIR/)" echo "" echo -e " ${CYAN}Stop:${NC} make stop" echo "" -echo -e "${GREEN}Edit code in ../verifier or ../app-recurring, then restart with 'make start'${NC}" +echo -e "${GREEN}Edit code in ../verifier, ../app-recurring, ../agent-backend, or ../mcp, then restart with 'make start'${NC}" echo "" # Save PIDs for later cleanup @@ -321,3 +506,13 @@ echo "$DCA_SERVER_PID" > "$LOG_DIR/dca-server.pid" echo "$DCA_WORKER_PID" > "$LOG_DIR/dca-worker.pid" echo "$DCA_SCHEDULER_PID" > "$LOG_DIR/dca-scheduler.pid" echo "$DCA_TX_INDEXER_PID" > "$LOG_DIR/dca-tx-indexer.pid" +echo "$SENDS_SERVER_PID" > "$LOG_DIR/sends-server.pid" +echo "$SENDS_WORKER_PID" > "$LOG_DIR/sends-worker.pid" +echo "$SENDS_SCHEDULER_PID" > "$LOG_DIR/sends-scheduler.pid" +echo "$SENDS_TX_INDEXER_PID" > "$LOG_DIR/sends-tx-indexer.pid" +if [ -n 
"${AGENT_BACKEND_PID:-}" ]; then + echo "$AGENT_BACKEND_PID" > "$LOG_DIR/agent-backend.pid" +fi +if [ -n "${MCP_SERVER_PID:-}" ]; then + echo "$MCP_SERVER_PID" > "$LOG_DIR/mcp-server.pid" +fi diff --git a/local/seed-plugins.sql b/local/seed-plugins.sql index 4cdc735..4334477 100644 --- a/local/seed-plugins.sql +++ b/local/seed-plugins.sql @@ -1,7 +1,7 @@ -- Seed plugins for local development -- Run with: make seed -INSERT INTO plugins (id, title, description, server_endpoint, category, logo_url, thumbnail_url, images, features, faqs, audited, created_at, updated_at) +INSERT INTO plugins (id, title, description, server_endpoint, category, features, faqs, audited, created_at, updated_at) VALUES ( 'vultisig-fees-feee', @@ -9,9 +9,6 @@ VALUES 'Automatic fee collection for Vultisig transactions', 'http://localhost:8085', 'plugin', - 'https://raw.githubusercontent.com/vultisig/verifier/main/assets/plugins/fees/icon.jpg', - 'https://raw.githubusercontent.com/vultisig/verifier/main/assets/plugins/fees/thumbnail.jpg', - '[]', '["Automatic fee deduction", "Multi-chain support", "Transparent pricing"]', '[]', false, @@ -24,9 +21,6 @@ VALUES 'Automated recurring swaps and transfers', 'http://localhost:8082', 'app', - 'https://raw.githubusercontent.com/vultisig/verifier/main/assets/plugins/dca/icon.jpg', - 'https://raw.githubusercontent.com/vultisig/verifier/main/assets/plugins/dca/thumbnail.jpg', - '[]', '["Recurring swaps", "Multi-chain support", "Flexible scheduling"]', '[]', false, @@ -39,9 +33,6 @@ VALUES 'Automated recurring token transfers', 'http://localhost:8083', 'app', - 'https://raw.githubusercontent.com/vultisig/verifier/main/assets/plugins/recurring-sends/icon.jpg', - 'https://raw.githubusercontent.com/vultisig/verifier/main/assets/plugins/recurring-sends/thumbnail.jpg', - '[]', '["Scheduled transfers", "Multi-chain support", "Reliable execution"]', '[]', false, @@ -62,10 +53,8 @@ VALUES ON CONFLICT (apikey) DO NOTHING; -- Seed plugin pricing (required for policy 
creation) --- Each plugin needs pricing entries that match the billing types used in policies -- Types: 'once' (one-time fee), 'per-tx' (per transaction), 'recurring' (subscription) -- For 'once' and 'per-tx', frequency must be NULL --- For 'recurring', frequency must be: daily, weekly, biweekly, or monthly -- Note: Delete existing rows first to prevent duplicates (pricings table has no unique constraint on type+plugin_id) DELETE FROM pricings WHERE plugin_id IN ('vultisig-dca-0000', 'vultisig-recurring-sends-0000', 'vultisig-fees-feee'); INSERT INTO pricings (type, frequency, amount, asset, metric, plugin_id, created_at, updated_at)