diff --git a/docs/channels.md b/docs/channels.md index 21a7ff1..0147484 100644 --- a/docs/channels.md +++ b/docs/channels.md @@ -177,6 +177,24 @@ This works on both Slack (via `files.getUploadURLExternal`) and Telegram (via `s Additionally, the runtime tracks large tool outputs (>8000 characters) and attaches them as file parts in the A2A response. This ensures channel adapters receive the complete, untruncated tool output even when the LLM's text summary is truncated by output token limits. JSON tool outputs (e.g. Tavily Research/Search results) are automatically unwrapped into readable markdown before delivery. +## Container Deployment + +When channels are configured in `forge.yaml`, the build pipeline automatically: + +1. **Includes channel config files** — `slack-config.yaml`, `telegram-config.yaml`, etc. are copied into the Docker build context alongside `forge.yaml` +2. **Adds `--with` to the entrypoint** — The container entrypoint becomes `["forge", "run", "--host", "0.0.0.0", "--with", "slack,telegram"]` +3. **Handles auth loopback** — When [external auth](runtime.md#external-authentication) is configured, channel adapters authenticate to the A2A server using an internal token, bypassing the external auth provider + +Pass channel secrets via environment variables: + +```bash +docker run \ + -e SLACK_APP_TOKEN=xapp-... \ + -e SLACK_BOT_TOKEN=xoxb-... \ + -e FORGE_AUTH_URL=https://auth.example.com/verify \ + my-agent +``` + ## Docker Compose Integration ```bash diff --git a/docs/commands.md b/docs/commands.md index 13a47e0..5c6ccac 100644 --- a/docs/commands.md +++ b/docs/commands.md @@ -84,6 +84,15 @@ forge build [flags] Uses global `--config` and `--output-dir` flags. Output is written to `.forge-output/` by default. 
+### Flags + +| Flag | Default | Description | +|------|---------|-------------| +| `--signing-key` | | Path to Ed25519 private key for signing build output | +| `--slim` | `false` | Minimize image size (skip heavy/optional binaries) | +| `--alpine` | `false` | Prefer Alpine base image | +| `--local-bin` | | Local binary override as `name=/path/to/file` (repeatable) | + ### Examples ```bash @@ -92,6 +101,12 @@ forge build # Build with custom config and output forge build --config agent.yaml --output-dir ./build + +# Build with a local binary override +forge build --local-bin forge=/path/to/linux/forge + +# Build with Alpine base and slim image +forge build --alpine --slim ``` --- @@ -147,6 +162,7 @@ forge run [flags] | `--provider` | | LLM provider: `openai`, `anthropic`, or `ollama` | | `--env` | `.env` | Path to .env file | | `--with` | | Comma-separated channel adapters (e.g., `slack,telegram`) | +| `--auth-url` | | External auth provider URL for token validation | | `--cors-origins` | localhost | Comma-separated CORS allowed origins (e.g., `https://app.example.com,https://admin.example.com`). 
Use `*` to allow all origins | ### Examples @@ -277,6 +293,9 @@ forge package [flags] | `--builder` | | Force builder: `docker`, `podman`, or `buildah` | | `--skip-build` | `false` | Skip re-running forge build | | `--with-channels` | `false` | Generate docker-compose.yaml with channel adapters | +| `--slim` | `false` | Minimize image size (skip heavy/optional binaries) | +| `--alpine` | `false` | Prefer Alpine base image | +| `--local-bin` | | Local binary override as `name=/path/to/file` (repeatable) | ### Examples @@ -292,6 +311,12 @@ forge package --platform linux/amd64 --no-cache # Generate docker-compose with channels forge package --with-channels + +# Package with a local binary override +forge package --local-bin forge=/path/to/linux/forge + +# Package with slim Alpine image +forge package --alpine --slim ``` --- diff --git a/docs/configuration.md b/docs/configuration.md index 3204bcf..5cbdb9f 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -46,8 +46,18 @@ egress: cors_origins: # CORS allowed origins for A2A server - "https://app.example.com" # (default: localhost variants) -skills: - path: "SKILL.md" +package: + alpine: false # Prefer Alpine base image + slim: false # Minimize image size + bin_overrides: # Per-binary install overrides + forge: + local: "/path/to/linux/forge" # Host path to local binary file + jq: + apt: "jq" # APT package name + custom-tool: + url: "https://example.com/tool.tar.gz" # Direct download URL + dest: "/usr/local/bin/custom-tool" # Install destination + chmod: "0755" # File permissions secrets: providers: # Secret providers (order matters) @@ -96,6 +106,8 @@ schedules: # Recurring scheduled tasks (optional) | `ANTHROPIC_BASE_URL` | Override Anthropic base URL | | `OLLAMA_BASE_URL` | Override Ollama base URL (default: `http://localhost:11434`) | | `FORGE_CORS_ORIGINS` | Comma-separated CORS allowed origins for A2A server | +| `FORGE_AUTH_URL` | External auth provider URL for token validation | +| 
`FORGE_AUTH_ORG_ID` | Organization ID sent to external auth provider | | `FORGE_PASSPHRASE` | Passphrase for encrypted secrets file | --- diff --git a/docs/runtime.md b/docs/runtime.md index b0f9d6d..94f8caf 100644 --- a/docs/runtime.md +++ b/docs/runtime.md @@ -169,6 +169,7 @@ forge run --host 0.0.0.0 --shutdown-timeout 30s | `--env` | `.env` | Path to env file | | `--enforce-guardrails` | `true` | Enforce guardrail violations as errors | | `--no-guardrails` | `false` | Disable all guardrail enforcement | +| `--auth-url` | — | External auth provider URL for token validation | ### `forge serve` — Background Daemon @@ -200,6 +201,28 @@ forge serve logs The daemon forks `forge run` in the background with `setsid`, writes state to `.forge/serve.json`, and redirects output to `.forge/serve.log`. Passphrase prompting for encrypted secrets happens in the parent process (which has TTY access) before forking. +## External Authentication + +When `--auth-url` is set (or `FORGE_AUTH_URL` env var), the runtime delegates token validation to an external auth provider. On each request, the bearer token is forwarded to the external URL for verification. + +```bash +# Via CLI flag +forge run --auth-url https://auth.example.com/verify + +# Via environment variable (useful in containers) +docker run -e FORGE_AUTH_URL=https://auth.example.com/verify my-agent +``` + +The middleware checks tokens in two layers: an internal token is accepted first (used by channel adapter loopback calls), then the external auth provider is consulted. This ensures channel adapters (Slack, Telegram) can reach the A2A server without needing a valid external token. + +## KUBECONFIG Materialization + +The runtime supports passing kubeconfig content directly via the `KUBECONFIG` environment variable. If `KUBECONFIG` contains inline YAML (detected by newlines or `apiVersion:` markers), the runtime automatically writes it to a file and updates `KUBECONFIG` to point to that file. 
This is useful for container deployments where mounting files is inconvenient:
+
+```bash
+docker run -e KUBECONFIG="$(cat ~/.kube/config)" my-agent
+```
+
 ## File Output Directory
 
 The runtime configures a `FilesDir` for tool-generated files (e.g., from `file_create`). This directory defaults to `/.forge/files/` and is injected into the execution context so tools can write files that other tools can reference by path.
diff --git a/docs/skills.md b/docs/skills.md
index 9a69840..f82f601 100644
--- a/docs/skills.md
+++ b/docs/skills.md
@@ -6,11 +6,11 @@ Skills are a progressive disclosure mechanism for defining agent capabilities in
 
 ## Overview
 
-Skills bridge the gap between high-level capability descriptions and the tool-calling system. A `SKILL.md` file in your project root defines what the agent can do, and Forge compiles these into JSON artifacts and prompt text for the container.
+Skills bridge the gap between high-level capability descriptions and the tool-calling system. Each skill lives in its own subdirectory under `skills/` with a `SKILL.md` file that defines what the agent can do. Forge compiles these into JSON artifacts and prompt text for the container.
 
 ## SKILL.md Format
 
-Skills are defined in a Markdown file (default: `SKILL.md`). The file supports optional YAML frontmatter and two body formats.
+Skills are defined in Markdown files inside `skills/<skill-name>/SKILL.md`. Each file supports optional YAML frontmatter and two body formats.
 
 ```markdown
 ---
@@ -499,20 +499,10 @@ The skill compilation pipeline has three stages:
 
 The `SkillsStage` runs as part of the build pipeline:
 
-1. Resolves the skills file path (default: `SKILL.md` in work directory)
-2. Skips silently if the file doesn't exist
-3. Parses, compiles, and writes artifacts
-4. Updates the `AgentSpec` with `skills_spec_version` and `forge_skills_ext_version`
-5. 
Records generated files in the build manifest - -## Configuration - -In `forge.yaml`: - -```yaml -skills: - path: SKILL.md # default, can be customized -``` +1. Scans the `skills/` subdirectory for `SKILL.md` files in each subdirectory +2. Parses, compiles, and writes artifacts +3. Updates the `AgentSpec` with `skills_spec_version` and `forge_skills_ext_version` +4. Records generated files in the build manifest ## CLI Workflow diff --git a/docs/tools.md b/docs/tools.md index 627cf96..3357fd8 100644 --- a/docs/tools.md +++ b/docs/tools.md @@ -128,10 +128,10 @@ When `HOME` is overridden to `workDir`, `kubectl` and `helm` lose access to `~/. | Env Var | Value | Purpose | |---------|-------|---------| -| `KUBECONFIG` | `/.kube/config` | Restores access to the real kubeconfig | +| `KUBECONFIG` | Explicit `KUBECONFIG` if set, else `/.kube/config` | Passes through the active kubeconfig | | `NO_PROXY` | K8s API server hostname(s) | Bypasses the egress proxy for cluster connections | -`NO_PROXY` is extracted from the kubeconfig's `clusters[].cluster.server` field. Other binaries do not receive these variables. +If `KUBECONFIG` is explicitly set in the environment (e.g., via `docker run -e KUBECONFIG=...` or after [KUBECONFIG materialization](runtime.md#kubeconfig-materialization)), that value is passed through directly. Otherwise, `cli_execute` falls back to the real `~/.kube/config`. `NO_PROXY` is extracted from the kubeconfig's `clusters[].cluster.server` field. Other binaries do not receive these variables. 
## File Create diff --git a/forge-cli/build/dockerfile_stage.go b/forge-cli/build/dockerfile_stage.go index f656523..08ea3f4 100644 --- a/forge-cli/build/dockerfile_stage.go +++ b/forge-cli/build/dockerfile_stage.go @@ -4,13 +4,16 @@ import ( "bytes" "context" "fmt" + "io" "os" "path/filepath" "text/template" "github.com/initializ/forge/forge-cli/templates" "github.com/initializ/forge/forge-core/compiler" + "github.com/initializ/forge/forge-core/packaging" "github.com/initializ/forge/forge-core/pipeline" + "github.com/initializ/forge/forge-skills/contract" ) // DockerfileStage generates a Dockerfile from the embedded template. @@ -19,6 +22,91 @@ type DockerfileStage struct{} func (s *DockerfileStage) Name() string { return "generate-dockerfile" } func (s *DockerfileStage) Execute(ctx context.Context, bc *pipeline.BuildContext) error { + // Copy project source files into the output directory so they are + // included in the Docker build context (COPY . .). + if err := s.copyProjectSources(bc); err != nil { + return err + } + + // Copy local binary overrides into the build context + if err := s.copyLocalBins(bc); err != nil { + return err + } + + // Inject local bin overrides into the BinManifest so they get COPY + // instructions in the generated Dockerfile. This handles binaries + // not declared by skills (e.g. the forge framework binary itself). 
+ s.injectLocalBins(bc) + + // Try smart Dockerfile generation when BinManifest is available + if bc.BinManifest != nil { + if manifest, ok := bc.BinManifest.(*packaging.BinManifest); ok && len(manifest.Requirements) > 0 { + if err := s.generateSmartDockerfile(bc, manifest); err != nil { + return err + } + return s.writeDockerignore(bc) + } + } + + // Fall through to template-based generation + if err := s.generateTemplateDockerfile(bc); err != nil { + return err + } + return s.writeDockerignore(bc) +} + +func (s *DockerfileStage) generateSmartDockerfile(bc *pipeline.BuildContext, manifest *packaging.BinManifest) error { + cfg := bc.Config.Package + binFragment, warnings, err := packaging.GenerateDockerfile(manifest, cfg, bc.PreferAlpine, bc.PreferSlim) + if err != nil { + return fmt.Errorf("generating bin install Dockerfile: %w", err) + } + + // Print resolution progress + for _, w := range warnings { + fmt.Fprintf(os.Stderr, " [bins] warning: %s\n", w) + bc.AddWarning(w) + } + + fmt.Fprintf(os.Stderr, " [bins] resolved %d binaries\n", len(manifest.Requirements)) + + // Now generate the main Dockerfile incorporating the bin fragment + // The bin fragment is a separate stage; we prepend it to the existing template output + tmplData, err := templates.FS.ReadFile("Dockerfile.tmpl") + if err != nil { + return fmt.Errorf("reading Dockerfile template: %w", err) + } + + tmpl, err := template.New("Dockerfile").Parse(string(tmplData)) + if err != nil { + return fmt.Errorf("parsing Dockerfile template: %w", err) + } + + data := compiler.BuildTemplateDataFromContext(bc.Spec, bc) + data.HasBinStage = true + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return fmt.Errorf("rendering Dockerfile: %w", err) + } + + // Combine: bin install stages + main Dockerfile + var combined bytes.Buffer + combined.WriteString("# --- Binary installation stages (auto-generated) ---\n") + combined.WriteString(binFragment) + combined.WriteString("\n# --- Application 
stage ---\n") + combined.Write(buf.Bytes()) + + outPath := filepath.Join(bc.Opts.OutputDir, "Dockerfile") + if err := os.WriteFile(outPath, combined.Bytes(), 0644); err != nil { + return fmt.Errorf("writing Dockerfile: %w", err) + } + + bc.AddFile("Dockerfile", outPath) + return nil +} + +func (s *DockerfileStage) generateTemplateDockerfile(bc *pipeline.BuildContext) error { tmplData, err := templates.FS.ReadFile("Dockerfile.tmpl") if err != nil { return fmt.Errorf("reading Dockerfile template: %w", err) @@ -42,8 +130,171 @@ func (s *DockerfileStage) Execute(ctx context.Context, bc *pipeline.BuildContext } bc.AddFile("Dockerfile", outPath) + return nil +} - // Generate .dockerignore to prevent secrets from leaking into container images. +// copyProjectSources copies essential project files from the work directory +// into the build output directory so they are available inside the container. +func (s *DockerfileStage) copyProjectSources(bc *pipeline.BuildContext) error { + workDir := bc.Opts.WorkDir + outDir := bc.Opts.OutputDir + + // Individual files to copy + filesToCopy := []string{"forge.yaml"} + // Include channel config files (e.g. 
slack-config.yaml, telegram-config.yaml) + if bc.Config != nil { + for _, ch := range bc.Config.Channels { + filesToCopy = append(filesToCopy, ch+"-config.yaml") + } + } + for _, name := range filesToCopy { + src := filepath.Join(workDir, name) + if _, err := os.Stat(src); err != nil { + continue + } + if err := copyFile(src, filepath.Join(outDir, name)); err != nil { + return fmt.Errorf("copying %s to output: %w", name, err) + } + bc.AddFile(name, filepath.Join(outDir, name)) + } + + // Copy skills/ subdirectory if present + skillsDir := filepath.Join(workDir, "skills") + if info, err := os.Stat(skillsDir); err == nil && info.IsDir() { + if err := copyDir(skillsDir, filepath.Join(outDir, "skills")); err != nil { + return fmt.Errorf("copying skills/ to output: %w", err) + } + } + + return nil +} + +// injectLocalBins ensures every local bin override is represented in the +// BinManifest so that the Dockerfile generator emits COPY instructions for them. +func (s *DockerfileStage) injectLocalBins(bc *pipeline.BuildContext) { + // Collect local bin names from config overrides + var localNames []string + if bc.Config != nil { + for name, override := range bc.Config.Package.BinOverrides { + if override.LocalPath != "" { + localNames = append(localNames, name) + } + } + } + if len(localNames) == 0 { + return + } + + // Get or create manifest + var manifest *packaging.BinManifest + if bc.BinManifest != nil { + manifest, _ = bc.BinManifest.(*packaging.BinManifest) + } + if manifest == nil { + manifest = &packaging.BinManifest{ + SkillOrigin: make(map[string]string), + } + bc.BinManifest = manifest + } + + // Build set of existing requirements + existing := make(map[string]bool) + for _, req := range manifest.Requirements { + existing[req.Name] = true + } + + // Add missing local bins + for _, name := range localNames { + if !existing[name] { + manifest.Requirements = append(manifest.Requirements, contract.BinRequirement{ + Name: name, + }) + manifest.SkillOrigin[name] = 
"local-override" + } + } +} + +// copyLocalBins copies local binary files into .local-bins/ in the build output directory. +// It collects binaries from both forge.yaml config (BinOverrides with LocalPath) and +// CLI flags (bc.LocalBins). +func (s *DockerfileStage) copyLocalBins(bc *pipeline.BuildContext) error { + // Collect local bins from config + bins := make(map[string]string) + if bc.Config != nil { + for name, override := range bc.Config.Package.BinOverrides { + if override.LocalPath != "" { + bins[name] = override.LocalPath + } + } + } + // CLI flags (bc.LocalBins) may have additional entries not yet in config + for name, path := range bc.LocalBins { + bins[name] = path + } + + if len(bins) == 0 { + return nil + } + + localBinsDir := filepath.Join(bc.Opts.OutputDir, ".local-bins") + if err := os.MkdirAll(localBinsDir, 0755); err != nil { + return fmt.Errorf("creating .local-bins directory: %w", err) + } + + for name, src := range bins { + dst := filepath.Join(localBinsDir, name) + fmt.Fprintf(os.Stderr, " [local-bin] copying %s → .local-bins/%s\n", src, name) + if err := copyFile(src, dst); err != nil { + return fmt.Errorf("copying local binary %s: %w", name, err) + } + } + + return nil +} + +// copyFile copies a single file from src to dst, preserving permissions. +func copyFile(src, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer func() { _ = in.Close() }() + + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + out, err := os.Create(dst) + if err != nil { + return err + } + defer func() { _ = out.Close() }() + + _, err = io.Copy(out, in) + return err +} + +// copyDir recursively copies a directory tree from src to dst. 
+func copyDir(src, dst string) error { + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + rel, err := filepath.Rel(src, path) + if err != nil { + return err + } + target := filepath.Join(dst, rel) + + if info.IsDir() { + return os.MkdirAll(target, 0755) + } + return copyFile(path, target) + }) +} + +func (s *DockerfileStage) writeDockerignore(bc *pipeline.BuildContext) error { dockerignoreContent := `.env .env.* *.enc @@ -56,6 +307,5 @@ secrets.enc return fmt.Errorf("writing .dockerignore: %w", err) } bc.AddFile(".dockerignore", ignorePath) - return nil } diff --git a/forge-cli/build/requirements_stage.go b/forge-cli/build/requirements_stage.go index 35acba3..f04594a 100644 --- a/forge-cli/build/requirements_stage.go +++ b/forge-cli/build/requirements_stage.go @@ -4,6 +4,7 @@ import ( "context" "github.com/initializ/forge/forge-core/agentspec" + "github.com/initializ/forge/forge-core/packaging" "github.com/initializ/forge/forge-core/pipeline" "github.com/initializ/forge/forge-skills/contract" "github.com/initializ/forge/forge-skills/requirements" @@ -76,5 +77,28 @@ func (s *RequirementsStage) Execute(ctx context.Context, bc *pipeline.BuildConte } } + // Build BinManifest from rich requirements for smart Dockerfile generation + if len(reqs.BinRequirements) > 0 { + manifest := &packaging.BinManifest{ + Requirements: reqs.BinRequirements, + SkillOrigin: make(map[string]string), + } + // Populate skill origins from entries if available + if bc.SkillEntries != nil { + if entries, ok := bc.SkillEntries.([]contract.SkillEntry); ok { + for _, e := range entries { + if e.ForgeReqs != nil { + for _, b := range e.ForgeReqs.Bins { + if _, exists := manifest.SkillOrigin[b.Name]; !exists { + manifest.SkillOrigin[b.Name] = e.Name + } + } + } + } + } + } + bc.BinManifest = manifest + } + return nil } diff --git a/forge-cli/build/security_stage_test.go b/forge-cli/build/security_stage_test.go index 
6466036..efd302b 100644 --- a/forge-cli/build/security_stage_test.go +++ b/forge-cli/build/security_stage_test.go @@ -43,7 +43,7 @@ func TestSecurityAnalysisStage_CleanSkills(t *testing.T) { { Name: "simple-tool", ForgeReqs: &contract.SkillRequirements{ - Bins: []string{"curl"}, + Bins: []contract.BinRequirement{{Name: "curl"}}, Env: &contract.EnvRequirements{Required: []string{"API_KEY"}}, }, }, @@ -87,7 +87,7 @@ func TestSecurityAnalysisStage_PolicyFail(t *testing.T) { { Name: "danger-tool", ForgeReqs: &contract.SkillRequirements{ - Bins: []string{"nc"}, // denied by default policy + Bins: []contract.BinRequirement{{Name: "nc"}}, // denied by default policy }, }, } diff --git a/forge-cli/build/skills_stage.go b/forge-cli/build/skills_stage.go index 406bfa7..e349737 100644 --- a/forge-cli/build/skills_stage.go +++ b/forge-cli/build/skills_stage.go @@ -9,6 +9,7 @@ import ( cliskills "github.com/initializ/forge/forge-cli/skills" "github.com/initializ/forge/forge-core/pipeline" skillcompiler "github.com/initializ/forge/forge-skills/compiler" + "github.com/initializ/forge/forge-skills/contract" "github.com/initializ/forge/forge-skills/requirements" ) @@ -37,6 +38,16 @@ func (s *SkillsStage) Execute(ctx context.Context, bc *pipeline.BuildContext) er return fmt.Errorf("parsing skills file: %w", err) } + // Scan skills/ subdirectory for additional SKILL.md files + skillsSubDir := filepath.Join(bc.Opts.WorkDir, "skills") + subEntries, subErr := scanSkillsSubDir(skillsSubDir) + if subErr != nil { + fmt.Fprintf(os.Stderr, " [skills] warning: scanning skills/ subdirectory: %v\n", subErr) + } + if len(subEntries) > 0 { + entries = append(entries, subEntries...) 
+ } + if len(entries) == 0 { return nil } @@ -69,3 +80,36 @@ func (s *SkillsStage) Execute(ctx context.Context, bc *pipeline.BuildContext) er bc.AddFile("compiled/prompt.txt", filepath.Join(bc.Opts.OutputDir, "compiled", "prompt.txt")) return nil } + +// scanSkillsSubDir scans the skills/ subdirectory for SKILL.md files in each +// child directory and returns parsed entries merged from all discovered skills. +func scanSkillsSubDir(skillsDir string) ([]contract.SkillEntry, error) { + info, err := os.Stat(skillsDir) + if err != nil || !info.IsDir() { + return nil, nil // skills/ directory does not exist, nothing to scan + } + + dirEntries, err := os.ReadDir(skillsDir) + if err != nil { + return nil, fmt.Errorf("reading skills directory: %w", err) + } + + var allEntries []contract.SkillEntry + for _, de := range dirEntries { + if !de.IsDir() { + continue + } + skillPath := filepath.Join(skillsDir, de.Name(), "SKILL.md") + if _, statErr := os.Stat(skillPath); os.IsNotExist(statErr) { + continue + } + + entries, _, parseErr := cliskills.ParseFileWithMetadata(skillPath) + if parseErr != nil { + fmt.Fprintf(os.Stderr, " [skills] warning: parsing %s: %v\n", skillPath, parseErr) + continue + } + allEntries = append(allEntries, entries...) 
+ } + return allEntries, nil +} diff --git a/forge-cli/cmd/build.go b/forge-cli/cmd/build.go index 87ad846..8ebfbb3 100644 --- a/forge-cli/cmd/build.go +++ b/forge-cli/cmd/build.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/initializ/forge/forge-cli/build" "github.com/initializ/forge/forge-cli/config" @@ -13,11 +14,17 @@ import ( "github.com/initializ/forge/forge-cli/plugins/langchain" "github.com/initializ/forge/forge-core/pipeline" "github.com/initializ/forge/forge-core/plugins" + "github.com/initializ/forge/forge-core/types" "github.com/initializ/forge/forge-core/validate" "github.com/spf13/cobra" ) -var signingKey string +var ( + signingKey string + buildSlim bool + buildAlpine bool + localBins []string +) var buildCmd = &cobra.Command{ Use: "build", @@ -27,6 +34,10 @@ var buildCmd = &cobra.Command{ func init() { buildCmd.Flags().StringVar(&signingKey, "signing-key", "", "path to Ed25519 private key for signing build output") + buildCmd.Flags().BoolVar(&buildSlim, "slim", false, "minimize image size (skip heavy/optional binaries)") + buildCmd.Flags().BoolVar(&buildAlpine, "alpine", false, "prefer Alpine base image") + buildCmd.Flags().StringArrayVar(&localBins, "local-bin", nil, "local binary override as name=/path/to/file (repeatable)") + } func runBuild(cmd *cobra.Command, args []string) error { @@ -53,6 +64,22 @@ func runBuild(cmd *cobra.Command, args []string) error { return fmt.Errorf("config validation failed: %d error(s)", len(result.Errors)) } + // Parse --local-bin flags and merge into config + parsedLocalBins, err := parseLocalBins(localBins) + if err != nil { + return err + } + if len(parsedLocalBins) > 0 { + if cfg.Package.BinOverrides == nil { + cfg.Package.BinOverrides = make(map[string]types.BinOverride) + } + for name, path := range parsedLocalBins { + override := cfg.Package.BinOverrides[name] + override.LocalPath = path + cfg.Package.BinOverrides[name] = override + } + } + outDir := outputDir if outDir == "." 
{ outDir = filepath.Join(filepath.Dir(cfgPath), ".forge-output") @@ -69,6 +96,10 @@ func runBuild(cmd *cobra.Command, args []string) error { }) bc.Config = cfg bc.Verbose = verbose + bc.LocalBins = parsedLocalBins + bc.PreferAlpine = buildAlpine || cfg.Package.Alpine + bc.PreferSlim = buildSlim || cfg.Package.Slim + bc.ForgeCLIVersion = appVersion reg := plugins.NewFrameworkRegistry() reg.Register(&crewai.Plugin{}) @@ -104,3 +135,31 @@ func runBuild(cmd *cobra.Command, args []string) error { fmt.Printf("Build complete. Output: %s\n", outDir) return nil } + +// parseLocalBins parses "name=/path/to/file" pairs and validates file existence. +func parseLocalBins(args []string) (map[string]string, error) { + if len(args) == 0 { + return nil, nil + } + result := make(map[string]string, len(args)) + for _, arg := range args { + parts := strings.SplitN(arg, "=", 2) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return nil, fmt.Errorf("invalid --local-bin format %q: expected name=/path/to/file", arg) + } + name, path := parts[0], parts[1] + absPath, err := filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf("resolving path for --local-bin %s: %w", name, err) + } + info, err := os.Stat(absPath) + if err != nil { + return nil, fmt.Errorf("--local-bin %s: file %q not found: %w", name, absPath, err) + } + if info.IsDir() { + return nil, fmt.Errorf("--local-bin %s: %q is a directory, expected a file", name, absPath) + } + result[name] = absPath + } + return result, nil +} diff --git a/forge-cli/cmd/init.go b/forge-cli/cmd/init.go index 4bf6919..d437e81 100644 --- a/forge-cli/cmd/init.go +++ b/forge-cli/cmd/init.go @@ -881,7 +881,6 @@ func writeEnvFile(dir string, vars []envVarEntry) error { func getFileManifest(opts *initOptions) []fileToRender { files := []fileToRender{ {TemplatePath: "forge.yaml.tmpl", OutputPath: "forge.yaml"}, - {TemplatePath: "SKILL.md.tmpl", OutputPath: "SKILL.md"}, {TemplatePath: "env.example.tmpl", OutputPath: ".env.example"}, 
{TemplatePath: "gitignore.tmpl", OutputPath: ".gitignore"}, } diff --git a/forge-cli/cmd/init_test.go b/forge-cli/cmd/init_test.go index f99dc44..279b3b8 100644 --- a/forge-cli/cmd/init_test.go +++ b/forge-cli/cmd/init_test.go @@ -213,7 +213,6 @@ func TestGetFileManifestForge(t *testing.T) { files := getFileManifest(opts) // Forge framework should have common files but no entrypoint scaffolding assertContainsTemplate(t, files, "forge.yaml.tmpl") - assertContainsTemplate(t, files, "SKILL.md.tmpl") assertContainsTemplate(t, files, "gitignore.tmpl") // Should NOT have agent.py or similar for _, f := range files { @@ -227,7 +226,6 @@ func TestGetFileManifestCommonFiles(t *testing.T) { opts := &initOptions{Framework: "forge"} files := getFileManifest(opts) assertContainsTemplate(t, files, "forge.yaml.tmpl") - assertContainsTemplate(t, files, "SKILL.md.tmpl") assertContainsTemplate(t, files, "env.example.tmpl") assertContainsTemplate(t, files, "gitignore.tmpl") } @@ -261,7 +259,6 @@ func TestScaffoldIntegration(t *testing.T) { // Verify all expected files exist (forge framework has no entrypoint files) expectedFiles := []string{ "forge.yaml", - "SKILL.md", ".env.example", ".gitignore", } diff --git a/forge-cli/cmd/package.go b/forge-cli/cmd/package.go index 0ca7af0..758a0b0 100644 --- a/forge-cli/cmd/package.go +++ b/forge-cli/cmd/package.go @@ -28,6 +28,9 @@ var ( builderArg string skipBuild bool withChannels bool + pkgSlim bool + pkgAlpine bool + pkgLocalBins []string ) var packageCmd = &cobra.Command{ @@ -48,6 +51,9 @@ func init() { packageCmd.Flags().StringVar(&builderArg, "builder", "", "force specific builder (docker, podman, buildah)") packageCmd.Flags().BoolVar(&skipBuild, "skip-build", false, "skip re-running forge build") packageCmd.Flags().BoolVar(&withChannels, "with-channels", false, "generate docker-compose.yaml with channel adapters") + packageCmd.Flags().BoolVar(&pkgSlim, "slim", false, "minimize image size (skip heavy/optional binaries)") + 
packageCmd.Flags().BoolVar(&pkgAlpine, "alpine", false, "prefer Alpine base image") + packageCmd.Flags().StringArrayVar(&pkgLocalBins, "local-bin", nil, "local binary override as name=/path/to/file (repeatable)") } func runPackage(cmd *cobra.Command, args []string) error { @@ -88,6 +94,17 @@ func runPackage(cmd *cobra.Command, args []string) error { reg = cfg.Registry } + // Forward package flags to the build step + if len(pkgLocalBins) > 0 { + localBins = pkgLocalBins + } + if pkgSlim { + buildSlim = true + } + if pkgAlpine { + buildAlpine = true + } + // Check if build output exists and is fresh if !skipBuild { if err := ensureBuildOutput(outDir, cfgPath); err != nil { diff --git a/forge-cli/cmd/run.go b/forge-cli/cmd/run.go index 3743b57..8d2a247 100644 --- a/forge-cli/cmd/run.go +++ b/forge-cli/cmd/run.go @@ -30,6 +30,8 @@ var ( runWithChannels string runNoAuth bool runAuthToken string + runAuthURL string + runAuthOrgID string runCORSOrigins string ) @@ -53,6 +55,8 @@ func init() { runCmd.Flags().StringVar(&runWithChannels, "with", "", "comma-separated channel adapters to start (e.g. slack,telegram)") runCmd.Flags().BoolVar(&runNoAuth, "no-auth", false, "disable bearer token authentication (localhost only)") runCmd.Flags().StringVar(&runAuthToken, "auth-token", "", "explicit bearer token (default: auto-generated)") + runCmd.Flags().StringVar(&runAuthURL, "auth-url", "", "external auth provider URL for token validation (e.g. 
https://auth.example.com/verify)") + runCmd.Flags().StringVar(&runAuthOrgID, "auth-org-id", "", "org_id sent to the external auth provider") runCmd.Flags().StringVar(&runCORSOrigins, "cors-origins", "", "comma-separated CORS allowed origins (default: localhost only, use '*' for wildcard)") } @@ -93,6 +97,8 @@ func runRun(cmd *cobra.Command, args []string) error { Channels: activeChannels, NoAuth: runNoAuth, AuthToken: runAuthToken, + AuthURL: runAuthURL, + AuthOrgID: runAuthOrgID, CORSOrigins: corsOrigins, }) if err != nil { diff --git a/forge-cli/cmd/serve.go b/forge-cli/cmd/serve.go index 9c661ae..036a876 100644 --- a/forge-cli/cmd/serve.go +++ b/forge-cli/cmd/serve.go @@ -36,6 +36,8 @@ var ( serveWithChannels string serveNoAuth bool serveAuthToken string + serveAuthURL string + serveAuthOrgID string serveCORSOrigins string ) @@ -99,6 +101,8 @@ func registerServeFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&serveWithChannels, "with", "", "comma-separated channel adapters to start (e.g. slack,telegram)") cmd.Flags().BoolVar(&serveNoAuth, "no-auth", false, "disable bearer token authentication (localhost only)") cmd.Flags().StringVar(&serveAuthToken, "auth-token", "", "explicit bearer token (default: auto-generated)") + cmd.Flags().StringVar(&serveAuthURL, "auth-url", "", "external auth provider URL for token validation (e.g. 
https://auth.example.com/verify)") + cmd.Flags().StringVar(&serveAuthOrgID, "auth-org-id", "", "org_id sent to the external auth provider") cmd.Flags().StringVar(&serveCORSOrigins, "cors-origins", "", "comma-separated CORS allowed origins (default: localhost only, use '*' for wildcard)") } @@ -195,6 +199,12 @@ func serveStartRun(cmd *cobra.Command, args []string) error { if serveAuthToken != "" { runArgs = append(runArgs, "--auth-token", serveAuthToken) } + if serveAuthURL != "" { + runArgs = append(runArgs, "--auth-url", serveAuthURL) + } + if serveAuthOrgID != "" { + runArgs = append(runArgs, "--auth-org-id", serveAuthOrgID) + } if serveCORSOrigins != "" { runArgs = append(runArgs, "--cors-origins", serveCORSOrigins) } diff --git a/forge-cli/runtime/runner.go b/forge-cli/runtime/runner.go index 2f63bda..710833f 100644 --- a/forge-cli/runtime/runner.go +++ b/forge-cli/runtime/runner.go @@ -48,6 +48,8 @@ type RunnerConfig struct { Channels []string // active channel adapters from --with flag NoAuth bool // disable bearer token authentication AuthToken string // explicit bearer token (empty = auto-generate) + AuthURL string // external auth provider URL for token validation + AuthOrgID string // org_id sent to external auth provider CORSOrigins []string // CORS allowed origins (from --cors-origins flag) } @@ -122,6 +124,24 @@ func (r *Runner) ResolveAuth() error { if r.authToken != "" || r.cfg.NoAuth { return nil // already resolved } + // Fall back to env vars for external auth configuration. + if r.cfg.AuthURL == "" { + r.cfg.AuthURL = os.Getenv("FORGE_AUTH_URL") + } + if r.cfg.AuthOrgID == "" { + r.cfg.AuthOrgID = os.Getenv("FORGE_AUTH_ORG_ID") + } + // When using an external auth URL, still generate an internal token + // for channel adapter loopback calls, but external requests are + // validated against the auth provider. 
+ if r.cfg.AuthURL != "" { + token, err := auth.GenerateToken() + if err != nil { + return fmt.Errorf("generating internal auth token: %w", err) + } + r.authToken = token + return nil + } local := isLocalhost(r.cfg.Host) if r.cfg.NoAuth && !local { return fmt.Errorf("--no-auth is only allowed when binding to localhost (current host: %s)", r.cfg.Host) @@ -149,7 +169,16 @@ func (r *Runner) AuthToken() string { // Run starts the development server. It blocks until ctx is cancelled. func (r *Runner) Run(ctx context.Context) error { - // 0. Verify build output integrity if checksums.json exists. + // 0. Materialize inline KUBECONFIG content to a file. + if materialized, err := materializeKubeconfig(r.cfg.WorkDir); err != nil { + r.logger.Warn("failed to materialize KUBECONFIG", map[string]any{"error": err.Error()}) + } else if materialized { + r.logger.Info("materialized inline KUBECONFIG to file", map[string]any{ + "path": os.Getenv("KUBECONFIG"), + }) + } + + // 0b. Verify build output integrity if checksums.json exists. 
outputDir := filepath.Join(r.cfg.WorkDir, ".forge-output") if err := VerifyBuildOutput(outputDir); err != nil { r.logger.Warn("build output verification failed", map[string]any{"error": err.Error()}) @@ -1657,6 +1686,8 @@ func (r *Runner) printBanner(proxyURL string) { // Auth if r.cfg.NoAuth { fmt.Fprintf(os.Stderr, " Auth: disabled (--no-auth)\n") + } else if r.cfg.AuthURL != "" { + fmt.Fprintf(os.Stderr, " Auth: external (%s)\n", r.cfg.AuthURL) } else if r.authToken != "" { fmt.Fprintf(os.Stderr, " Auth: enabled (token in .forge/runtime.token)\n") } @@ -1693,6 +1724,8 @@ func (r *Runner) resolveAuth(auditLogger *coreruntime.AuditLogger) (auth.Config, cfg := auth.Config{ Enabled: true, Token: r.authToken, + AuthURL: r.cfg.AuthURL, + AuthOrgID: r.cfg.AuthOrgID, SkipPaths: auth.DefaultSkipPaths(), OnAuth: func(req *http.Request, success bool) { if auditLogger == nil { @@ -2503,6 +2536,43 @@ func isLocalhost(host string) bool { return host == "" || host == "127.0.0.1" || host == "localhost" || host == "::1" } +// materializeKubeconfig checks whether the KUBECONFIG env var contains inline +// YAML content (rather than a file path). If so, it writes the content to a +// file and updates KUBECONFIG to point to that file. This allows users to pass +// kubeconfig content directly via `-e KUBECONFIG=""`. +// materializeKubeconfig checks whether the KUBECONFIG env var contains inline +// YAML content (rather than a file path). If so, it writes the content to a +// file and updates KUBECONFIG to point to that file. Returns true if content +// was materialized. +func materializeKubeconfig(workDir string) (bool, error) { + val := os.Getenv("KUBECONFIG") + if val == "" { + return false, nil + } + // Heuristic: if the value contains a newline or starts with typical + // kubeconfig YAML markers, treat it as inline content rather than a path. 
+ isInline := strings.Contains(val, "\n") || + strings.HasPrefix(strings.TrimSpace(val), "apiVersion:") || + strings.Contains(val, "certificate-authority-data:") || + strings.Contains(val, "clusters:") + if !isInline { + return false, nil // looks like a file path + } + + kubeDir := filepath.Join(workDir, ".kube") + if err := os.MkdirAll(kubeDir, 0700); err != nil { + return false, fmt.Errorf("creating .kube directory: %w", err) + } + kubePath := filepath.Join(kubeDir, "config") + if err := os.WriteFile(kubePath, []byte(val), 0600); err != nil { + return false, fmt.Errorf("writing kubeconfig file: %w", err) + } + if err := os.Setenv("KUBECONFIG", kubePath); err != nil { + return false, fmt.Errorf("updating KUBECONFIG env: %w", err) + } + return true, nil +} + // initScheduler creates the schedule store and registers schedule tools. func (r *Runner) initScheduler(reg *tools.Registry) scheduler.ScheduleStore { schedPath := filepath.Join(r.cfg.WorkDir, ".forge", "memory", "SCHEDULES.md") diff --git a/forge-cli/templates/Dockerfile.tmpl b/forge-cli/templates/Dockerfile.tmpl index 13e22bd..0298288 100644 --- a/forge-cli/templates/Dockerfile.tmpl +++ b/forge-cli/templates/Dockerfile.tmpl @@ -45,12 +45,33 @@ ENV {{$key}}="{{$val}}" ARG FORGE_DEV=false WORKDIR /app +{{- if .HasBinStage}} +COPY --from=bins /usr/local/bin/ /usr/local/bin/ +{{- end}} {{- if .Runtime.DepsFile}} COPY --from=deps /app/ . {{- end}} COPY . . 
+RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates && rm -rf /var/lib/apt/lists/* +{{- if .ForgeFramework}} +{{- if eq .ForgeVersion "latest"}} +RUN apt-get update && apt-get install -y --no-install-recommends curl ca-certificates && \ + ARCH=$(uname -m | sed 's/aarch64/arm64/;s/x86_64/x86_64/') && \ + curl -fsSL -o /tmp/forge.tar.gz "https://github.com/initializ/forge/releases/latest/download/forge-Linux-${ARCH}.tar.gz" && \ + tar xz -C /usr/local/bin forge -f /tmp/forge.tar.gz && rm /tmp/forge.tar.gz && \ + chmod +x /usr/local/bin/forge && \ + apt-get purge -y curl && apt-get autoremove -y && rm -rf /var/lib/apt/lists/* +{{- else}} +RUN apt-get update && apt-get install -y --no-install-recommends curl ca-certificates && \ + ARCH=$(uname -m | sed 's/aarch64/arm64/;s/x86_64/x86_64/') && \ + curl -fsSL -o /tmp/forge.tar.gz "https://github.com/initializ/forge/releases/download/{{.ForgeVersion}}/forge-Linux-${ARCH}.tar.gz" && \ + tar xz -C /usr/local/bin forge -f /tmp/forge.tar.gz && rm /tmp/forge.tar.gz && \ + chmod +x /usr/local/bin/forge && \ + apt-get purge -y curl && apt-get autoremove -y && rm -rf /var/lib/apt/lists/* +{{- end}} +{{- end}} {{- if .HasSkills}} COPY compiled/skills/ /app/skills/ diff --git a/forge-cli/templates/deployment.yaml.tmpl b/forge-cli/templates/deployment.yaml.tmpl index 8c55511..c718e9f 100644 --- a/forge-cli/templates/deployment.yaml.tmpl +++ b/forge-cli/templates/deployment.yaml.tmpl @@ -28,34 +28,25 @@ spec: ports: - containerPort: {{.Runtime.Port}} {{- end}} - {{- if .Runtime.Env}} + {{- if or .Runtime.Env .RequiredEnvVars .OptionalEnvVars}} env: {{- range $key, $val := .Runtime.Env}} - name: {{$key}} value: "{{$val}}" {{- end}} - - name: FORGE_API_KEY + {{- range .RequiredEnvVars}} + - name: {{.}} valueFrom: secretKeyRef: - name: {{.AgentID}}-secrets - key: api-key - optional: true - - name: FORGE_BASE_URL - valueFrom: - secretKeyRef: - name: {{.AgentID}}-secrets - key: base-url - optional: true - - 
name: FORGE_TEMPERATURE - valueFrom: - secretKeyRef: - name: {{.AgentID}}-secrets - key: temperature - optional: true - - name: FORGE_MAX_TOKENS + name: {{$.AgentID}}-secrets + key: {{.}} + {{- end}} + {{- range .OptionalEnvVars}} + - name: {{.}} valueFrom: secretKeyRef: - name: {{.AgentID}}-secrets - key: max-tokens + name: {{$.AgentID}}-secrets + key: {{.}} optional: true + {{- end}} {{- end}} diff --git a/forge-cli/templates/init/SKILL.md.tmpl b/forge-cli/templates/init/SKILL.md.tmpl deleted file mode 100644 index 3065d8c..0000000 --- a/forge-cli/templates/init/SKILL.md.tmpl +++ /dev/null @@ -1,19 +0,0 @@ -# {{.Name}} Skills - -## Tool: example_tool - -A sample tool that demonstrates tool integration. - -**Input:** A text query. - -**Output:** The processed result. -{{- range .Tools}} - -## Tool: {{.Name}} - -Description for {{.Name}}. - -**Input:** TBD - -**Output:** TBD -{{- end}} diff --git a/forge-cli/templates/init/forge.yaml.tmpl b/forge-cli/templates/init/forge.yaml.tmpl index 9a874be..6ebf375 100644 --- a/forge-cli/templates/init/forge.yaml.tmpl +++ b/forge-cli/templates/init/forge.yaml.tmpl @@ -41,11 +41,6 @@ builtin_tools: - {{.}} {{- end}} {{- end}} -{{- if .SkillEntries}} - -skills: - path: SKILL.md -{{- end}} {{- if .EgressDomains}} egress: diff --git a/forge-cli/templates/secrets.yaml.tmpl b/forge-cli/templates/secrets.yaml.tmpl index ab0c773..7a787d4 100644 --- a/forge-cli/templates/secrets.yaml.tmpl +++ b/forge-cli/templates/secrets.yaml.tmpl @@ -4,16 +4,12 @@ metadata: name: {{.AgentID}}-secrets labels: app: {{.AgentID}} - {{- if .RequiredEnvVars}} + {{- if or .RequiredEnvVars .OptionalEnvVars}} annotations: forge.initializ.ai/generated: "true" {{- end}} type: Opaque stringData: - api-key: "" - base-url: "" - temperature: "" - max-tokens: "" {{- range .RequiredEnvVars}} {{.}}: "" {{- end}} @@ -21,3 +17,7 @@ stringData: # optional {{.}}: "" {{- end}} +{{- if not (or .RequiredEnvVars .OptionalEnvVars)}} + # No env vars discovered from skills; add 
your secrets here. + # EXAMPLE_KEY: "" +{{- end}} diff --git a/forge-cli/tools/cli_execute.go b/forge-cli/tools/cli_execute.go index 1728999..b97d418 100644 --- a/forge-cli/tools/cli_execute.go +++ b/forge-cli/tools/cli_execute.go @@ -286,9 +286,13 @@ func (t *CLIExecuteTool) buildEnv(binary string) []string { env = append(env, "GH_CONFIG_DIR="+filepath.Join(realHome, ".config", "gh")) } case "kubectl", "helm": - // Preserve KUBECONFIG so kubectl/helm find cluster credentials at + // Preserve KUBECONFIG so kubectl/helm find cluster credentials. + // If KUBECONFIG is explicitly set (e.g. via env or materialized + // from inline content), pass it through. Otherwise fall back to // the real ~/.kube/config when HOME has been overridden. - if _, ok := os.LookupEnv("KUBECONFIG"); !ok { + if kubecfg, ok := os.LookupEnv("KUBECONFIG"); ok { + env = append(env, "KUBECONFIG="+kubecfg) + } else { defaultKubeconfig := filepath.Join(realHome, ".kube", "config") if _, err := os.Stat(defaultKubeconfig); err == nil { env = append(env, "KUBECONFIG="+defaultKubeconfig) diff --git a/forge-core/auth/middleware.go b/forge-core/auth/middleware.go index d397e25..e94bbb3 100644 --- a/forge-core/auth/middleware.go +++ b/forge-core/auth/middleware.go @@ -1,9 +1,12 @@ package auth import ( + "bytes" "encoding/json" + "fmt" "net/http" "strings" + "time" ) // Config controls bearer-token authentication for the A2A server. @@ -11,9 +14,18 @@ type Config struct { // Enabled controls whether authentication is enforced. Enabled bool - // Token is the expected bearer token value. + // Token is the expected bearer token value (local validation). Token string + // AuthURL is an external auth provider endpoint. When set, the middleware + // forwards the bearer token to this URL via POST for validation instead + // of comparing against the local Token value. + AuthURL string + + // AuthOrgID is the default org_id sent to the external auth provider. 
+ // Overridden per-request by the X-Org-ID header when present. + AuthOrgID string + // SkipPaths maps "METHOD /path" keys that bypass authentication. // Example: "GET /" → true allows unauthenticated GET on root. SkipPaths map[string]bool @@ -23,6 +35,54 @@ type Config struct { OnAuth func(r *http.Request, success bool) } +// authHTTPClient is a shared client with reasonable timeouts for auth requests. +var authHTTPClient = &http.Client{Timeout: 10 * time.Second} + +// externalVerifyRequest is the request body sent to the external auth URL. +type externalVerifyRequest struct { + Token string `json:"token"` + OrgID string `json:"org_id"` +} + +// externalVerifyResponse is the response from the external auth URL. +type externalVerifyResponse struct { + Valid bool `json:"valid"` + Error string `json:"error,omitempty"` + UserID string `json:"user_id,omitempty"` + OrgID string `json:"org_id,omitempty"` + Email string `json:"email,omitempty"` + WorkspaceID string `json:"workspace_id,omitempty"` +} + +// validateExternal sends the bearer token to the external auth URL and returns +// whether the token is valid. 
+func validateExternal(authURL, token, orgID string) (bool, error) { + body, err := json.Marshal(externalVerifyRequest{Token: token, OrgID: orgID}) + if err != nil { + return false, fmt.Errorf("marshalling auth request: %w", err) + } + + resp, err := authHTTPClient.Post(authURL, "application/json", bytes.NewReader(body)) + if err != nil { + return false, fmt.Errorf("calling auth URL: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode == http.StatusUnauthorized { + return false, nil + } + if resp.StatusCode != http.StatusOK { + return false, fmt.Errorf("auth URL returned status %d", resp.StatusCode) + } + + var result externalVerifyResponse + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return false, fmt.Errorf("decoding auth response: %w", err) + } + + return result.Valid, nil +} + // DefaultSkipPaths returns the default set of public endpoints // that do not require authentication (agent card, health checks). func DefaultSkipPaths() map[string]bool { @@ -46,6 +106,7 @@ type errorResponse struct { // Middleware returns an http.Handler that enforces bearer token authentication. // If cfg.Enabled is false, requests pass through without checks. +// When cfg.AuthURL is set, tokens are validated against the external provider. func Middleware(cfg Config) func(http.Handler) http.Handler { return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -63,6 +124,42 @@ func Middleware(cfg Config) func(http.Handler) http.Handler { // Extract bearer token from Authorization header. token := extractBearerToken(r) + if token == "" { + authFail(w, r, cfg, "valid bearer token required") + return + } + + // When external auth is configured, first check the internal + // token (used by channel adapter loopback calls), then fall + // through to the external auth provider for other tokens. 
+ if cfg.AuthURL != "" { + // Accept internal token for loopback (channel adapters) + if cfg.Token != "" && ValidateToken(token, cfg.Token) { + if cfg.OnAuth != nil { + cfg.OnAuth(r, true) + } + next.ServeHTTP(w, r) + return + } + // Use org_id from request header (multiple header names), fall back to config. + orgID := extractOrgID(r, cfg.AuthOrgID) + valid, err := validateExternal(cfg.AuthURL, token, orgID) + if err != nil { + authFail(w, r, cfg, "auth provider error") + return + } + if valid { + if cfg.OnAuth != nil { + cfg.OnAuth(r, true) + } + next.ServeHTTP(w, r) + return + } + authFail(w, r, cfg, "token rejected by auth provider") + return + } + + // Local token validation. if ValidateToken(token, cfg.Token) { if cfg.OnAuth != nil { cfg.OnAuth(r, true) @@ -71,20 +168,36 @@ func Middleware(cfg Config) func(http.Handler) http.Handler { return } - // Authentication failed. - if cfg.OnAuth != nil { - cfg.OnAuth(r, false) - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusUnauthorized) - json.NewEncoder(w).Encode(errorResponse{ //nolint:errcheck - Error: "unauthorized", - Message: "valid bearer token required", - }) + authFail(w, r, cfg, "valid bearer token required") }) } } +// authFail sends a 401 response and fires the OnAuth callback. +func authFail(w http.ResponseWriter, r *http.Request, cfg Config, msg string) { + if cfg.OnAuth != nil { + cfg.OnAuth(r, false) + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusUnauthorized) + json.NewEncoder(w).Encode(errorResponse{ //nolint:errcheck + Error: "unauthorized", + Message: msg, + }) +} + +// extractOrgID reads the org ID from the request headers, checking multiple +// common header names: "X-Org-ID", "org-id", "org_id". Falls back to the +// provided default if none are set. 
+func extractOrgID(r *http.Request, fallback string) string { + for _, h := range []string{"X-Org-ID", "org-id", "org_id"} { + if v := r.Header.Get(h); v != "" { + return v + } + } + return fallback +} + // extractBearerToken extracts the token from "Authorization: Bearer ". func extractBearerToken(r *http.Request) string { auth := r.Header.Get("Authorization") diff --git a/forge-core/compiler/agentspec_gen.go b/forge-core/compiler/agentspec_gen.go index 689be5c..263b168 100644 --- a/forge-core/compiler/agentspec_gen.go +++ b/forge-core/compiler/agentspec_gen.go @@ -21,6 +21,13 @@ func ConfigToAgentSpec(cfg *types.ForgeConfig) *agentspec.AgentSpec { } fields := strings.Fields(cfg.Entrypoint) + // Default entrypoint for forge framework agents + if cfg.Framework == "forge" && len(fields) == 0 { + fields = []string{"forge", "run", "--host", "0.0.0.0"} + if len(cfg.Channels) > 0 { + fields = append(fields, "--with", strings.Join(cfg.Channels, ",")) + } + } spec.Runtime = &agentspec.RuntimeConfig{ Image: InferBaseImage(fields), Entrypoint: fields, @@ -86,6 +93,8 @@ func InferBaseImage(entrypoint []string) string { return "golang:1.23-alpine" case entrypoint[0] == "node": return "node:20-slim" + case entrypoint[0] == "forge": + return "debian:bookworm-slim" default: return "ubuntu:latest" } diff --git a/forge-core/compiler/template_data.go b/forge-core/compiler/template_data.go index 798dfe9..ba0dfee 100644 --- a/forge-core/compiler/template_data.go +++ b/forge-core/compiler/template_data.go @@ -28,6 +28,13 @@ type TemplateSpecData struct { RequiredEnvVars []string OptionalEnvVars []string RequiredBins []string + + // Framework + ForgeFramework bool + ForgeVersion string // forge CLI version for GitHub release download (e.g. "v0.9.0") + + // Multi-stage build + HasBinStage bool // true when a bins stage exists with installed binaries } // TemplateRuntimeData holds runtime-specific template data. 
@@ -113,5 +120,26 @@ func BuildTemplateDataFromContext(spec *agentspec.AgentSpec, bc *pipeline.BuildC d.RequiredBins = spec.Requirements.Bins } + // Set forge framework flag and version. + // Skip the remote download when a local binary override for "forge" is provided + // (--local-bin forge=... or forge.yaml bin_overrides.forge.local), since the + // local binary is already installed via the bins stage. + if bc.Config != nil && bc.Config.Framework == "forge" { + hasLocalForge := false + if override, ok := bc.Config.Package.BinOverrides["forge"]; ok && override.LocalPath != "" { + hasLocalForge = true + } + if !hasLocalForge { + d.ForgeFramework = true + v := bc.ForgeCLIVersion + if v == "" || v == "dev" { + v = "latest" + } else if v[0] != 'v' { + v = "v" + v + } + d.ForgeVersion = v + } + } + return d } diff --git a/forge-core/llm/providers/openai.go b/forge-core/llm/providers/openai.go index bf54f51..a7315fc 100644 --- a/forge-core/llm/providers/openai.go +++ b/forge-core/llm/providers/openai.go @@ -136,7 +136,7 @@ type streamOptions struct { type openaiMessage struct { Role string `json:"role"` - Content string `json:"content,omitempty"` + Content *string `json:"content,omitempty"` ToolCalls []llm.ToolCall `json:"tool_calls,omitempty"` ToolCallID string `json:"tool_call_id,omitempty"` Name string `json:"name,omitempty"` @@ -150,13 +150,21 @@ func (c *OpenAIClient) toOpenAIRequest(req *llm.ChatRequest, stream bool) openai msgs := make([]openaiMessage, len(req.Messages)) for i, m := range req.Messages { - msgs[i] = openaiMessage{ + msg := openaiMessage{ Role: m.Role, - Content: m.Content, ToolCalls: m.ToolCalls, ToolCallID: m.ToolCallID, Name: m.Name, } + // Assistant messages with tool_calls may omit content (valid per OpenAI spec). + // All other roles must always include content as a string. + if m.Role == "assistant" && len(m.ToolCalls) > 0 && m.Content == "" { + // Leave Content nil — omitempty will omit the field entirely. 
+ } else { + content := m.Content + msg.Content = &content + } + msgs[i] = msg } r := openaiRequest{ diff --git a/forge-core/packaging/base_image_selector.go b/forge-core/packaging/base_image_selector.go new file mode 100644 index 0000000..1e80d87 --- /dev/null +++ b/forge-core/packaging/base_image_selector.go @@ -0,0 +1,39 @@ +package packaging + +// BaseImage holds the selected base image information. +type BaseImage struct { + Image string // e.g. "debian:bookworm-slim", "alpine:3.20" + IsAlpine bool +} + +// SelectBaseImage chooses the appropriate base image based on resolved binaries and config. +// Priority: cfg.BaseImage → alpine flag → RequiresUbuntu detection → default debian:bookworm-slim. +func SelectBaseImage(resolutions []BinResolution, baseImage string, alpine bool) BaseImage { + // 1. Explicit base image from config + if baseImage != "" { + return BaseImage{Image: baseImage, IsAlpine: alpine} + } + + // 2. Check if any binary requires Ubuntu (incompatible with Alpine) + requiresUbuntu := false + for _, r := range resolutions { + if r.RequiresUbuntu { + requiresUbuntu = true + break + } + } + + // 3. Alpine requested and possible + if alpine && !requiresUbuntu { + return BaseImage{Image: "alpine:3.20", IsAlpine: true} + } + + // 4. Alpine requested but blocked + if alpine && requiresUbuntu { + // Fall through to debian — caller should warn + return BaseImage{Image: "debian:bookworm-slim", IsAlpine: false} + } + + // 5. 
Default + return BaseImage{Image: "debian:bookworm-slim", IsAlpine: false} +} diff --git a/forge-core/packaging/base_image_selector_test.go b/forge-core/packaging/base_image_selector_test.go new file mode 100644 index 0000000..0689f5f --- /dev/null +++ b/forge-core/packaging/base_image_selector_test.go @@ -0,0 +1,54 @@ +package packaging + +import "testing" + +func TestSelectBaseImage_Default(t *testing.T) { + bi := SelectBaseImage(nil, "", false) + if bi.Image != "debian:bookworm-slim" { + t.Errorf("Image = %q, want debian:bookworm-slim", bi.Image) + } + if bi.IsAlpine { + t.Error("should not be alpine") + } +} + +func TestSelectBaseImage_ExplicitOverride(t *testing.T) { + bi := SelectBaseImage(nil, "ubuntu:24.04", false) + if bi.Image != "ubuntu:24.04" { + t.Errorf("Image = %q, want ubuntu:24.04", bi.Image) + } +} + +func TestSelectBaseImage_Alpine(t *testing.T) { + bi := SelectBaseImage(nil, "", true) + if bi.Image != "alpine:3.20" { + t.Errorf("Image = %q, want alpine:3.20", bi.Image) + } + if !bi.IsAlpine { + t.Error("should be alpine") + } +} + +func TestSelectBaseImage_AlpineBlockedByUbuntu(t *testing.T) { + resolutions := []BinResolution{ + {Name: "playwright", RequiresUbuntu: true}, + } + bi := SelectBaseImage(resolutions, "", true) + if bi.Image != "debian:bookworm-slim" { + t.Errorf("Image = %q, want debian:bookworm-slim (ubuntu required)", bi.Image) + } + if bi.IsAlpine { + t.Error("should not be alpine when ubuntu is required") + } +} + +func TestSelectBaseImage_NoUbuntuRequired(t *testing.T) { + resolutions := []BinResolution{ + {Name: "jq"}, + {Name: "curl"}, + } + bi := SelectBaseImage(resolutions, "", true) + if bi.Image != "alpine:3.20" { + t.Errorf("Image = %q, want alpine:3.20", bi.Image) + } +} diff --git a/forge-core/packaging/bin_classifier.go b/forge-core/packaging/bin_classifier.go new file mode 100644 index 0000000..a714dc2 --- /dev/null +++ b/forge-core/packaging/bin_classifier.go @@ -0,0 +1,366 @@ +package packaging + +import ( + "fmt" + 
+ "github.com/initializ/forge/forge-core/types" + "github.com/initializ/forge/forge-skills/contract" + "github.com/initializ/forge/forge-skills/registry" +) + +// InstallMethod describes how a binary will be installed. +type InstallMethod string + +const ( + MethodApt InstallMethod = "apt" + MethodApk InstallMethod = "apk" + MethodDirectURL InstallMethod = "direct-url" + MethodCustomRun InstallMethod = "custom-run" + MethodImageCopy InstallMethod = "image-copy" + MethodLocalFile InstallMethod = "local-file" // local binary copied into build context + MethodSkip InstallMethod = "skip" // dependency already provided by another binary +) + +// BinResolution is the resolved install plan for a single binary. +type BinResolution struct { + Name string + Method InstallMethod + Package string // apt/apk package name + URL string // expanded URL for direct download + Dest string // install destination path + Chmod string // permission bits + RunLines []string // custom RUN commands + Image string // companion image for image-copy + LocalPath string // host file path for local-file method + Version string // resolved version + Optional bool + RequiresUbuntu bool + RequiresFirst []string // dependencies +} + +// BinClassifier resolves binary requirements into install plans. +type BinClassifier struct { + cfg types.PackageConfig + slim bool + alpine bool + reg *registry.ImageRegistry +} + +// NewBinClassifier creates a classifier with the given config. +func NewBinClassifier(cfg types.PackageConfig, slim, alpine bool) (*BinClassifier, error) { + reg, err := registry.Default() + if err != nil { + return nil, fmt.Errorf("loading image registry: %w", err) + } + return &BinClassifier{cfg: cfg, slim: slim, alpine: alpine, reg: reg}, nil +} + +// Classify resolves all bin requirements into install plans. +// Returns (resolutions, warnings, error). 
+func (c *BinClassifier) Classify(manifest *BinManifest) ([]BinResolution, []string, error) { + var resolutions []BinResolution + var warnings []string + seen := make(map[string]bool) + + for _, req := range manifest.Requirements { + if seen[req.Name] { + continue + } + seen[req.Name] = true + + res, w, err := c.resolveOne(req) + if err != nil { + return nil, nil, fmt.Errorf("resolving %q: %w", req.Name, err) + } + if w != "" { + warnings = append(warnings, w) + } + resolutions = append(resolutions, res) + } + + // Topological sort for dependency ordering + sorted, err := topoSort(resolutions) + if err != nil { + return nil, nil, err + } + + return sorted, warnings, nil +} + +// resolveOne resolves a single binary using the priority chain: +// 0. Local file override → 1. Skill-local override → 2. forge.yaml override → 3. Registry → 4. Best-effort apt +func (c *BinClassifier) resolveOne(req contract.BinRequirement) (BinResolution, string, error) { + res := BinResolution{ + Name: req.Name, + Optional: req.Optional, + Dest: req.Dest, + Chmod: req.Chmod, + } + + // 0. Local file override (highest priority) + if override, ok := c.cfg.BinOverrides[req.Name]; ok && override.LocalPath != "" { + return c.applyLocalOverride(res, override), "", nil + } + + // 1. Skill-local override (fields set directly on BinRequirement) + if req.DirectURL != "" || len(req.CustomLines) > 0 || req.AptPackage != "" || req.ApkPackage != "" { + return c.applySkillOverride(res, req) + } + + // 2. forge.yaml override + if override, ok := c.cfg.BinOverrides[req.Name]; ok { + return c.applyConfigOverride(res, override, req.Version) + } + + // 3. Registry lookup + if entry, ok := c.reg.Lookup(req.Name); ok { + return c.applyRegistry(res, entry, req.Version) + } + + // 4. 
Best-effort: assume apt package name matches binary name + warning := fmt.Sprintf("binary %q not found in registry; assuming apt package %q", req.Name, req.Name) + if c.alpine { + res.Method = MethodApk + res.Package = req.Name + } else { + res.Method = MethodApt + res.Package = req.Name + } + return res, warning, nil +} + +func (c *BinClassifier) applyLocalOverride(res BinResolution, override types.BinOverride) BinResolution { + res.Method = MethodLocalFile + res.LocalPath = override.LocalPath + if override.Dest != "" { + res.Dest = override.Dest + } else if res.Dest == "" { + res.Dest = "/usr/local/bin/" + res.Name + } + if override.Chmod != "" { + res.Chmod = override.Chmod + } else if res.Chmod == "" { + res.Chmod = "0755" + } + return res +} + +func (c *BinClassifier) applySkillOverride(res BinResolution, req contract.BinRequirement) (BinResolution, string, error) { + if len(req.CustomLines) > 0 { + res.Method = MethodCustomRun + res.RunLines = expandRunLines(req.CustomLines, req.Version) + return res, "", nil + } + if req.DirectURL != "" { + expanded, err := registry.ExpandTemplate(req.DirectURL, req.Version) + if err != nil { + return res, "", err + } + res.Method = MethodDirectURL + res.URL = expanded + if res.Dest == "" { + res.Dest = "/usr/local/bin/" + req.Name + } + if res.Chmod == "" { + res.Chmod = "0755" + } + return res, "", nil + } + if c.alpine && req.ApkPackage != "" { + res.Method = MethodApk + res.Package = req.ApkPackage + return res, "", nil + } + if req.AptPackage != "" { + res.Method = MethodApt + res.Package = req.AptPackage + return res, "", nil + } + // Fallback for alpine when only apt specified + if req.AptPackage != "" { + res.Method = MethodApt + res.Package = req.AptPackage + } + return res, "", nil +} + +func (c *BinClassifier) applyConfigOverride(res BinResolution, override types.BinOverride, version string) (BinResolution, string, error) { + if len(override.CustomLines) > 0 { + res.Method = MethodCustomRun + res.RunLines = 
expandRunLines(override.CustomLines, version) + return res, "", nil + } + if override.DirectURL != "" { + expanded, err := registry.ExpandTemplate(override.DirectURL, version) + if err != nil { + return res, "", err + } + res.Method = MethodDirectURL + res.URL = expanded + if override.Dest != "" { + res.Dest = override.Dest + } else if res.Dest == "" { + res.Dest = "/usr/local/bin/" + res.Name + } + if override.Chmod != "" { + res.Chmod = override.Chmod + } else if res.Chmod == "" { + res.Chmod = "0755" + } + return res, "", nil + } + if c.alpine && override.ApkPackage != "" { + res.Method = MethodApk + res.Package = override.ApkPackage + return res, "", nil + } + if override.AptPackage != "" { + res.Method = MethodApt + res.Package = override.AptPackage + return res, "", nil + } + return res, "", nil +} + +func (c *BinClassifier) applyRegistry(res BinResolution, entry registry.RegistryEntry, reqVersion string) (BinResolution, string, error) { + version := entry.ResolveVersion(reqVersion) + res.Version = version + res.RequiresUbuntu = entry.RequiresUbuntu + res.RequiresFirst = entry.RequiresFirst + + // Heavy binaries use companion image + if entry.Heavy && entry.Image != "" { + expanded, err := registry.ExpandTemplate(entry.Image, version) + if err != nil { + return res, "", err + } + res.Method = MethodImageCopy + res.Image = expanded + return res, "", nil + } + + // Custom run lines + if len(entry.Run) > 0 { + res.Method = MethodCustomRun + res.RunLines = expandRunLines(entry.Run, version) + return res, "", nil + } + + // Direct URL + if entry.URL != "" && !c.alpine { + expanded, err := registry.ExpandTemplate(entry.URL, version) + if err != nil { + return res, "", err + } + res.Method = MethodDirectURL + res.URL = expanded + if entry.Dest != "" { + res.Dest = entry.Dest + } else if res.Dest == "" { + res.Dest = "/usr/local/bin/" + res.Name + } + if entry.Chmod != "" { + res.Chmod = entry.Chmod + } else if res.Chmod == "" { + res.Chmod = "0755" + } + return res, 
"", nil + } + + // Package manager + if c.alpine { + if entry.Apk != "" { + res.Method = MethodApk + res.Package = entry.Apk + } else if entry.Apt != "" { + // Alpine but only apt available — warn + res.Method = MethodApt + res.Package = entry.Apt + return res, fmt.Sprintf("binary %q has no apk package; falling back to apt (may fail on Alpine)", res.Name), nil + } else { + res.Method = MethodApt + res.Package = res.Name + return res, fmt.Sprintf("binary %q has no package info; assuming apt package %q", res.Name, res.Name), nil + } + } else { + if entry.Apt != "" { + res.Method = MethodApt + res.Package = entry.Apt + } else { + res.Method = MethodApt + res.Package = res.Name + } + } + + return res, "", nil +} + +func expandRunLines(lines []string, version string) []string { + if version == "" { + return lines + } + expanded := make([]string, len(lines)) + for i, line := range lines { + result, err := registry.ExpandTemplate(line, version) + if err != nil { + expanded[i] = line // keep original on error + } else { + expanded[i] = result + } + } + return expanded +} + +// topoSort performs topological sort using Kahn's algorithm based on RequiresFirst. 
+func topoSort(resolutions []BinResolution) ([]BinResolution, error) { + if len(resolutions) <= 1 { + return resolutions, nil + } + + byName := make(map[string]int) + for i, r := range resolutions { + byName[r.Name] = i + } + + // Build in-degree counts and adjacency + inDegree := make(map[int]int) + dependents := make(map[int][]int) // dep index → list of dependent indices + + for i, r := range resolutions { + inDegree[i] = 0 + for _, dep := range r.RequiresFirst { + if depIdx, ok := byName[dep]; ok { + dependents[depIdx] = append(dependents[depIdx], i) + inDegree[i]++ + } + } + } + + // Find all nodes with no incoming edges + var queue []int + for i := range resolutions { + if inDegree[i] == 0 { + queue = append(queue, i) + } + } + + var sorted []BinResolution + for len(queue) > 0 { + idx := queue[0] + queue = queue[1:] + sorted = append(sorted, resolutions[idx]) + + for _, depIdx := range dependents[idx] { + inDegree[depIdx]-- + if inDegree[depIdx] == 0 { + queue = append(queue, depIdx) + } + } + } + + if len(sorted) != len(resolutions) { + return nil, fmt.Errorf("circular dependency detected in bin requirements") + } + + return sorted, nil +} diff --git a/forge-core/packaging/bin_classifier_test.go b/forge-core/packaging/bin_classifier_test.go new file mode 100644 index 0000000..571cd16 --- /dev/null +++ b/forge-core/packaging/bin_classifier_test.go @@ -0,0 +1,328 @@ +package packaging + +import ( + "testing" + + "github.com/initializ/forge/forge-core/types" + "github.com/initializ/forge/forge-skills/contract" +) + +func TestClassify_SimpleApt(t *testing.T) { + c, err := NewBinClassifier(types.PackageConfig{}, false, false) + if err != nil { + t.Fatalf("NewBinClassifier: %v", err) + } + + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "jq"}, + {Name: "curl"}, + }, + } + + resolutions, warnings, err := c.Classify(manifest) + if err != nil { + t.Fatalf("Classify: %v", err) + } + + if len(resolutions) != 2 { + t.Fatalf("expected 2 
resolutions, got %d", len(resolutions)) + } + + for _, r := range resolutions { + if r.Method != MethodApt { + t.Errorf("%s: method = %v, want apt", r.Name, r.Method) + } + } + _ = warnings +} + +func TestClassify_Alpine(t *testing.T) { + c, err := NewBinClassifier(types.PackageConfig{}, false, true) + if err != nil { + t.Fatalf("NewBinClassifier: %v", err) + } + + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "jq"}, + }, + } + + resolutions, _, err := c.Classify(manifest) + if err != nil { + t.Fatalf("Classify: %v", err) + } + + if len(resolutions) != 1 { + t.Fatalf("expected 1 resolution, got %d", len(resolutions)) + } + if resolutions[0].Method != MethodApk { + t.Errorf("method = %v, want apk", resolutions[0].Method) + } + if resolutions[0].Package != "jq" { + t.Errorf("package = %q, want jq", resolutions[0].Package) + } +} + +func TestClassify_RegistryURL(t *testing.T) { + c, err := NewBinClassifier(types.PackageConfig{}, false, false) + if err != nil { + t.Fatalf("NewBinClassifier: %v", err) + } + + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "kubectl"}, + }, + } + + resolutions, _, err := c.Classify(manifest) + if err != nil { + t.Fatalf("Classify: %v", err) + } + + if len(resolutions) != 1 { + t.Fatalf("expected 1 resolution, got %d", len(resolutions)) + } + // kubectl has a URL in registry and custom run is not set, so should use direct-url + r := resolutions[0] + if r.Method != MethodDirectURL { + t.Errorf("method = %v, want direct-url", r.Method) + } + if r.URL == "" { + t.Error("URL should not be empty for kubectl") + } +} + +func TestClassify_SkillOverride(t *testing.T) { + c, err := NewBinClassifier(types.PackageConfig{}, false, false) + if err != nil { + t.Fatalf("NewBinClassifier: %v", err) + } + + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "custom-bin", AptPackage: "custom-pkg"}, + }, + } + + resolutions, _, err := c.Classify(manifest) + if err != 
nil { + t.Fatalf("Classify: %v", err) + } + + if resolutions[0].Method != MethodApt { + t.Errorf("method = %v, want apt", resolutions[0].Method) + } + if resolutions[0].Package != "custom-pkg" { + t.Errorf("package = %q, want custom-pkg", resolutions[0].Package) + } +} + +func TestClassify_ConfigOverride(t *testing.T) { + cfg := types.PackageConfig{ + BinOverrides: map[string]types.BinOverride{ + "jq": {AptPackage: "jq-special"}, + }, + } + c, err := NewBinClassifier(cfg, false, false) + if err != nil { + t.Fatalf("NewBinClassifier: %v", err) + } + + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "jq"}, + }, + } + + resolutions, _, err := c.Classify(manifest) + if err != nil { + t.Fatalf("Classify: %v", err) + } + + if resolutions[0].Package != "jq-special" { + t.Errorf("package = %q, want jq-special (config override)", resolutions[0].Package) + } +} + +func TestClassify_LocalFile(t *testing.T) { + cfg := types.PackageConfig{ + BinOverrides: map[string]types.BinOverride{ + "forge": {LocalPath: "/usr/local/bin/forge"}, + }, + } + c, err := NewBinClassifier(cfg, false, false) + if err != nil { + t.Fatalf("NewBinClassifier: %v", err) + } + + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "forge"}, + }, + } + + resolutions, _, err := c.Classify(manifest) + if err != nil { + t.Fatalf("Classify: %v", err) + } + + if len(resolutions) != 1 { + t.Fatalf("expected 1 resolution, got %d", len(resolutions)) + } + r := resolutions[0] + if r.Method != MethodLocalFile { + t.Errorf("method = %v, want local-file", r.Method) + } + if r.LocalPath != "/usr/local/bin/forge" { + t.Errorf("local path = %q, want /usr/local/bin/forge", r.LocalPath) + } + if r.Dest != "/usr/local/bin/forge" { + t.Errorf("dest = %q, want /usr/local/bin/forge", r.Dest) + } + if r.Chmod != "0755" { + t.Errorf("chmod = %q, want 0755", r.Chmod) + } +} + +func TestClassify_LocalFileOverridesSkill(t *testing.T) { + cfg := types.PackageConfig{ + 
BinOverrides: map[string]types.BinOverride{ + "mybin": {LocalPath: "/tmp/mybin", Dest: "/opt/bin/mybin"}, + }, + } + c, err := NewBinClassifier(cfg, false, false) + if err != nil { + t.Fatalf("NewBinClassifier: %v", err) + } + + // Even though skill sets AptPackage, local file override should win + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "mybin", AptPackage: "mybin-pkg"}, + }, + } + + resolutions, _, err := c.Classify(manifest) + if err != nil { + t.Fatalf("Classify: %v", err) + } + + if resolutions[0].Method != MethodLocalFile { + t.Errorf("method = %v, want local-file (should override skill)", resolutions[0].Method) + } + if resolutions[0].Dest != "/opt/bin/mybin" { + t.Errorf("dest = %q, want /opt/bin/mybin", resolutions[0].Dest) + } +} + +func TestClassify_Unknown(t *testing.T) { + c, err := NewBinClassifier(types.PackageConfig{}, false, false) + if err != nil { + t.Fatalf("NewBinClassifier: %v", err) + } + + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "unknown-thing-xyz"}, + }, + } + + resolutions, warnings, err := c.Classify(manifest) + if err != nil { + t.Fatalf("Classify: %v", err) + } + + if len(warnings) == 0 { + t.Error("expected warning for unknown binary") + } + if resolutions[0].Method != MethodApt { + t.Errorf("method = %v, want apt (best-effort)", resolutions[0].Method) + } +} + +func TestClassify_Dedup(t *testing.T) { + c, err := NewBinClassifier(types.PackageConfig{}, false, false) + if err != nil { + t.Fatalf("NewBinClassifier: %v", err) + } + + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "jq"}, + {Name: "jq"}, + {Name: "curl"}, + }, + } + + resolutions, _, err := c.Classify(manifest) + if err != nil { + t.Fatalf("Classify: %v", err) + } + + if len(resolutions) != 2 { + t.Errorf("expected 2 resolutions (deduped), got %d", len(resolutions)) + } +} + +func TestClassify_Heavy(t *testing.T) { + c, err := NewBinClassifier(types.PackageConfig{}, 
false, false) + if err != nil { + t.Fatalf("NewBinClassifier: %v", err) + } + + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "playwright"}, + }, + } + + resolutions, _, err := c.Classify(manifest) + if err != nil { + t.Fatalf("Classify: %v", err) + } + + if resolutions[0].Method != MethodImageCopy { + t.Errorf("method = %v, want image-copy", resolutions[0].Method) + } + if resolutions[0].Image == "" { + t.Error("image should not be empty for heavy binary") + } +} + +func TestTopoSort_Basic(t *testing.T) { + resolutions := []BinResolution{ + {Name: "aws", RequiresFirst: []string{"unzip"}}, + {Name: "unzip"}, + } + + sorted, err := topoSort(resolutions) + if err != nil { + t.Fatalf("topoSort: %v", err) + } + + if sorted[0].Name != "unzip" { + t.Errorf("expected unzip first, got %s", sorted[0].Name) + } + if sorted[1].Name != "aws" { + t.Errorf("expected aws second, got %s", sorted[1].Name) + } +} + +func TestTopoSort_NoDeps(t *testing.T) { + resolutions := []BinResolution{ + {Name: "jq"}, + {Name: "curl"}, + } + + sorted, err := topoSort(resolutions) + if err != nil { + t.Fatalf("topoSort: %v", err) + } + + if len(sorted) != 2 { + t.Errorf("expected 2 items, got %d", len(sorted)) + } +} diff --git a/forge-core/packaging/bin_manifest.go b/forge-core/packaging/bin_manifest.go new file mode 100644 index 0000000..4497332 --- /dev/null +++ b/forge-core/packaging/bin_manifest.go @@ -0,0 +1,9 @@ +package packaging + +import "github.com/initializ/forge/forge-skills/contract" + +// BinManifest aggregates all binary requirements from skills. 
+type BinManifest struct { + Requirements []contract.BinRequirement + SkillOrigin map[string]string // bin name → skill that declared it +} diff --git a/forge-core/packaging/doc.go b/forge-core/packaging/doc.go new file mode 100644 index 0000000..b6205b6 --- /dev/null +++ b/forge-core/packaging/doc.go @@ -0,0 +1,3 @@ +// Package packaging provides bin classification, base image selection, +// and Dockerfile generation for container packaging of Forge agents. +package packaging diff --git a/forge-core/packaging/dockerfile_generator.go b/forge-core/packaging/dockerfile_generator.go new file mode 100644 index 0000000..9de8c13 --- /dev/null +++ b/forge-core/packaging/dockerfile_generator.go @@ -0,0 +1,175 @@ +package packaging + +import ( + "fmt" + "strings" + + "github.com/initializ/forge/forge-core/types" +) + +// GenerateDockerfile produces a Dockerfile with actual install commands for all resolved binaries. +// Returns (dockerfile content, warnings, error). +func GenerateDockerfile(manifest *BinManifest, cfg types.PackageConfig, alpine, slim bool) (string, []string, error) { + if manifest == nil || len(manifest.Requirements) == 0 { + return "", nil, nil + } + + classifier, err := NewBinClassifier(cfg, slim, alpine) + if err != nil { + return "", nil, err + } + + resolutions, warnings, err := classifier.Classify(manifest) + if err != nil { + return "", nil, fmt.Errorf("classifying binaries: %w", err) + } + + baseImg := SelectBaseImage(resolutions, cfg.BaseImage, alpine) + + // Check if alpine was requested but blocked + if alpine && !baseImg.IsAlpine { + warnings = append(warnings, "Alpine base image requested but a binary requires Ubuntu; using debian:bookworm-slim") + } + + var b strings.Builder + + // Companion stages for heavy (image-copy) binaries + for _, r := range resolutions { + if r.Method == MethodImageCopy && r.Image != "" { + fmt.Fprintf(&b, "FROM %s AS bin-%s\n\n", r.Image, r.Name) + } + } + + // Base stage + fmt.Fprintf(&b, "FROM %s AS bins\n", 
baseImg.Image) + + // Batch apt/apk packages + var aptPkgs, apkPkgs []string + hasDirectURL := false + for _, r := range resolutions { + switch r.Method { + case MethodApt: + aptPkgs = append(aptPkgs, r.Package) + case MethodApk: + apkPkgs = append(apkPkgs, r.Package) + case MethodDirectURL: + hasDirectURL = true + } + } + + // Ensure curl and ca-certificates are available for direct URL downloads + if hasDirectURL && !alpine { + hasCurl := false + for _, p := range aptPkgs { + if p == "curl" { + hasCurl = true + break + } + } + if !hasCurl { + aptPkgs = append([]string{"curl", "ca-certificates"}, aptPkgs...) + } + } + if hasDirectURL && alpine { + hasCurl := false + for _, p := range apkPkgs { + if p == "curl" { + hasCurl = true + break + } + } + if !hasCurl { + apkPkgs = append([]string{"curl", "ca-certificates"}, apkPkgs...) + } + } + + if len(aptPkgs) > 0 { + fmt.Fprintf(&b, "RUN apt-get update && apt-get install -y --no-install-recommends \\\n") + for i, pkg := range aptPkgs { + if i < len(aptPkgs)-1 { + fmt.Fprintf(&b, " %s \\\n", pkg) + } else { + fmt.Fprintf(&b, " %s \\\n", pkg) + } + } + fmt.Fprintf(&b, " && rm -rf /var/lib/apt/lists/*\n") + } + + if len(apkPkgs) > 0 { + fmt.Fprintf(&b, "RUN apk add --no-cache \\\n") + for i, pkg := range apkPkgs { + if i < len(apkPkgs)-1 { + fmt.Fprintf(&b, " %s \\\n", pkg) + } else { + fmt.Fprintf(&b, " %s\n", pkg) + } + } + } + + // Direct URL downloads + for _, r := range resolutions { + if r.Method == MethodDirectURL { + dest := r.Dest + if dest == "" { + dest = "/usr/local/bin/" + r.Name + } + chmod := r.Chmod + if chmod == "" { + chmod = "0755" + } + fmt.Fprintf(&b, "RUN curl -fsSL %q -o %s && chmod %s %s\n", r.URL, dest, chmod, dest) + } + } + + // Custom RUN lines + for _, r := range resolutions { + if r.Method == MethodCustomRun { + for _, line := range r.RunLines { + fmt.Fprintf(&b, "RUN %s\n", line) + } + } + } + + // Image-copy COPY instructions + for _, r := range resolutions { + if r.Method == 
MethodImageCopy { + dest := r.Dest + if dest == "" { + dest = "/usr/local/bin/" + r.Name + } + fmt.Fprintf(&b, "COPY --from=bin-%s %s %s\n", r.Name, dest, dest) + } + } + + // Local file COPY instructions + for _, r := range resolutions { + if r.Method == MethodLocalFile { + dest := r.Dest + if dest == "" { + dest = "/usr/local/bin/" + r.Name + } + chmod := r.Chmod + if chmod == "" { + chmod = "0755" + } + fmt.Fprintf(&b, "COPY .local-bins/%s %s\n", r.Name, dest) + fmt.Fprintf(&b, "RUN chmod %s %s\n", chmod, dest) + } + } + + // PATH extensions + var pathExts []string + for _, r := range resolutions { + if r.Dest != "" && r.Dest != "/usr/local/bin/"+r.Name { + dir := r.Dest[:strings.LastIndex(r.Dest, "/")] + if dir != "" && dir != "/usr/local/bin" && dir != "/usr/bin" { + pathExts = append(pathExts, dir) + } + } + } + if len(pathExts) > 0 { + fmt.Fprintf(&b, "ENV PATH=\"%s:$PATH\"\n", strings.Join(pathExts, ":")) + } + + return b.String(), warnings, nil +} diff --git a/forge-core/packaging/dockerfile_generator_test.go b/forge-core/packaging/dockerfile_generator_test.go new file mode 100644 index 0000000..4ba34e0 --- /dev/null +++ b/forge-core/packaging/dockerfile_generator_test.go @@ -0,0 +1,178 @@ +package packaging + +import ( + "strings" + "testing" + + "github.com/initializ/forge/forge-core/types" + "github.com/initializ/forge/forge-skills/contract" +) + +func TestGenerateDockerfile_NoBins(t *testing.T) { + content, warnings, err := GenerateDockerfile(nil, types.PackageConfig{}, false, false) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if content != "" { + t.Errorf("expected empty content, got %q", content) + } + if len(warnings) != 0 { + t.Errorf("expected no warnings, got %v", warnings) + } +} + +func TestGenerateDockerfile_AptBatch(t *testing.T) { + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "jq"}, + {Name: "curl"}, + }, + } + + content, _, err := GenerateDockerfile(manifest, types.PackageConfig{}, 
false, false) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !strings.Contains(content, "FROM debian:bookworm-slim") { + t.Error("expected debian:bookworm-slim base image") + } + if !strings.Contains(content, "apt-get install") { + t.Error("expected apt-get install") + } + if !strings.Contains(content, "jq") { + t.Error("expected jq package") + } + if !strings.Contains(content, "curl") { + t.Error("expected curl package") + } +} + +func TestGenerateDockerfile_Alpine(t *testing.T) { + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "jq"}, + }, + } + + content, _, err := GenerateDockerfile(manifest, types.PackageConfig{}, true, false) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !strings.Contains(content, "FROM alpine:3.20") { + t.Error("expected alpine:3.20 base image") + } + if !strings.Contains(content, "apk add") { + t.Error("expected apk add") + } +} + +func TestGenerateDockerfile_DirectURL(t *testing.T) { + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "kubectl"}, + }, + } + + content, _, err := GenerateDockerfile(manifest, types.PackageConfig{}, false, false) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !strings.Contains(content, "curl -fsSL") { + t.Error("expected curl download command for kubectl") + } +} + +func TestGenerateDockerfile_CustomBaseImage(t *testing.T) { + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "jq"}, + }, + } + + cfg := types.PackageConfig{BaseImage: "ubuntu:24.04"} + content, _, err := GenerateDockerfile(manifest, cfg, false, false) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !strings.Contains(content, "FROM ubuntu:24.04") { + t.Error("expected custom base image ubuntu:24.04") + } +} + +func TestGenerateDockerfile_Heavy(t *testing.T) { + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "playwright"}, + }, + } + + content, 
_, err := GenerateDockerfile(manifest, types.PackageConfig{}, false, false) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !strings.Contains(content, "FROM mcr.microsoft.com/playwright") { + t.Error("expected playwright companion image stage") + } + if !strings.Contains(content, "COPY --from=bin-playwright") { + t.Error("expected COPY from companion stage") + } +} + +func TestGenerateDockerfile_LocalFile(t *testing.T) { + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "forge"}, + }, + } + + cfg := types.PackageConfig{ + BinOverrides: map[string]types.BinOverride{ + "forge": {LocalPath: "/usr/local/bin/forge"}, + }, + } + + content, _, err := GenerateDockerfile(manifest, cfg, false, false) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !strings.Contains(content, "COPY .local-bins/forge /usr/local/bin/forge") { + t.Errorf("expected COPY .local-bins/forge instruction, got:\n%s", content) + } + if !strings.Contains(content, "RUN chmod 0755 /usr/local/bin/forge") { + t.Errorf("expected chmod instruction, got:\n%s", content) + } +} + +func TestGenerateDockerfile_AlpineBlockedByUbuntu(t *testing.T) { + manifest := &BinManifest{ + Requirements: []contract.BinRequirement{ + {Name: "playwright"}, + {Name: "jq"}, + }, + } + + content, warnings, err := GenerateDockerfile(manifest, types.PackageConfig{}, true, false) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !strings.Contains(content, "FROM debian:bookworm-slim") { + t.Error("expected fallback to debian when alpine blocked") + } + + hasAlpineWarning := false + for _, w := range warnings { + if strings.Contains(w, "Alpine") { + hasAlpineWarning = true + } + } + if !hasAlpineWarning { + t.Error("expected warning about alpine being blocked") + } +} diff --git a/forge-core/pipeline/context.go b/forge-core/pipeline/context.go index c855837..4fe4682 100644 --- a/forge-core/pipeline/context.go +++ b/forge-core/pipeline/context.go @@ 
-26,6 +26,18 @@ type BuildContext struct { SecurityAudit any // *analyzer.AuditReport (avoid import cycle) SkillsCount int ToolCategoryCounts map[string]int + + // Bin resolution for smart Dockerfile generation + BinManifest any // *packaging.BinManifest (avoid import cycle) + PreferAlpine bool + PreferSlim bool + + // ForgeCLIVersion is the version of the forge CLI binary (e.g. "v0.9.0"). + // Used to pull the correct release when framework is "forge". + ForgeCLIVersion string + + // LocalBins maps binary name → host file path (from --local-bin flags). + LocalBins map[string]string } // NewBuildContext creates a BuildContext with the given options and initialized maps. diff --git a/forge-core/types/config.go b/forge-core/types/config.go index 3593bea..77330a3 100644 --- a/forge-core/types/config.go +++ b/forge-core/types/config.go @@ -24,6 +24,7 @@ type ForgeConfig struct { Secrets SecretsConfig `yaml:"secrets,omitempty"` Schedules []ScheduleConfig `yaml:"schedules,omitempty"` CORSOrigins []string `yaml:"cors_origins,omitempty"` + Package PackageConfig `yaml:"package,omitempty"` } // ScheduleConfig defines a recurring scheduled task in forge.yaml. @@ -97,6 +98,25 @@ type ToolRef struct { Config map[string]any `yaml:"config,omitempty"` } +// PackageConfig controls container packaging behavior. +type PackageConfig struct { + BaseImage string `yaml:"base_image,omitempty"` + Alpine bool `yaml:"alpine,omitempty"` + Slim bool `yaml:"slim,omitempty"` + BinOverrides map[string]BinOverride `yaml:"bin_overrides,omitempty"` +} + +// BinOverride provides explicit install instructions for a binary in the container. 
+type BinOverride struct { + AptPackage string `yaml:"apt,omitempty"` + ApkPackage string `yaml:"apk,omitempty"` + DirectURL string `yaml:"url,omitempty"` + Dest string `yaml:"dest,omitempty"` + Chmod string `yaml:"chmod,omitempty"` + CustomLines []string `yaml:"run,omitempty"` + LocalPath string `yaml:"local,omitempty"` // host path to local binary file +} + // ParseForgeConfig parses raw YAML bytes into a ForgeConfig and validates required fields. func ParseForgeConfig(data []byte) (*ForgeConfig, error) { var cfg ForgeConfig diff --git a/forge-skills/analyzer/policy.go b/forge-skills/analyzer/policy.go index 5585f48..333f588 100644 --- a/forge-skills/analyzer/policy.go +++ b/forge-skills/analyzer/policy.go @@ -128,7 +128,9 @@ func entryToDescriptor(entry *contract.SkillEntry) *contract.SkillDescriptor { sd.Tags = entry.Metadata.Tags } if entry.ForgeReqs != nil { - sd.RequiredBins = entry.ForgeReqs.Bins + for _, b := range entry.ForgeReqs.Bins { + sd.RequiredBins = append(sd.RequiredBins, b.Name) + } if entry.ForgeReqs.Env != nil { sd.RequiredEnv = entry.ForgeReqs.Env.Required sd.OneOfEnv = entry.ForgeReqs.Env.OneOf diff --git a/forge-skills/analyzer/report.go b/forge-skills/analyzer/report.go index 1ae8843..450ae72 100644 --- a/forge-skills/analyzer/report.go +++ b/forge-skills/analyzer/report.go @@ -23,8 +23,12 @@ func GenerateReport(registry contract.SkillRegistry, policy SecurityPolicy) (*Au Description: sd.Description, } if len(sd.RequiredBins) > 0 || len(sd.RequiredEnv) > 0 || len(sd.OneOfEnv) > 0 || len(sd.OptionalEnv) > 0 || len(sd.EgressDomains) > 0 { + var binReqs []contract.BinRequirement + for _, name := range sd.RequiredBins { + binReqs = append(binReqs, contract.BinRequirement{Name: name}) + } entry.ForgeReqs = &contract.SkillRequirements{ - Bins: sd.RequiredBins, + Bins: binReqs, } if len(sd.RequiredEnv) > 0 || len(sd.OneOfEnv) > 0 || len(sd.OptionalEnv) > 0 { entry.ForgeReqs.Env = &contract.EnvRequirements{ diff --git 
a/forge-skills/analyzer/report_test.go b/forge-skills/analyzer/report_test.go index 0e39ca4..d9a04ea 100644 --- a/forge-skills/analyzer/report_test.go +++ b/forge-skills/analyzer/report_test.go @@ -13,7 +13,7 @@ func TestGenerateReportFromEntries(t *testing.T) { { Name: "github", ForgeReqs: &contract.SkillRequirements{ - Bins: []string{"gh"}, + Bins: []contract.BinRequirement{{Name: "gh"}}, Env: &contract.EnvRequirements{Required: []string{"GH_TOKEN"}}, }, Metadata: &contract.SkillMetadata{ @@ -48,7 +48,7 @@ func TestGenerateReportFromEntries_PolicyFail(t *testing.T) { { Name: "hacker", ForgeReqs: &contract.SkillRequirements{ - Bins: []string{"nc"}, + Bins: []contract.BinRequirement{{Name: "nc"}}, }, }, } @@ -88,7 +88,7 @@ func TestFormatText(t *testing.T) { { Name: "github", ForgeReqs: &contract.SkillRequirements{ - Bins: []string{"gh"}, + Bins: []contract.BinRequirement{{Name: "gh"}}, Env: &contract.EnvRequirements{Required: []string{"GH_TOKEN"}}, }, Metadata: &contract.SkillMetadata{ @@ -139,7 +139,7 @@ func TestAggregateScore_Average(t *testing.T) { entries := []contract.SkillEntry{ {Name: "a"}, {Name: "b", ForgeReqs: &contract.SkillRequirements{ - Bins: []string{"bash"}, // 15 points + Bins: []contract.BinRequirement{{Name: "bash"}}, // 15 points }}, } diff --git a/forge-skills/analyzer/scoring.go b/forge-skills/analyzer/scoring.go index 34fd9bd..8ea0462 100644 --- a/forge-skills/analyzer/scoring.go +++ b/forge-skills/analyzer/scoring.go @@ -74,7 +74,9 @@ func AnalyzeSkillEntry(entry *contract.SkillEntry, hasScript bool) SkillRiskAsse var reqEnv, oneOfEnv, optEnv []string if entry.ForgeReqs != nil { - bins = entry.ForgeReqs.Bins + for _, b := range entry.ForgeReqs.Bins { + bins = append(bins, b.Name) + } if entry.ForgeReqs.Env != nil { reqEnv = entry.ForgeReqs.Env.Required oneOfEnv = entry.ForgeReqs.Env.OneOf diff --git a/forge-skills/contract/types.go b/forge-skills/contract/types.go index 26bd603..f34a1ad 100644 --- a/forge-skills/contract/types.go +++ 
b/forge-skills/contract/types.go @@ -1,5 +1,11 @@ package contract +import ( + "fmt" + + "gopkg.in/yaml.v3" +) + // SkillDescriptor describes a skill available in a registry. type SkillDescriptor struct { Name string @@ -70,9 +76,49 @@ type SkillOutputFilter struct { Action string `yaml:"action" json:"action"` // "block" or "redact" } +// BinRequirement describes a binary dependency with optional install metadata. +// It supports both scalar YAML ("jq") and mapping YAML ({name: jq, version: "1.6"}). +type BinRequirement struct { + Name string `yaml:"name" json:"name"` + Version string `yaml:"version,omitempty" json:"version,omitempty"` + Optional bool `yaml:"optional,omitempty" json:"optional,omitempty"` + AptPackage string `yaml:"apt,omitempty" json:"apt,omitempty"` + ApkPackage string `yaml:"apk,omitempty" json:"apk,omitempty"` + DirectURL string `yaml:"url,omitempty" json:"url,omitempty"` + Dest string `yaml:"dest,omitempty" json:"dest,omitempty"` + Chmod string `yaml:"chmod,omitempty" json:"chmod,omitempty"` + CustomLines []string `yaml:"run,omitempty" json:"run,omitempty"` +} + +// UnmarshalYAML handles both scalar ("jq") and mapping ({name: jq, ...}) YAML nodes. +func (b *BinRequirement) UnmarshalYAML(value *yaml.Node) error { + switch value.Kind { + case yaml.ScalarNode: + b.Name = value.Value + if b.Name == "" { + return fmt.Errorf("bin requirement: name cannot be empty") + } + return nil + case yaml.MappingNode: + // Decode into an alias to avoid infinite recursion. + type binReqAlias BinRequirement + var alias binReqAlias + if err := value.Decode(&alias); err != nil { + return fmt.Errorf("bin requirement: %w", err) + } + if alias.Name == "" { + return fmt.Errorf("bin requirement: name is required in mapping form") + } + *b = BinRequirement(alias) + return nil + default: + return fmt.Errorf("bin requirement: expected string or mapping, got %v", value.Kind) + } +} + // SkillRequirements declares CLI binaries and environment variables a skill needs. 
type SkillRequirements struct { - Bins []string `yaml:"bins,omitempty" json:"bins,omitempty"` + Bins []BinRequirement `yaml:"bins,omitempty" json:"bins,omitempty"` Env *EnvRequirements `yaml:"env,omitempty" json:"env,omitempty"` } @@ -111,7 +157,8 @@ type SkillFilter struct { // AggregatedRequirements is the union of all skill requirements. type AggregatedRequirements struct { - Bins []string // union of all bins, deduplicated, sorted + Bins []string // union of all bin names, deduplicated, sorted + BinRequirements []BinRequirement // rich requirements, deduplicated by name (richer entry wins) EnvRequired []string // union of required vars (promoted from optional if needed) EnvOneOf [][]string // separate groups per skill (not merged across skills) EnvOptional []string // union of optional vars minus those promoted to required diff --git a/forge-skills/local/scanner.go b/forge-skills/local/scanner.go index 961e53c..07dd6d5 100644 --- a/forge-skills/local/scanner.go +++ b/forge-skills/local/scanner.go @@ -187,12 +187,17 @@ func extractFromForgeMap(forgeMap map[string]any) (bins, reqEnv, oneOfEnv, optEn return } - // bins + // bins — handle both string items and map items (rich BinRequirement) if binsRaw, ok := reqMap["bins"]; ok { if arr, ok := binsRaw.([]any); ok { for _, v := range arr { - if s, ok := v.(string); ok { - bins = append(bins, s) + switch item := v.(type) { + case string: + bins = append(bins, item) + case map[string]any: + if name, ok := item["name"].(string); ok && name != "" { + bins = append(bins, name) + } } } } diff --git a/forge-skills/parser/parser_test.go b/forge-skills/parser/parser_test.go index e05ee78..ecd6eaa 100644 --- a/forge-skills/parser/parser_test.go +++ b/forge-skills/parser/parser_test.go @@ -4,6 +4,8 @@ import ( "reflect" "strings" "testing" + + "github.com/initializ/forge/forge-skills/contract" ) func TestParse_HeadingFormat(t *testing.T) { @@ -290,8 +292,9 @@ Create a GitHub issue. 
if entries[0].ForgeReqs == nil { t.Fatal("expected non-nil ForgeReqs") } - if !reflect.DeepEqual(entries[0].ForgeReqs.Bins, []string{"curl", "jq"}) { - t.Errorf("Bins = %v, want [curl jq]", entries[0].ForgeReqs.Bins) + wantBins := []contract.BinRequirement{{Name: "curl"}, {Name: "jq"}} + if !reflect.DeepEqual(entries[0].ForgeReqs.Bins, wantBins) { + t.Errorf("Bins = %v, want %v", entries[0].ForgeReqs.Bins, wantBins) } if entries[0].ForgeReqs.Env == nil { t.Fatal("expected non-nil Env") @@ -338,8 +341,9 @@ Does things. if entries[0].ForgeReqs == nil { t.Fatal("expected non-nil ForgeReqs") } - if !reflect.DeepEqual(entries[0].ForgeReqs.Bins, []string{"python"}) { - t.Errorf("Bins = %v, want [python]", entries[0].ForgeReqs.Bins) + wantPythonBins := []contract.BinRequirement{{Name: "python"}} + if !reflect.DeepEqual(entries[0].ForgeReqs.Bins, wantPythonBins) { + t.Errorf("Bins = %v, want %v", entries[0].ForgeReqs.Bins, wantPythonBins) } } @@ -422,8 +426,9 @@ Create a GitHub issue. if entries[0].ForgeReqs == nil { t.Fatal("expected non-nil ForgeReqs") } - if !reflect.DeepEqual(entries[0].ForgeReqs.Bins, []string{"gh"}) { - t.Errorf("Bins = %v, want [gh]", entries[0].ForgeReqs.Bins) + wantGhBins := []contract.BinRequirement{{Name: "gh"}} + if !reflect.DeepEqual(entries[0].ForgeReqs.Bins, wantGhBins) { + t.Errorf("Bins = %v, want %v", entries[0].ForgeReqs.Bins, wantGhBins) } } @@ -661,8 +666,54 @@ Estimate K8s costs. 
if entries[0].ForgeReqs == nil { t.Fatal("expected non-nil ForgeReqs") } - if !reflect.DeepEqual(entries[0].ForgeReqs.Bins, []string{"kubectl"}) { - t.Errorf("Bins = %v, want [kubectl]", entries[0].ForgeReqs.Bins) + wantKubectlBins := []contract.BinRequirement{{Name: "kubectl"}} + if !reflect.DeepEqual(entries[0].ForgeReqs.Bins, wantKubectlBins) { + t.Errorf("Bins = %v, want %v", entries[0].ForgeReqs.Bins, wantKubectlBins) + } +} + +func TestParseWithMetadata_RichBinRequirements(t *testing.T) { + input := `--- +name: infra +description: Infrastructure skill +metadata: + forge: + requires: + bins: + - jq + - name: playwright + version: "1.50.0" + optional: true + - name: kubectl + apt: kubectl +--- +## Tool: infra_tool +Manage infrastructure. +` + entries, _, err := ParseWithMetadata(strings.NewReader(input)) + if err != nil { + t.Fatalf("ParseWithMetadata error: %v", err) + } + if len(entries) != 1 { + t.Fatalf("expected 1 entry, got %d", len(entries)) + } + if entries[0].ForgeReqs == nil { + t.Fatal("expected non-nil ForgeReqs") + } + bins := entries[0].ForgeReqs.Bins + if len(bins) != 3 { + t.Fatalf("expected 3 bins, got %d: %+v", len(bins), bins) + } + // scalar form + if bins[0].Name != "jq" || bins[0].Version != "" { + t.Errorf("bins[0] = %+v, want {Name: jq}", bins[0]) + } + // rich mapping form + if bins[1].Name != "playwright" || bins[1].Version != "1.50.0" || !bins[1].Optional { + t.Errorf("bins[1] = %+v, want {Name: playwright, Version: 1.50.0, Optional: true}", bins[1]) + } + if bins[2].Name != "kubectl" || bins[2].AptPackage != "kubectl" { + t.Errorf("bins[2] = %+v, want {Name: kubectl, Apt: kubectl}", bins[2]) } } diff --git a/forge-skills/registry/image-registry.yaml b/forge-skills/registry/image-registry.yaml new file mode 100644 index 0000000..cfb08d9 --- /dev/null +++ b/forge-skills/registry/image-registry.yaml @@ -0,0 +1,267 @@ +# Image Registry — maps binary names to install methods for container packaging. 
+# +# Fields: +# apt: Debian/Ubuntu package name (default: same as bin name) +# apk: Alpine package name +# url: Direct download URL (supports {{.Version}} template) +# default_version: Version used when skill doesn't specify one +# dest: Install destination (default: /usr/local/bin/) +# chmod: Permission bits (default: "0755") +# heavy: If true, pulled from a companion Docker image instead of apt/url +# image: Companion image for heavy binaries (used with heavy: true) +# requires_ubuntu: If true, forces Ubuntu/Debian base image (incompatible with Alpine) +# requires_first: List of bins that must be installed before this one +# run: Custom RUN lines (replaces apt/url logic entirely) + +bins: + # ── Core CLI tools ─────────────────────────────────────────────── + jq: + apt: jq + apk: jq + yq: + url: "https://github.com/mikefarah/yq/releases/download/v{{.Version}}/yq_linux_amd64" + default_version: "4.44.1" + curl: + apt: curl + apk: curl + wget: + apt: wget + apk: wget + git: + apt: git + apk: git + unzip: + apt: unzip + apk: unzip + zip: + apt: zip + apk: zip + tar: + apt: tar + apk: tar + gzip: + apt: gzip + apk: gzip + make: + apt: make + apk: make + gcc: + apt: gcc + apk: gcc + g++: + apt: g++ + apk: g++ + openssh-client: + apt: openssh-client + apk: openssh-client + rsync: + apt: rsync + apk: rsync + tree: + apt: tree + apk: tree + file: + apt: file + apk: file + bc: + apt: bc + apk: bc + envsubst: + apt: gettext-base + apk: gettext + xargs: + apt: findutils + apk: findutils + sed: + apt: sed + apk: sed + awk: + apt: gawk + apk: gawk + grep: + apt: grep + apk: grep + diffutils: + apt: diffutils + apk: diffutils + + # ── Cloud CLIs ─────────────────────────────────────────────────── + kubectl: + url: "https://dl.k8s.io/release/v{{.Version}}/bin/linux/amd64/kubectl" + default_version: "1.31.0" + helm: + url: "https://get.helm.sh/helm-v{{.Version}}-linux-amd64.tar.gz" + default_version: "3.16.0" + run: + - "curl -fsSL 
https://get.helm.sh/helm-v{{.Version}}-linux-amd64.tar.gz | tar xz -C /tmp" + - "mv /tmp/linux-amd64/helm /usr/local/bin/helm" + - "chmod 0755 /usr/local/bin/helm" + gh: + url: "https://github.com/cli/cli/releases/download/v{{.Version}}/gh_{{.Version}}_linux_amd64.tar.gz" + default_version: "2.60.0" + run: + - "curl -fsSL https://github.com/cli/cli/releases/download/v{{.Version}}/gh_{{.Version}}_linux_amd64.tar.gz | tar xz -C /tmp" + - "mv /tmp/gh_{{.Version}}_linux_amd64/bin/gh /usr/local/bin/gh" + - "chmod 0755 /usr/local/bin/gh" + aws: + run: + - "curl -fsSL https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip -o /tmp/awscliv2.zip" + - "unzip -q /tmp/awscliv2.zip -d /tmp" + - "/tmp/aws/install" + - "rm -rf /tmp/awscliv2.zip /tmp/aws" + requires_first: [unzip] + requires_ubuntu: true + gcloud: + run: + - "curl -fsSL https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-cli-linux-x86_64.tar.gz | tar xz -C /opt" + - "/opt/google-cloud-sdk/install.sh --quiet --path-update true" + requires_ubuntu: true + az: + run: + - "curl -sL https://aka.ms/InstallAzureCLIDeb | bash" + requires_ubuntu: true + terraform: + url: "https://releases.hashicorp.com/terraform/{{.Version}}/terraform_{{.Version}}_linux_amd64.zip" + default_version: "1.9.0" + run: + - "curl -fsSL https://releases.hashicorp.com/terraform/{{.Version}}/terraform_{{.Version}}_linux_amd64.zip -o /tmp/terraform.zip" + - "unzip -q /tmp/terraform.zip -d /usr/local/bin" + - "rm /tmp/terraform.zip" + requires_first: [unzip] + pulumi: + url: "https://get.pulumi.com/releases/sdk/pulumi-v{{.Version}}-linux-x64.tar.gz" + default_version: "3.130.0" + run: + - "curl -fsSL https://get.pulumi.com/releases/sdk/pulumi-v{{.Version}}-linux-x64.tar.gz | tar xz -C /usr/local/bin --strip-components=1" + + # ── Databases ──────────────────────────────────────────────────── + psql: + apt: postgresql-client + apk: postgresql-client + mysql: + apt: default-mysql-client + apk: mysql-client + redis-cli: + apt: 
redis-tools + apk: redis + mongosh: + run: + - "curl -fsSL https://downloads.mongodb.com/compass/mongosh-{{.Version}}-linux-x64.tgz | tar xz -C /tmp" + - "mv /tmp/mongosh-*/bin/mongosh /usr/local/bin/" + - "chmod 0755 /usr/local/bin/mongosh" + default_version: "2.3.0" + sqlite3: + apt: sqlite3 + apk: sqlite + + # ── Languages & runtimes ───────────────────────────────────────── + python3: + apt: python3 + apk: python3 + pip: + apt: python3-pip + apk: py3-pip + node: + url: "https://nodejs.org/dist/v{{.Version}}/node-v{{.Version}}-linux-x64.tar.xz" + default_version: "22.9.0" + run: + - "curl -fsSL https://nodejs.org/dist/v{{.Version}}/node-v{{.Version}}-linux-x64.tar.xz | tar xJ -C /usr/local --strip-components=1" + npm: + requires_first: [node] + npx: + requires_first: [node] + ruby: + apt: ruby-full + apk: ruby + go: + url: "https://go.dev/dl/go{{.Version}}.linux-amd64.tar.gz" + default_version: "1.23.0" + run: + - "curl -fsSL https://go.dev/dl/go{{.Version}}.linux-amd64.tar.gz | tar xz -C /usr/local" + dest: /usr/local/go/bin/go + rustup: + run: + - "curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y" + java: + apt: default-jdk-headless + apk: openjdk17-jre-headless + deno: + url: "https://github.com/denoland/deno/releases/download/v{{.Version}}/deno-x86_64-unknown-linux-gnu.zip" + default_version: "1.46.0" + run: + - "curl -fsSL https://github.com/denoland/deno/releases/download/v{{.Version}}/deno-x86_64-unknown-linux-gnu.zip -o /tmp/deno.zip" + - "unzip -q /tmp/deno.zip -d /usr/local/bin" + - "rm /tmp/deno.zip" + requires_first: [unzip] + bun: + url: "https://github.com/oven-sh/bun/releases/download/bun-v{{.Version}}/bun-linux-x64.zip" + default_version: "1.1.30" + run: + - "curl -fsSL https://github.com/oven-sh/bun/releases/download/bun-v{{.Version}}/bun-linux-x64.zip -o /tmp/bun.zip" + - "unzip -q /tmp/bun.zip -d /tmp" + - "mv /tmp/bun-linux-x64/bun /usr/local/bin/bun" + - "chmod 0755 /usr/local/bin/bun" + - "rm -rf /tmp/bun.zip 
/tmp/bun-linux-x64" + requires_first: [unzip] + + # ── Networking & HTTP ──────────────────────────────────────────── + httpie: + apt: httpie + apk: httpie + nmap: + apt: nmap + apk: nmap + dig: + apt: dnsutils + apk: bind-tools + nslookup: + apt: dnsutils + apk: bind-tools + ping: + apt: iputils-ping + apk: iputils + traceroute: + apt: traceroute + apk: traceroute + netcat: + apt: netcat-openbsd + apk: netcat-openbsd + socat: + apt: socat + apk: socat + openssl: + apt: openssl + apk: openssl + ssh: + apt: openssh-client + apk: openssh-client + + # ── Heavy / companion-image binaries ───────────────────────────── + playwright: + heavy: true + image: "mcr.microsoft.com/playwright:v{{.Version}}-jammy" + default_version: "1.48.0" + requires_ubuntu: true + chromium: + apt: chromium-browser + apk: chromium + ffmpeg: + apt: ffmpeg + apk: ffmpeg + imagemagick: + apt: imagemagick + apk: imagemagick + pandoc: + apt: pandoc + apk: pandoc + wkhtmltopdf: + apt: wkhtmltopdf + graphviz: + apt: graphviz + apk: graphviz + poppler-utils: + apt: poppler-utils + apk: poppler-utils + tesseract: + apt: tesseract-ocr + apk: tesseract-ocr diff --git a/forge-skills/registry/registry.go b/forge-skills/registry/registry.go new file mode 100644 index 0000000..667fcdc --- /dev/null +++ b/forge-skills/registry/registry.go @@ -0,0 +1,106 @@ +// Package registry provides an embedded image registry mapping binary names to install methods. +package registry + +import ( + _ "embed" + "fmt" + "strings" + "sync" + "text/template" + + "gopkg.in/yaml.v3" +) + +//go:embed image-registry.yaml +var registryData []byte + +// RegistryEntry describes how to install a single binary in a container image. 
+type RegistryEntry struct {
+	Name           string   `yaml:"-"`                         // set from map key
+	Apt            string   `yaml:"apt,omitempty"`             // Debian/Ubuntu package
+	Apk            string   `yaml:"apk,omitempty"`             // Alpine package
+	URL            string   `yaml:"url,omitempty"`             // Direct download URL template
+	DefaultVersion string   `yaml:"default_version,omitempty"` // Fallback version
+	Dest           string   `yaml:"dest,omitempty"`            // Install path (default: /usr/local/bin/)
+	Chmod          string   `yaml:"chmod,omitempty"`           // Permission bits (default: "0755")
+	Heavy          bool     `yaml:"heavy,omitempty"`           // Use companion Docker image
+	Image          string   `yaml:"image,omitempty"`           // Companion image template
+	RequiresUbuntu bool     `yaml:"requires_ubuntu,omitempty"` // Incompatible with Alpine
+	RequiresFirst  []string `yaml:"requires_first,omitempty"`  // Dependencies
+	Run            []string `yaml:"run,omitempty"`             // Custom RUN lines
+}
+
+// ImageRegistry holds the full set of known binaries and their install methods.
+type ImageRegistry struct {
+	entries map[string]RegistryEntry
+}
+
+var (
+	defaultRegistry     *ImageRegistry
+	defaultRegistryOnce sync.Once
+	defaultRegistryErr  error
+)
+
+// Default returns the singleton ImageRegistry loaded from the embedded YAML.
+func Default() (*ImageRegistry, error) {
+	defaultRegistryOnce.Do(func() {
+		defaultRegistry, defaultRegistryErr = Load(registryData)
+	})
+	return defaultRegistry, defaultRegistryErr
+}
+
+// Load parses registry YAML data into an ImageRegistry.
+func Load(data []byte) (*ImageRegistry, error) {
+	var raw struct {
+		Bins map[string]RegistryEntry `yaml:"bins"`
+	}
+	if err := yaml.Unmarshal(data, &raw); err != nil {
+		return nil, fmt.Errorf("parsing image registry: %w", err)
+	}
+
+	// Set Name from map key
+	for k, v := range raw.Bins {
+		v.Name = k
+		raw.Bins[k] = v
+	}
+
+	return &ImageRegistry{entries: raw.Bins}, nil
+}
+
+// Lookup returns the registry entry for a binary, if known.
+func (r *ImageRegistry) Lookup(binName string) (RegistryEntry, bool) {
+	e, ok := r.entries[binName]
+	return e, ok
+}
+
+// All returns all registry entries.
+func (r *ImageRegistry) All() map[string]RegistryEntry {
+	return r.entries
+}
+
+// ExpandTemplate renders a Go template string with Version substitution.
+func ExpandTemplate(tmplStr, version string) (string, error) {
+	if !strings.Contains(tmplStr, "{{") {
+		return tmplStr, nil
+	}
+
+	t, err := template.New("").Parse(tmplStr)
+	if err != nil {
+		return "", fmt.Errorf("parsing template %q: %w", tmplStr, err)
+	}
+
+	var buf strings.Builder
+	data := struct{ Version string }{Version: version}
+	if err := t.Execute(&buf, data); err != nil {
+		return "", fmt.Errorf("executing template %q: %w", tmplStr, err)
+	}
+	return buf.String(), nil
+}
+
+// ResolveVersion returns the version to use: explicit > default.
+func (e RegistryEntry) ResolveVersion(explicit string) string {
+	if explicit != "" {
+		return explicit
+	}
+	return e.DefaultVersion
+}
diff --git a/forge-skills/registry/registry_test.go b/forge-skills/registry/registry_test.go
new file mode 100644
index 0000000..2e49982
--- /dev/null
+++ b/forge-skills/registry/registry_test.go
@@ -0,0 +1,132 @@
+package registry
+
+import (
+	"testing"
+)
+
+func TestDefault_Loads(t *testing.T) {
+	reg, err := Default()
+	if err != nil {
+		t.Fatalf("Default() error: %v", err)
+	}
+	if reg == nil {
+		t.Fatal("Default() returned nil")
+	}
+	if len(reg.entries) == 0 {
+		t.Fatal("registry has no entries")
+	}
+}
+
+func TestLookup_Known(t *testing.T) {
+	reg, err := Default()
+	if err != nil {
+		t.Fatalf("Default() error: %v", err)
+	}
+
+	tests := []struct {
+		name    string
+		wantApt string
+	}{
+		{"jq", "jq"},
+		{"curl", "curl"},
+		{"psql", "postgresql-client"},
+	}
+
+	for _, tt := range tests {
+		e, ok := reg.Lookup(tt.name)
+		if !ok {
+			t.Errorf("Lookup(%q) not found", tt.name)
+			continue
+		}
+		if e.Apt != tt.wantApt {
+			t.Errorf("Lookup(%q).Apt = %q, want %q",
tt.name, e.Apt, tt.wantApt) + } + } +} + +func TestLookup_Unknown(t *testing.T) { + reg, err := Default() + if err != nil { + t.Fatalf("Default() error: %v", err) + } + _, ok := reg.Lookup("nonexistent-binary-xyz") + if ok { + t.Error("expected Lookup for unknown binary to return false") + } +} + +func TestLookup_Heavy(t *testing.T) { + reg, err := Default() + if err != nil { + t.Fatalf("Default() error: %v", err) + } + e, ok := reg.Lookup("playwright") + if !ok { + t.Fatal("playwright not found in registry") + } + if !e.Heavy { + t.Error("playwright should be marked as heavy") + } + if !e.RequiresUbuntu { + t.Error("playwright should require ubuntu") + } + if e.Image == "" { + t.Error("playwright should have an image template") + } +} + +func TestExpandTemplate(t *testing.T) { + tests := []struct { + tmpl string + version string + want string + }{ + {"https://example.com/v{{.Version}}/bin", "1.2.3", "https://example.com/v1.2.3/bin"}, + {"no-template-here", "1.0", "no-template-here"}, + {"{{.Version}}-{{.Version}}", "2.0", "2.0-2.0"}, + } + + for _, tt := range tests { + got, err := ExpandTemplate(tt.tmpl, tt.version) + if err != nil { + t.Errorf("ExpandTemplate(%q, %q) error: %v", tt.tmpl, tt.version, err) + continue + } + if got != tt.want { + t.Errorf("ExpandTemplate(%q, %q) = %q, want %q", tt.tmpl, tt.version, got, tt.want) + } + } +} + +func TestResolveVersion(t *testing.T) { + e := RegistryEntry{DefaultVersion: "1.0.0"} + if v := e.ResolveVersion("2.0.0"); v != "2.0.0" { + t.Errorf("explicit version: got %q, want 2.0.0", v) + } + if v := e.ResolveVersion(""); v != "1.0.0" { + t.Errorf("default version: got %q, want 1.0.0", v) + } +} + +func TestLookup_RequiresFirst(t *testing.T) { + reg, err := Default() + if err != nil { + t.Fatalf("Default() error: %v", err) + } + e, ok := reg.Lookup("aws") + if !ok { + t.Fatal("aws not found") + } + if len(e.RequiresFirst) == 0 { + t.Error("aws should have requires_first dependencies") + } + found := false + for _, dep := 
range e.RequiresFirst { + if dep == "unzip" { + found = true + } + } + if !found { + t.Errorf("aws requires_first should include unzip, got %v", e.RequiresFirst) + } +} diff --git a/forge-skills/requirements/requirements.go b/forge-skills/requirements/requirements.go index 1389a91..55a313b 100644 --- a/forge-skills/requirements/requirements.go +++ b/forge-skills/requirements/requirements.go @@ -32,6 +32,8 @@ func AggregateRequirements(entries []contract.SkillEntry) *contract.AggregatedRe promptPatternSeen := make(map[string]bool) responsePatternSeen := make(map[string]bool) + binReqMap := make(map[string]contract.BinRequirement) + for _, e := range entries { // Collect forge-level metadata (denied_tools, egress_domains, guardrails) if e.Metadata != nil && e.Metadata.Metadata != nil { @@ -99,7 +101,11 @@ func AggregateRequirements(entries []contract.SkillEntry) *contract.AggregatedRe continue } for _, b := range e.ForgeReqs.Bins { - binSet[b] = true + binSet[b.Name] = true + // Keep richer entry: one with more fields set wins + if existing, ok := binReqMap[b.Name]; !ok || isRicher(b, existing) { + binReqMap[b.Name] = b + } } if e.ForgeReqs.Env != nil { for _, v := range e.ForgeReqs.Env.Required { @@ -121,12 +127,24 @@ func AggregateRequirements(entries []contract.SkillEntry) *contract.AggregatedRe } } + // Build sorted BinRequirements from the map + sortedBins := sortedKeys(binSet) + binReqs := make([]contract.BinRequirement, 0, len(sortedBins)) + for _, name := range sortedBins { + if br, ok := binReqMap[name]; ok { + binReqs = append(binReqs, br) + } else { + binReqs = append(binReqs, contract.BinRequirement{Name: name}) + } + } + agg := &contract.AggregatedRequirements{ - Bins: sortedKeys(binSet), - EnvOneOf: oneOfGroups, - DeniedTools: sortedKeys(deniedSet), - EgressDomains: sortedKeys(egressSet), - WorkflowPhases: sortedKeys(phaseSet), + Bins: sortedBins, + BinRequirements: binReqs, + EnvOneOf: oneOfGroups, + DeniedTools: sortedKeys(deniedSet), + EgressDomains: 
sortedKeys(egressSet), + WorkflowPhases: sortedKeys(phaseSet), } agg.EnvRequired = sortedKeys(reqSet) agg.EnvOptional = sortedKeys(optSet) @@ -154,6 +172,30 @@ func AggregateDescriptorRequirements(descs []contract.SkillDescriptor) int { return maxTimeout } +// isRicher returns true if a has more install metadata than b. +func isRicher(a, b contract.BinRequirement) bool { + countFields := func(br contract.BinRequirement) int { + n := 0 + if br.Version != "" { + n++ + } + if br.AptPackage != "" { + n++ + } + if br.ApkPackage != "" { + n++ + } + if br.DirectURL != "" { + n++ + } + if len(br.CustomLines) > 0 { + n++ + } + return n + } + return countFields(a) > countFields(b) +} + func sortedKeys(m map[string]bool) []string { if len(m) == 0 { return nil diff --git a/forge-skills/requirements/requirements_test.go b/forge-skills/requirements/requirements_test.go index 00db3b4..71d12bd 100644 --- a/forge-skills/requirements/requirements_test.go +++ b/forge-skills/requirements/requirements_test.go @@ -11,7 +11,7 @@ func TestAggregate_SingleSkill(t *testing.T) { { Name: "github", ForgeReqs: &contract.SkillRequirements{ - Bins: []string{"curl", "jq"}, + Bins: []contract.BinRequirement{{Name: "curl"}, {Name: "jq"}}, Env: &contract.EnvRequirements{ Required: []string{"API_KEY"}, Optional: []string{"TIMEOUT"}, @@ -39,11 +39,11 @@ func TestAggregate_MultiSkill_BinsUnion(t *testing.T) { entries := []contract.SkillEntry{ { Name: "a", - ForgeReqs: &contract.SkillRequirements{Bins: []string{"curl", "jq"}}, + ForgeReqs: &contract.SkillRequirements{Bins: []contract.BinRequirement{{Name: "curl"}, {Name: "jq"}}}, }, { Name: "b", - ForgeReqs: &contract.SkillRequirements{Bins: []string{"jq", "python"}}, + ForgeReqs: &contract.SkillRequirements{Bins: []contract.BinRequirement{{Name: "jq"}, {Name: "python"}}}, }, } @@ -132,7 +132,7 @@ func TestAggregate_DeniedToolsCollected(t *testing.T) { }, }, ForgeReqs: &contract.SkillRequirements{ - Bins: []string{"kubectl"}, + Bins: 
[]contract.BinRequirement{{Name: "kubectl"}}, }, }, {