diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index a7a8c4b..5ac5ead 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -39,7 +39,83 @@ jobs: - run: cargo test --workspace --all-features - run: cargo test -p amber-compiler --all-features docker_smoke_ocap_blocks_unbound_callers -- --ignored --test-threads=1 - run: cargo test -p amber-compiler --all-features docker_smoke_config_forwarding_runtime_validation -- --ignored --test-threads=1 - - run: cargo test -p amber-compose-helper --all-features helper_image_executes_run_plan_in_scratch -- --ignored --test-threads=1 + - run: cargo test -p amber-helper --all-features helper_image_executes_run_plan_in_scratch -- --ignored --test-threads=1 + + kubernetes-tests: + name: Kubernetes Reporter Tests + runs-on: ubuntu-latest + continue-on-error: true + env: + SCCACHE_GHA_ENABLED: "true" + RUSTC_WRAPPER: "sccache" + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@stable + - uses: mozilla-actions/sccache-action@v0.0.9 + - name: Test Kubernetes reporter with KinD + run: | + # Compile the test scenario to Kubernetes manifests + cargo run -p amber-cli -- compile \ + test-scenarios/kubernetes-basic/scenario.json5 \ + --kubernetes target/amber-out/kubernetes + + # Fill in the generated config template files + patch target/amber-out/kubernetes/root-config-secret.env <<'EOF' + --- a/root-config-secret.env + +++ b/root-config-secret.env + @@ -1,2 +1,2 @@ + # Root config secrets - fill in values before deploying + -AMBER_CONFIG_SERVER_RUNTIME_SECRET= + +AMBER_CONFIG_SERVER_RUNTIME_SECRET=test-secret-value + EOF + + patch target/amber-out/kubernetes/root-config.env <<'EOF' + --- a/root-config.env + +++ b/root-config.env + @@ -1,2 +1,2 @@ + # Root config values - fill in values before deploying + -AMBER_CONFIG_SERVER_RUNTIME_CONFIG= + +AMBER_CONFIG_SERVER_RUNTIME_CONFIG=test-config-value + EOF + + # Build the amber-helper image locally + docker build -t 
ghcr.io/rdi-foundation/amber-helper:v1 -f docker/amber-helper/Dockerfile . + + # Create KinD cluster + kind create cluster --name amber-test --wait 120s + + # Load the amber-helper image into KinD + kind load docker-image ghcr.io/rdi-foundation/amber-helper:v1 --name amber-test + + # Apply the manifests using kustomize + kubectl apply -k target/amber-out/kubernetes + + # Get the namespace that was created + NAMESPACE=$(kubectl get namespaces -l app.kubernetes.io/managed-by=amber -o jsonpath='{.items[0].metadata.name}') + + # Wait for deployments to be ready (includes NetworkPolicy enforcement check in init containers) + kubectl wait --for=condition=available --timeout=120s \ + deployment --all -n "${NAMESPACE}" + + # Verify the deployed scenario by fetching files from the client component + kubectl port-forward -n "${NAMESPACE}" service/c1-client 8080:8080 & + PF_PID=$! + sleep 5 + + # Fetch the config files + RUNTIME_SECRET=$(curl -s http://localhost:8080/runtime_secret.txt) + RUNTIME_CONFIG=$(curl -s http://localhost:8080/runtime_config.txt) + STATIC_SECRET=$(curl -s http://localhost:8080/static_secret.txt) + STATIC_CONFIG=$(curl -s http://localhost:8080/static_config.txt) + + kill $PF_PID + wait + + # Verify values match expectations + test "$RUNTIME_SECRET" = "test-secret-value" + test "$RUNTIME_CONFIG" = "test-config-value" + test "$STATIC_SECRET" = "hardcode-this-secret" + test "$STATIC_CONFIG" = "hardcode-this-config" docker-build: name: Build Docker Images (${{ matrix.image }} ${{ matrix.arch }}) @@ -78,13 +154,13 @@ jobs: arch: arm64 - image: amber-helper context: . - file: docker/amber-compose-helper/Dockerfile + file: docker/amber-helper/Dockerfile cache_scope: amber-helper-pr-${{ github.event.pull_request.number }} runner: ubuntu-latest arch: amd64 - image: amber-helper context: . 
- file: docker/amber-compose-helper/Dockerfile + file: docker/amber-helper/Dockerfile cache_scope: amber-helper-pr-${{ github.event.pull_request.number }} runner: ubuntu-24.04-arm arch: arm64 diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml index 83ef984..3e213df 100644 --- a/.github/workflows/publish.yaml +++ b/.github/workflows/publish.yaml @@ -40,12 +40,12 @@ jobs: arch: arm64 - image: amber-helper context: . - file: docker/amber-compose-helper/Dockerfile + file: docker/amber-helper/Dockerfile runner: ubuntu-latest arch: amd64 - image: amber-helper context: . - file: docker/amber-compose-helper/Dockerfile + file: docker/amber-helper/Dockerfile runner: ubuntu-24.04-arm arch: arm64 steps: diff --git a/Cargo.lock b/Cargo.lock index 02bcd59..eca6b29 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -82,6 +82,7 @@ dependencies = [ "miette", "serde", "serde_json", + "serde_yaml", "tempfile", "thiserror 2.0.17", "tokio", @@ -89,26 +90,26 @@ dependencies = [ ] [[package]] -name = "amber-compose-helper" +name = "amber-config" version = "0.1.0" dependencies = [ - "amber-config", "amber-template", - "base64", "jsonschema", - "serde", "serde_json", - "tempfile", - "thiserror 2.0.17", ] [[package]] -name = "amber-config" +name = "amber-helper" version = "0.1.0" dependencies = [ + "amber-config", "amber-template", + "base64", "jsonschema", + "serde", "serde_json", + "tempfile", + "thiserror 2.0.17", ] [[package]] @@ -2011,6 +2012,12 @@ version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" +[[package]] +name = "ryu" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" + [[package]] name = "same-file" version = "1.0.6" @@ -2177,6 +2184,19 @@ dependencies = [ "syn", ] +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap 2.12.1", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + [[package]] name = "sha2" version = "0.10.9" @@ -2682,6 +2702,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + [[package]] name = "untrusted" version = "0.9.0" diff --git a/Cargo.toml b/Cargo.toml index f929348..8822bc1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ members = [ "cli", "config", "compiler", - "compose-helper", + "helper", "json5", "manifest", "node", @@ -27,6 +27,7 @@ jsonschema = { version = "0.38.1", default-features = false } miette = "7.4.0" serde = "1.0.217" serde_json = "1.0.148" +serde_yaml = "0.9.34" tempfile = "3.24.0" thiserror = "2.0.9" tokio = "1.48.0" diff --git a/Dockerfile b/Dockerfile index 7f68663..c6c3f35 100644 --- a/Dockerfile +++ b/Dockerfile @@ -28,7 +28,7 @@ COPY Cargo.toml Cargo.lock ./ COPY cli/Cargo.toml cli/ COPY config/Cargo.toml config/ COPY compiler/Cargo.toml compiler/ -COPY compose-helper/Cargo.toml compose-helper/ +COPY helper/Cargo.toml helper/ COPY json5/Cargo.toml json5/ COPY manifest/Cargo.toml manifest/ COPY resolver/Cargo.toml resolver/ @@ -36,15 +36,15 @@ COPY scenario/Cargo.toml scenario/ COPY template/Cargo.toml template/ COPY node/Cargo.toml node/ -RUN mkdir -p cli/src config/src compiler/src compose-helper/src json5/src manifest/src resolver/src scenario/src template/src node/src && \ - touch cli/src/main.rs config/src/lib.rs compiler/src/lib.rs compose-helper/src/main.rs json5/src/lib.rs manifest/src/lib.rs resolver/src/lib.rs scenario/src/lib.rs 
template/src/lib.rs node/src/main.rs +RUN mkdir -p cli/src config/src compiler/src helper/src json5/src manifest/src resolver/src scenario/src template/src node/src && \ + touch cli/src/main.rs config/src/lib.rs compiler/src/lib.rs helper/src/main.rs json5/src/lib.rs manifest/src/lib.rs resolver/src/lib.rs scenario/src/lib.rs template/src/lib.rs node/src/main.rs RUN cargo fetch --locked -RUN rm -rf cli/src config/src compiler/src compose-helper/src json5/src manifest/src resolver/src scenario/src template/src node/src +RUN rm -rf cli/src config/src compiler/src helper/src json5/src manifest/src resolver/src scenario/src template/src node/src COPY cli ./cli COPY config ./config COPY compiler ./compiler -COPY compose-helper ./compose-helper +COPY helper ./helper COPY json5 ./json5 COPY manifest ./manifest COPY resolver ./resolver diff --git a/cli/README.md b/cli/README.md index 4ff2d70..d6570ce 100644 --- a/cli/README.md +++ b/cli/README.md @@ -5,7 +5,7 @@ Command-line front-end for the compiler. It resolves a root manifest, runs compi ## Responsibilities - Wire `amber-compiler` and `amber-resolver` for compile/check flows. - Render diagnostics via `miette`, including treating selected warnings as errors. -- Write compile outputs only when requested; `amber compile` requires at least one output flag (`--output`, `--dot`, `--docker-compose`/`--compose`, or `--bundle`). +- Write compile outputs only when requested; `amber compile` requires at least one output flag (`--output`, `--dot`, `--docker-compose`/`--compose`, `--kubernetes`, or `--bundle`). - Detect bundle inputs and emit bundle directories via `--bundle`. - Surface the manifest README via `amber docs manifest`. 
diff --git a/cli/src/main.rs b/cli/src/main.rs index c61ffaa..074fa44 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -8,7 +8,10 @@ use amber_compiler::{ CompileOptions, CompileOutput, Compiler, ResolverRegistry, bundle::{BundleBuilder, BundleLoader}, reporter::{ - Reporter as _, docker_compose::DockerComposeReporter, dot::DotReporter, + Reporter as _, + docker_compose::DockerComposeReporter, + dot::DotReporter, + kubernetes::{KubernetesReporter, KubernetesReporterConfig}, scenario_ir::ScenarioIrReporter, }, }; @@ -69,6 +72,14 @@ struct CompileArgs { #[arg(long = "bundle", value_name = "DIR")] bundle: Option, + /// Write Kubernetes manifests to this directory. + #[arg(long = "kubernetes", visible_alias = "k8s", value_name = "DIR")] + kubernetes: Option, + + /// Disable generation of NetworkPolicy enforcement check resources. + #[arg(long = "disable-networkpolicy-check", requires = "kubernetes")] + disable_networkpolicy_check: bool, + /// Root manifest or bundle to compile (URL or local path). #[arg(value_name = "MANIFEST")] manifest: String, @@ -179,6 +190,16 @@ async fn compile(args: CompileArgs) -> Result<()> { } } + if let Some(kubernetes_dest) = outputs.kubernetes { + let reporter = KubernetesReporter { + config: KubernetesReporterConfig { + disable_networkpolicy_check: args.disable_networkpolicy_check, + }, + }; + let artifact = reporter.emit(&output).map_err(miette::Report::new)?; + write_kubernetes_output(&kubernetes_dest, &artifact)?; + } + if let Some(bundle_root) = resolve_bundle_root(&args)? 
{ let tree = bundle_tree.expect("bundle requested"); prepare_bundle_dir(&bundle_root)?; @@ -448,6 +469,7 @@ struct OutputPaths { primary: Option, dot: Option, docker_compose: Option, + kubernetes: Option, } fn ensure_outputs_requested(args: &CompileArgs) -> Result<()> { @@ -455,13 +477,14 @@ fn ensure_outputs_requested(args: &CompileArgs) -> Result<()> { || args.dot.is_some() || args.docker_compose.is_some() || args.bundle.is_some() + || args.kubernetes.is_some() { return Ok(()); } Err(miette::miette!( - help = "Request at least one output with `--output`, `--dot`, `--docker-compose`, or \ - `--bundle`.", + help = "Request at least one output with `--output`, `--dot`, `--docker-compose`, \ + `--kubernetes`, or `--bundle`.", "no outputs requested for `amber compile`" )) } @@ -470,6 +493,7 @@ fn resolve_output_paths(args: &CompileArgs) -> Result { let primary = args.output.clone(); let dot = resolve_optional_output(&args.dot); let docker_compose = resolve_optional_output(&args.docker_compose); + let kubernetes = args.kubernetes.clone(); if let (Some(primary_path), Some(ArtifactOutput::File(dot_path))) = (primary.as_ref(), dot.as_ref()) @@ -505,6 +529,7 @@ fn resolve_output_paths(args: &CompileArgs) -> Result { primary, dot, docker_compose, + kubernetes, }) } @@ -524,18 +549,10 @@ fn resolve_bundle_root(args: &CompileArgs) -> Result> { fn prepare_bundle_dir(path: &Path) -> Result<()> { if path.exists() { - if path.is_dir() { - std::fs::remove_dir_all(path) - .into_diagnostic() - .wrap_err_with(|| { - format!("failed to remove bundle directory `{}`", path.display()) - })?; - } else { - return Err(miette::miette!( - "bundle output path `{}` is not a directory", - path.display() - )); - } + return Err(miette::miette!( + "bundle output directory `{}` already exists; please delete it first", + path.display() + )); } std::fs::create_dir_all(path) @@ -565,3 +582,51 @@ fn write_artifact(path: &Path, contents: &[u8]) -> Result<()> { .into_diagnostic() .wrap_err_with(|| 
format!("failed to write `{}`", path.display())) } + +fn write_kubernetes_output( + root: &Path, + artifact: &amber_compiler::reporter::kubernetes::KubernetesArtifact, +) -> Result<()> { + // Clean and recreate the output directory. + if root.exists() { + if root.is_dir() { + std::fs::remove_dir_all(root) + .into_diagnostic() + .wrap_err_with(|| { + format!( + "failed to remove kubernetes output directory `{}`", + root.display() + ) + })?; + } else { + return Err(miette::miette!( + "kubernetes output path `{}` is not a directory", + root.display() + )); + } + } + + std::fs::create_dir_all(root) + .into_diagnostic() + .wrap_err_with(|| { + format!( + "failed to create kubernetes output directory `{}`", + root.display() + ) + })?; + + // Write each file. + for (rel_path, content) in &artifact.files { + let full_path = root.join(rel_path); + if let Some(parent) = full_path.parent() { + std::fs::create_dir_all(parent) + .into_diagnostic() + .wrap_err_with(|| format!("failed to create directory `{}`", parent.display()))?; + } + std::fs::write(&full_path, content) + .into_diagnostic() + .wrap_err_with(|| format!("failed to write `{}`", full_path.display()))?; + } + + Ok(()) +} diff --git a/cli/tests/ui/compile_no_output.stderr b/cli/tests/ui/compile_no_output.stderr index 7f50706..3fce3a4 100644 --- a/cli/tests/ui/compile_no_output.stderr +++ b/cli/tests/ui/compile_no_output.stderr @@ -1,3 +1,3 @@ Error: × no outputs requested for `amber compile` help: Request at least one output with `--output`, `--dot`, `--docker- - compose`, or `--bundle`. + compose`, `--kubernetes`, or `--bundle`. 
diff --git a/compiler/Cargo.toml b/compiler/Cargo.toml index 50846b4..22020c0 100644 --- a/compiler/Cargo.toml +++ b/compiler/Cargo.toml @@ -16,6 +16,7 @@ jsonschema = { workspace = true } miette = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } +serde_yaml = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["fs", "rt", "sync"] } url = { workspace = true, features = ["serde"] } diff --git a/compiler/README.md b/compiler/README.md index 698c526..db0f0e2 100644 --- a/compiler/README.md +++ b/compiler/README.md @@ -20,5 +20,5 @@ Compiles a root component manifest into a linked `Scenario` plus provenance and - `frontend`: async resolver with caching, cycle detection, and environment handling. - `linker`: schema validation, binding resolution, and export verification. - `passes`: graph rewrites that must preserve scenario invariants. -- `reporter`: transforms `CompileOutput` into artifacts (e.g., scenario IR JSON, DOT, Docker Compose YAML). +- `reporter`: transforms `CompileOutput` into artifacts (e.g., scenario IR JSON, DOT, Docker Compose YAML, Kubernetes YAML). - `bundle`: bundle index parsing, manifest packing, and bundle-only resolver wiring. 
diff --git a/compiler/src/reporter/docker_compose/mod.rs b/compiler/src/reporter/docker_compose/mod.rs index af98361..e625b29 100644 --- a/compiler/src/reporter/docker_compose/mod.rs +++ b/compiler/src/reporter/docker_compose/mod.rs @@ -24,7 +24,7 @@ use crate::{ const MESH_NETWORK_NAME: &str = "amber_mesh"; const SIDECAR_IMAGE: &str = "ghcr.io/rdi-foundation/amber-sidecar:main"; -const HELPER_IMAGE: &str = "ghcr.io/rdi-foundation/amber-compose-helper:v1"; +const HELPER_IMAGE: &str = "ghcr.io/rdi-foundation/amber-helper:v1"; const HELPER_VOLUME_NAME: &str = "amber-helper-bin"; const HELPER_INIT_SERVICE: &str = "amber-init"; const HELPER_BIN_DIR: &str = "/amber/bin"; @@ -607,10 +607,7 @@ fn render_docker_compose_inner(output: &CompileOutput) -> DcResult { let bindings = binding_values_by_component.get(id).unwrap(); // Root-only composed config template (if available). Root component uses runtime root config. - let template_opt: Option<&rc::ConfigNode> = match resolved_templates.get(id) { - Some(rc::RootConfigTemplate::Node(node)) => Some(node), - _ => None, - }; + let template_opt = resolved_templates.get(id).and_then(|t| t.node()); // Build template spec with slots resolved and config either resolved (static) or preserved. 
let mut entrypoint_ts: Vec = Vec::new(); diff --git a/compiler/src/reporter/docker_compose/tests.rs b/compiler/src/reporter/docker_compose/tests.rs index 6582c30..1336ccc 100644 --- a/compiler/src/reporter/docker_compose/tests.rs +++ b/compiler/src/reporter/docker_compose/tests.rs @@ -194,7 +194,7 @@ fn build_helper_image() -> String { let root = workspace_root(); build_docker_image( HELPER_IMAGE, - &root.join("docker/amber-compose-helper/Dockerfile"), + &root.join("docker/amber-helper/Dockerfile"), &root, ) } diff --git a/compiler/src/reporter/kubernetes/mod.rs b/compiler/src/reporter/kubernetes/mod.rs new file mode 100644 index 0000000..49c0cc9 --- /dev/null +++ b/compiler/src/reporter/kubernetes/mod.rs @@ -0,0 +1,1884 @@ +mod resources; + +use std::{ + collections::{BTreeMap, HashMap}, + path::PathBuf, + sync::Arc, +}; + +use amber_config as rc; +use amber_manifest::{BindingTarget, InterpolatedPart, InterpolationSource, Manifest}; +use amber_scenario::{BindingFrom, ComponentId, ProvideRef, Scenario}; +use amber_template::{ProgramTemplateSpec, TemplatePart, TemplateSpec, TemplateString}; +use base64::Engine as _; +pub use resources::*; +use serde::Serialize; +use serde_json::Value; + +use super::{Reporter, ReporterError}; +use crate::{ + CompileOutput, + binding_query::{BindingObject, resolve_binding_query}, + config_template, + slot_query::{SlotObject, resolve_slot_query}, +}; + +// Helper injection system: When a component requires runtime config interpolation, +// an init container installs the amber-helper binary into a shared volume, then the +// main container uses the helper as its entrypoint to resolve config templates and +// exec the actual program. +const HELPER_IMAGE: &str = "ghcr.io/rdi-foundation/amber-helper:v1"; +const HELPER_VOLUME_NAME: &str = "amber-helper"; +const HELPER_BIN_DIR: &str = "/amber/bin"; +const HELPER_BIN_PATH: &str = "/amber/bin/amber-helper"; + +// Root config Secret/ConfigMap names. 
+const ROOT_CONFIG_SECRET_NAME: &str = "amber-root-config-secret"; +const ROOT_CONFIG_CONFIGMAP_NAME: &str = "amber-root-config"; + +/// Kubernetes reporter configuration. +#[derive(Clone, Debug, Default)] +pub struct KubernetesReporterConfig { + /// Disable generation of NetworkPolicy enforcement check resources. + pub disable_networkpolicy_check: bool, +} + +/// Reporter that outputs Kubernetes manifests as a directory structure. +#[derive(Clone, Debug, Default)] +pub struct KubernetesReporter { + pub config: KubernetesReporterConfig, +} + +/// Output artifact containing all generated Kubernetes YAML files. +#[derive(Clone, Debug)] +pub struct KubernetesArtifact { + /// Map of relative path -> YAML content. + pub files: BTreeMap, +} + +impl Reporter for KubernetesReporter { + type Artifact = KubernetesArtifact; + + fn emit(&self, output: &CompileOutput) -> Result { + render_kubernetes(output, &self.config) + } +} + +// ---- Internal types ---- + +#[derive(Clone, Debug)] +struct ComponentNames { + /// Service/deployment name (RFC 1123 subdomain). + service: String, + /// NetworkPolicy name. + netpol: String, +} + +/// Metadata about a scenario export for the amber-metadata ConfigMap. +#[derive(Clone, Debug, Serialize)] +struct ExportMetadata { + component: String, + provide: String, + service: String, + port: u16, + kind: String, +} + +/// Metadata about a config input for the amber-metadata ConfigMap. +#[derive(Clone, Debug, Serialize)] +struct InputMetadata { + required: bool, + secret: bool, +} + +/// Full scenario metadata stored in amber-metadata ConfigMap. +#[derive(Clone, Debug, Serialize)] +struct ScenarioMetadata { + version: &'static str, + digest: String, + exports: BTreeMap, + inputs: BTreeMap, +} + +/// How a program component will be run. +#[derive(Clone, Debug)] +enum ProgramMode { + /// Direct execution - all config is statically resolved at compile time. 
+ Direct { + entrypoint: Vec, + env: BTreeMap, + }, + /// Helper-mediated execution - needs runtime config interpolation. + Helper { + /// Base64-encoded TemplateSpec (program entrypoint + env with config refs). + template_spec_b64: String, + /// Base64-encoded component config template (for resolving against root config). + component_cfg_template_b64: String, + /// Base64-encoded component config schema (for validation). + component_schema_b64: String, + }, +} + +type KubernetesResult = Result; + +fn render_kubernetes( + output: &CompileOutput, + config: &KubernetesReporterConfig, +) -> KubernetesResult { + let s = &output.scenario; + + let manifests = crate::manifest_table::build_manifest_table(&s.components, &output.store) + .map_err(|e| { + ReporterError::new(format!( + "internal error: missing manifest content for {} (digest {})", + component_label(s, e.component), + e.digest + )) + })?; + + // Backend prerequisite: strong dependency graph must be acyclic. + if let Err(cycle) = amber_scenario::graph::topo_order(s) { + let cycle_str = cycle + .cycle + .iter() + .map(|id| format!("c{}", id.0)) + .collect::>() + .join(" -> "); + return Err(ReporterError::new(format!( + "kubernetes reporter requires an acyclic dependency graph (ignoring weak bindings). \ + Found a cycle: {cycle_str}" + ))); + } + + // Collect program components (these become deployments). + let program_components: Vec = s + .components_iter() + .filter_map(|(id, c)| c.program.as_ref().map(|_| id)) + .collect(); + + // Generate namespace name. + let namespace = generate_namespace_name(s); + + // Generate component names. + let mut names: HashMap = HashMap::new(); + for id in &program_components { + let c = s.component(*id); + let base = service_name(*id, c.moniker.local_name().unwrap_or("component")); + names.insert( + *id, + ComponentNames { + service: base.clone(), + netpol: format!("{base}-netpol"), + }, + ); + } + + // Validate: framework bindings are not supported. 
+ for b in &s.bindings { + if let BindingFrom::Framework(name) = &b.from { + return Err(ReporterError::new(format!( + "kubernetes reporter does not support framework binding `framework.{name}` (bound \ + to {}.{})", + component_label(s, b.to.component), + b.to.name + ))); + } + } + + // Validate: every binding endpoint is between program components. + for b in &s.bindings { + let from = binding_from_component(&b.from); + if s.component(from.component).program.is_none() { + return Err(ReporterError::new(format!( + "binding source {}.{} is not runnable (component has no program)", + component_label(s, from.component), + from.name + ))); + } + if s.component(b.to.component).program.is_none() { + return Err(ReporterError::new(format!( + "binding target {}.{} is not runnable (component has no program)", + component_label(s, b.to.component), + b.to.name + ))); + } + } + for ex in &s.exports { + if s.component(ex.from.component).program.is_none() { + return Err(ReporterError::new(format!( + "scenario export '{}' points at {}.{} which is not runnable (component has no \ + program)", + ex.name, + component_label(s, ex.from.component), + ex.from.name + ))); + } + } + + // Build slot values and binding values for each component (resolved URLs to services). 
+ let mut slot_values_by_component: HashMap> = + HashMap::new(); + let mut binding_values_by_component: HashMap> = + HashMap::new(); + for id in &program_components { + slot_values_by_component.insert(*id, BTreeMap::new()); + binding_values_by_component.insert(*id, BTreeMap::new()); + } + + // Track inbound allowlist: provider -> set of (consumer_component, port) + let mut inbound_allow: HashMap> = HashMap::new(); + + for b in &s.bindings { + let from = binding_from_component(&b.from); + let provider = from.component; + let consumer = b.to.component; + + let endpoint_port = resolve_provide_endpoint(s, provider, &from.name)?; + let provider_names = names.get(&provider).ok_or_else(|| { + ReporterError::new(format!( + "internal error: missing names for provider {}", + component_label(s, provider) + )) + })?; + + let url = if provider == consumer { + // Self-reference: use localhost + format!("http://127.0.0.1:{}", endpoint_port) + } else { + // Slot resolves to Kubernetes service DNS. + format!( + "http://{}.{}.svc.cluster.local:{}", + provider_names.service, namespace, endpoint_port + ) + }; + + slot_values_by_component + .entry(consumer) + .or_default() + .insert(b.to.name.clone(), SlotObject { url: url.clone() }); + + if let Some(name) = b.name.as_ref() { + binding_values_by_component + .entry(consumer) + .or_default() + .insert(name.clone(), BindingObject { url }); + } + + // Track for NetworkPolicy + if provider != consumer { + inbound_allow + .entry(provider) + .or_default() + .push((consumer, endpoint_port)); + } + } + + // Compose config templates for all components. 
+ let root_id = s.root; + let root_schema = manifests[root_id.0] + .as_ref() + .and_then(|m| m.config_schema()) + .map(|s| &s.0); + + let root_template = if root_schema.is_some() { + rc::RootConfigTemplate::Root + } else { + rc::RootConfigTemplate::Node(rc::ConfigNode::empty_object()) + }; + + let mut composed_templates: HashMap = HashMap::new(); + compose_templates_dfs( + s, + s.root, + &manifests, + root_schema, + &root_template, + &mut composed_templates, + ) + .map_err(|e| { + ReporterError::new(format!("failed to compose component config templates: {e}")) + })?; + + let binding_urls_by_scope = binding_urls_by_scope(s, &manifests, &slot_values_by_component) + .map_err(ReporterError::new)?; + + let resolved_templates = + resolve_binding_templates(composed_templates, &binding_urls_by_scope, s) + .map_err(ReporterError::new)?; + + // Standard labels for all resources. + let scenario_labels = |extra: &[(&str, &str)]| -> BTreeMap { + let mut labels = BTreeMap::new(); + labels.insert( + "app.kubernetes.io/managed-by".to_string(), + "amber".to_string(), + ); + labels.insert( + "amber.io/scenario".to_string(), + sanitize_label_value(&namespace), + ); + for (k, v) in extra { + labels.insert(k.to_string(), v.to_string()); + } + labels + }; + + let component_labels = |id: ComponentId, svc_name: &str| -> BTreeMap { + let mut labels = scenario_labels(&[]); + labels.insert("amber.io/component".to_string(), svc_name.to_string()); + labels.insert("amber.io/component-id".to_string(), format!("c{}", id.0)); + labels + }; + + // ---- Generate resources ---- + + let mut files: BTreeMap = BTreeMap::new(); + + // Namespace + let ns = Namespace::new(&namespace, scenario_labels(&[])); + files.insert(PathBuf::from("00-namespace.yaml"), to_yaml(&ns)?); + + // Collect root config leaf paths for metadata. + let root_leaves = if let Some(schema) = root_schema { + rc::collect_leaf_paths(schema).map_err(|e| { + ReporterError::new(format!("failed to enumerate root config paths: {e}")) + })? 
+ } else { + Vec::new() + }; + + // Build ProgramMode for each component (determines if helper is needed). + let mut program_modes: HashMap = HashMap::new(); + let mut any_helper = false; + + for id in &program_components { + let c = s.component(*id); + let program = c.program.as_ref().unwrap(); + let slots = slot_values_by_component.get(id).unwrap(); + + let component_template = resolved_templates.get(id).ok_or_else(|| { + ReporterError::new(format!( + "no config template for component {}", + component_label(s, *id) + )) + })?; + + // Get the config node for template resolution. + let template_opt = component_template.node(); + + let component_schema = manifests[id.0] + .as_ref() + .and_then(|m| m.config_schema()) + .map(|s| &s.0); + + let bindings = binding_values_by_component.get(id).unwrap(); + + let mode = build_program_mode( + s, + *id, + program, + slots, + bindings, + template_opt, + component_schema, + component_template, + )?; + + if matches!(mode, ProgramMode::Helper { .. }) { + any_helper = true; + } + + program_modes.insert(*id, mode); + } + + // Build kustomization (will be populated at the end if helper mode is used). + let mut kustomization = Kustomization::new(); + kustomization.namespace = Some(namespace.clone()); + + // If any component needs helper, generate root config Secret/ConfigMap and .env templates. + if any_helper { + // Separate root leaves into secret and non-secret. + let (secret_leaves, config_leaves): (Vec<_>, Vec<_>) = + root_leaves.iter().partition(|l| l.secret); + + // Add secretGenerator for secret config values. + if !secret_leaves.is_empty() { + kustomization.secret_generator.push(SecretGenerator { + name: ROOT_CONFIG_SECRET_NAME.to_string(), + namespace: Some(namespace.clone()), + env_files: vec!["root-config-secret.env".to_string()], + literals: Vec::new(), + options: Some(GeneratorOptions { + disable_name_suffix_hash: Some(true), + }), + }); + + // Generate template .env file for secrets. 
+ let mut env_content = String::new(); + env_content.push_str("# Root config secrets - fill in values before deploying\n"); + for leaf in &secret_leaves { + let env_var = rc::env_var_for_path(&leaf.path) + .map_err(|e| ReporterError::new(format!("failed to map config path: {e}")))?; + env_content.push_str(&format!("{}=\n", env_var)); + } + files.insert(PathBuf::from("root-config-secret.env"), env_content); + } + + // Add configMapGenerator for non-secret config values. + if !config_leaves.is_empty() { + kustomization.config_map_generator.push(ConfigMapGenerator { + name: ROOT_CONFIG_CONFIGMAP_NAME.to_string(), + namespace: Some(namespace.clone()), + env_files: vec!["root-config.env".to_string()], + literals: Vec::new(), + options: Some(GeneratorOptions { + disable_name_suffix_hash: Some(true), + }), + }); + + // Generate template .env file for config. + let mut env_content = String::new(); + env_content.push_str("# Root config values - fill in values before deploying\n"); + for leaf in &config_leaves { + let env_var = rc::env_var_for_path(&leaf.path) + .map_err(|e| ReporterError::new(format!("failed to map config path: {e}")))?; + env_content.push_str(&format!("{}=\n", env_var)); + } + files.insert(PathBuf::from("root-config.env"), env_content); + } + + // Don't insert kustomization yet - we'll do it at the end after collecting all resource paths + } + + // Note: Per-component ConfigMaps/Secrets are not generated because: + // - Direct mode: all config must be fully static (no runtime interpolation). + // Static values are rendered to strings and inlined directly into the + // Deployment YAML as literal env vars. This includes secret config values, + // which will be visible in the generated YAML. + // - Helper mode: config with runtime interpolation uses the helper binary. + // The helper reads config values from the root config Secret/ConfigMap at + // runtime and resolves templates, so secret values are not inlined. 
+ // The only ConfigMaps generated are amber-metadata and the Kustomize-generated + // root config (when helper mode is used). + + // Encode root schema for helper (needed for all helper components). + let root_schema_b64 = if any_helper { + let root_schema = root_schema.ok_or_else(|| { + ReporterError::new( + "root component must declare `config_schema` when runtime config interpolation is \ + required" + .to_string(), + ) + })?; + let b64 = base64::engine::general_purpose::STANDARD; + let root_schema_json = + serde_json::to_vec(&rc::canonical_json(root_schema)).map_err(|e| { + ReporterError::new(format!("failed to serialize root config definition: {e}")) + })?; + Some(b64.encode(root_schema_json)) + } else { + None + }; + + // Deployments + for id in &program_components { + let c = s.component(*id); + let cnames = names.get(id).unwrap(); + let labels = component_labels(*id, &cnames.service); + let program = c.program.as_ref().unwrap(); + let mode = program_modes.get(id).unwrap(); + + // Container ports. + let mut ports: Vec = Vec::new(); + if let Some(network) = &program.network { + for ep in &network.endpoints { + ports.push(ContainerPort { + name: sanitize_port_name(&ep.name), + container_port: ep.port, + protocol: "TCP", + }); + } + } + + // Build container based on program mode. + let (container, volumes) = match mode { + ProgramMode::Direct { entrypoint, env } => { + // Direct mode: use resolved entrypoint and env directly. + // Config values are already baked into the entrypoint/env strings, + // so we don't need AMBER_CONFIG_* env vars here. 
+ let container_env: Vec = + env.iter().map(|(k, v)| EnvVar::literal(k, v)).collect(); + + let container = Container { + name: "main".to_string(), + image: program.image.clone(), + command: entrypoint.clone(), + args: Vec::new(), + env: container_env, + env_from: Vec::new(), + ports, + volume_mounts: Vec::new(), + }; + + (container, Vec::new()) + } + ProgramMode::Helper { + template_spec_b64, + component_cfg_template_b64, + component_schema_b64, + } => { + // Helper mode: use helper binary as entrypoint, mount shared volume. + let mut container_env: Vec = Vec::new(); + + // Collect config paths referenced by this component's template. + let component_template = resolved_templates.get(id).ok_or_else(|| { + ReporterError::new(format!( + "no config template for component {}", + component_label(s, *id) + )) + })?; + let referenced_paths = collect_config_refs(component_template); + + // Add only the root config env vars that are actually referenced by this component. + // If referenced_paths is None, it means the component template is Root and needs all config. + for leaf in &root_leaves { + // Check if this leaf path is referenced by the component template. + let should_include = match &referenced_paths { + Some(paths) => paths.contains(&leaf.path), + None => true, // Root template needs all config + }; + + if !should_include { + continue; + } + + let env_var = rc::env_var_for_path(&leaf.path).map_err(|e| { + ReporterError::new(format!("failed to map config path: {e}")) + })?; + + if leaf.secret { + container_env.push(EnvVar::from_secret( + &env_var, + ROOT_CONFIG_SECRET_NAME, + &env_var, + )); + } else { + container_env.push(EnvVar::from_config_map( + &env_var, + ROOT_CONFIG_CONFIGMAP_NAME, + &env_var, + )); + } + } + + // Add helper-specific env vars. 
+ let root_schema_b64 = root_schema_b64 + .as_ref() + .expect("helper mode requires root schema"); + container_env.push(EnvVar::literal( + "AMBER_ROOT_CONFIG_SCHEMA_B64", + root_schema_b64, + )); + container_env.push(EnvVar::literal( + "AMBER_COMPONENT_CONFIG_SCHEMA_B64", + component_schema_b64, + )); + container_env.push(EnvVar::literal( + "AMBER_COMPONENT_CONFIG_TEMPLATE_B64", + component_cfg_template_b64, + )); + container_env.push(EnvVar::literal( + "AMBER_TEMPLATE_SPEC_B64", + template_spec_b64, + )); + + let container = Container { + name: "main".to_string(), + image: program.image.clone(), + command: vec![HELPER_BIN_PATH.to_string(), "run".to_string()], + args: Vec::new(), + env: container_env, + env_from: Vec::new(), + ports, + volume_mounts: vec![VolumeMount { + name: HELPER_VOLUME_NAME.to_string(), + mount_path: HELPER_BIN_DIR.to_string(), + read_only: Some(true), + }], + }; + + let volumes = vec![Volume::empty_dir(HELPER_VOLUME_NAME)]; + + (container, volumes) + } + }; + + // Add init containers. + let mut init_containers = Vec::new(); + + // For helper mode, add init container to install the helper binary. + if matches!(mode, ProgramMode::Helper { .. }) { + init_containers.push(Container { + name: "install-helper".to_string(), + image: HELPER_IMAGE.to_string(), + command: vec![ + "/amber-helper".to_string(), + "install".to_string(), + format!("{}/amber-helper", HELPER_BIN_DIR), + ], + args: Vec::new(), + env: Vec::new(), + env_from: Vec::new(), + ports: Vec::new(), + volume_mounts: vec![VolumeMount { + name: HELPER_VOLUME_NAME.to_string(), + mount_path: HELPER_BIN_DIR.to_string(), + read_only: None, + }], + }); + } + + // Add init container to wait for NetworkPolicy enforcement check. 
+ if !config.disable_networkpolicy_check { + init_containers.push(Container { + name: "wait-for-netpol-check".to_string(), + image: "busybox:1.36".to_string(), + command: vec![ + "/bin/sh".to_string(), + "-c".to_string(), + format!( + "RESPONSE=$(nc amber-netpol-client.{} 8080 /dev/null); [ \ + \"$RESPONSE\" = \"ready\" ] && echo 'NetworkPolicy enforcement verified'", + namespace + ), + ], + args: Vec::new(), + env: Vec::new(), + env_from: Vec::new(), + ports: Vec::new(), + volume_mounts: Vec::new(), + }); + } + + let pod_spec = PodSpec { + init_containers, + containers: vec![container], + volumes, + restart_policy: None, + }; + + let deployment = Deployment { + api_version: "apps/v1", + kind: "Deployment", + metadata: ObjectMeta { + name: cnames.service.clone(), + namespace: Some(namespace.clone()), + labels: labels.clone(), + ..Default::default() + }, + spec: DeploymentSpec { + replicas: 1, + selector: LabelSelector { + match_labels: { + let mut m = BTreeMap::new(); + m.insert("amber.io/component".to_string(), cnames.service.clone()); + m + }, + }, + template: PodTemplateSpec { + metadata: ObjectMeta { + labels: labels.clone(), + ..Default::default() + }, + spec: pod_spec, + }, + }, + }; + + files.insert( + PathBuf::from(format!("03-deployments/{}.yaml", cnames.service)), + to_yaml(&deployment)?, + ); + } + + // Services (only for components with provides) + for id in &program_components { + let c = s.component(*id); + if c.provides.is_empty() { + continue; + } + + let cnames = names.get(id).unwrap(); + let labels = component_labels(*id, &cnames.service); + let program = c.program.as_ref().unwrap(); + + let mut service_ports: Vec = Vec::new(); + if let Some(network) = &program.network { + for ep in &network.endpoints { + service_ports.push(ServicePort { + name: sanitize_port_name(&ep.name), + port: ep.port, + target_port: ep.port, + protocol: "TCP", + }); + } + } + + if service_ports.is_empty() { + continue; + } + + let selector = { + let mut m = BTreeMap::new(); 
+ m.insert("amber.io/component".to_string(), cnames.service.clone()); + m + }; + + let svc = Service::new(&cnames.service, &namespace, labels, selector, service_ports); + + files.insert( + PathBuf::from(format!("04-services/{}.yaml", cnames.service)), + to_yaml(&svc)?, + ); + } + + // NetworkPolicies + for id in &program_components { + let cnames = names.get(id).unwrap(); + let labels = component_labels(*id, &cnames.service); + + let pod_selector = { + let mut m = BTreeMap::new(); + m.insert("amber.io/component".to_string(), cnames.service.clone()); + m + }; + + let mut netpol = NetworkPolicy::new(&cnames.netpol, &namespace, labels, pod_selector); + + // Add ingress rules for bound consumers. + if let Some(allowed) = inbound_allow.get(id) { + // Group by port. + let mut by_port: BTreeMap> = BTreeMap::new(); + for (consumer, port) in allowed { + by_port.entry(*port).or_default().push(*consumer); + } + + for (port, consumers) in by_port { + let from: Vec = consumers + .iter() + .map(|cid| { + let consumer_names = names.get(cid).unwrap(); + NetworkPolicyPeer { + pod_selector: Some(LabelSelector { + match_labels: { + let mut m = BTreeMap::new(); + m.insert( + "amber.io/component".to_string(), + consumer_names.service.clone(), + ); + m + }, + }), + namespace_selector: None, + } + }) + .collect(); + + netpol.add_ingress_rule(NetworkPolicyIngressRule { + from, + ports: vec![NetworkPolicyPort { + protocol: "TCP", + port, + }], + }); + } + } + + files.insert( + PathBuf::from(format!("05-networkpolicies/{}.yaml", cnames.netpol)), + to_yaml(&netpol)?, + ); + } + + // NetworkPolicy enforcement check (unless disabled). 
+    // NetworkPolicy enforcement check (unless disabled).
+    if !config.disable_networkpolicy_check {
+        let enforcement_resources =
+            generate_netpol_enforcement_check(&namespace, &scenario_labels(&[]));
+        for (filename, resource_yaml) in enforcement_resources {
+            // Each check resource gets its own file under 06-enforcement/.
+            // (Bug fix: previously the filename was a hard-coded placeholder, so
+            // every resource mapped to the same path and all but the last were lost.)
+            files.insert(
+                PathBuf::from(format!("06-enforcement/{}", filename)),
+                resource_yaml?,
+            );
+        }
+    }
+
+    // Metadata ConfigMap: describes the scenario's exports and required root
+    // config inputs so external tooling can introspect the deployment.
+    let mut export_metadata: BTreeMap<String, ExportMetadata> = BTreeMap::new();
+    for ex in &s.exports {
+        let provider = ex.from.component;
+        let provider_names = names.get(&provider).unwrap();
+        let endpoint_port = resolve_provide_endpoint(s, provider, &ex.from.name)?;
+
+        export_metadata.insert(
+            ex.name.clone(),
+            ExportMetadata {
+                component: s.component(provider).moniker.as_str().to_string(),
+                provide: ex.from.name.clone(),
+                service: provider_names.service.clone(),
+                port: endpoint_port,
+                kind: ex.capability.kind.to_string(),
+            },
+        );
+    }
+
+    let mut input_metadata: BTreeMap<String, InputMetadata> = BTreeMap::new();
+    for leaf in &root_leaves {
+        input_metadata.insert(
+            leaf.path.clone(),
+            InputMetadata {
+                required: leaf.required,
+                secret: leaf.secret,
+            },
+        );
+    }
+
+    let scenario_metadata = ScenarioMetadata {
+        version: "1",
+        digest: s.component(s.root).digest.to_string(),
+        exports: export_metadata,
+        inputs: input_metadata,
+    };
+
+    let metadata_json = serde_json::to_string_pretty(&scenario_metadata)
+        .map_err(|e| ReporterError::new(format!("failed to serialize scenario metadata: {e}")))?;
+
+    let mut metadata_data = BTreeMap::new();
+    metadata_data.insert("scenario.json".to_string(), metadata_json);
+
+    let metadata_cm = ConfigMap::new(
+        "amber-metadata",
+        &namespace,
+        scenario_labels(&[("amber.io/type", "metadata")]),
+        metadata_data,
+    );
+    files.insert(
+        PathBuf::from("01-configmaps/amber-metadata.yaml"),
+        to_yaml(&metadata_cm)?,
+    );
+
+    // Build and insert kustomization with actual file paths.
+    // Always generate kustomization.yaml for consistency, even if not using helper mode.
+ let mut kust_resources = Vec::new(); + + // Collect all YAML files, excluding non-resource files + for path in files.keys() { + if path == &PathBuf::from("root-config.env") + || path == &PathBuf::from("root-config-secret.env") + { + continue; // Skip .env template files + } + kust_resources.push(path.to_string_lossy().to_string()); + } + kust_resources.sort(); + + // Set the resources list and insert the kustomization + kustomization.resources = kust_resources; + files.insert( + PathBuf::from("kustomization.yaml"), + to_yaml(&kustomization)?, + ); + + Ok(KubernetesArtifact { files }) +} + +// ---- Helper functions ---- + +fn component_label(s: &Scenario, id: ComponentId) -> String { + s.component(id).moniker.as_str().to_string() +} + +fn binding_urls_by_scope( + s: &Scenario, + manifests: &[Option>], + slot_values_by_component: &HashMap>, +) -> Result>, String> { + let mut out: HashMap> = HashMap::new(); + + for (idx, manifest) in manifests.iter().enumerate() { + let Some(manifest) = manifest else { + continue; + }; + let realm = ComponentId(idx); + let mut by_name = BTreeMap::new(); + + for (target, binding) in manifest.bindings() { + let Some(name) = binding.name.as_ref() else { + continue; + }; + + let (target_component, slot_name) = match target { + BindingTarget::SelfSlot(slot) => (realm, slot.as_str()), + BindingTarget::ChildSlot { child, slot } => { + let child_id = child_component_id_for_name(s, realm, child.as_str())?; + (child_id, slot.as_str()) + } + _ => { + return Err(format!( + "unsupported binding target {:?} in {}", + target, + component_label(s, realm) + )); + } + }; + + let slot_values = slot_values_by_component + .get(&target_component) + .ok_or_else(|| { + format!( + "internal error: missing slot values for {}", + component_label(s, target_component) + ) + })?; + let slot = slot_values.get(slot_name).ok_or_else(|| { + format!( + "internal error: missing slot url for {}.{}", + component_label(s, target_component), + slot_name + ) + })?; + + 
by_name.insert( + name.to_string(), + BindingObject { + url: slot.url.clone(), + }, + ); + } + + out.insert(realm.0 as u64, by_name); + } + + Ok(out) +} + +fn resolve_binding_templates( + templates: HashMap, + bindings_by_scope: &HashMap>, + s: &Scenario, +) -> Result, String> { + let mut out = HashMap::with_capacity(templates.len()); + for (id, template) in templates { + let resolved = match template { + rc::RootConfigTemplate::Root => rc::RootConfigTemplate::Root, + rc::RootConfigTemplate::Node(node) => { + let resolved = + resolve_binding_parts_in_config(&node, bindings_by_scope).map_err(|err| { + format!( + "failed to resolve binding interpolation in config for {}: {err}", + component_label(s, id) + ) + })?; + rc::RootConfigTemplate::Node(resolved) + } + }; + out.insert(id, resolved); + } + Ok(out) +} + +fn resolve_binding_parts_in_config( + node: &rc::ConfigNode, + bindings_by_scope: &HashMap>, +) -> Result { + match node { + rc::ConfigNode::StringTemplate(parts) => { + let mut out = Vec::with_capacity(parts.len()); + for part in parts { + match part { + TemplatePart::Lit { lit } => out.push(TemplatePart::lit(lit)), + TemplatePart::Config { config } => out.push(TemplatePart::config(config)), + TemplatePart::Binding { binding, scope } => { + let bindings = bindings_by_scope + .get(scope) + .ok_or_else(|| format!("bindings scope {scope} is missing"))?; + let url = resolve_binding_query(bindings, binding)?; + out.push(TemplatePart::lit(url)); + } + } + } + Ok(rc::ConfigNode::StringTemplate(out).simplify()) + } + rc::ConfigNode::Array(items) => { + let mut out = Vec::with_capacity(items.len()); + for item in items { + out.push(resolve_binding_parts_in_config(item, bindings_by_scope)?); + } + Ok(rc::ConfigNode::Array(out)) + } + rc::ConfigNode::Object(map) => { + let mut out = BTreeMap::new(); + for (k, v) in map { + out.insert( + k.clone(), + resolve_binding_parts_in_config(v, bindings_by_scope)?, + ); + } + Ok(rc::ConfigNode::Object(out)) + } + other => 
Ok(other.clone()), + } +} + +fn child_component_id_for_name( + s: &Scenario, + parent: ComponentId, + child_name: &str, +) -> Result { + let parent_component = s.component(parent); + for child_id in &parent_component.children { + let child = s.component(*child_id); + if child.moniker.local_name() == Some(child_name) { + return Ok(*child_id); + } + } + Err(format!( + "internal error: missing child {child_name:?} for {}", + component_label(s, parent) + )) +} + +fn generate_namespace_name(s: &Scenario) -> String { + let root = s.component(s.root); + let short_name = root + .moniker + .local_name() + .unwrap_or("scenario") + .to_lowercase(); + // Get digest bytes and encode as hex (DNS-safe) + let digest_bytes = root.digest.bytes(); + let digest_hex: String = digest_bytes[..4] + .iter() + .map(|b| format!("{:02x}", b)) + .collect(); + + let base = format!("{}-{}", sanitize_dns_name(&short_name), digest_hex); + truncate_dns_name(&base, 63) +} + +fn service_name(id: ComponentId, local_name: &str) -> String { + let slug = sanitize_dns_name(local_name); + let name = format!("c{}-{}", id.0, slug); + truncate_dns_name(&name, 63) +} + +fn sanitize_dns_name(s: &str) -> String { + let mut out = String::new(); + for ch in s.chars() { + let ch = ch.to_ascii_lowercase(); + if ch.is_ascii_alphanumeric() { + out.push(ch); + } else { + out.push('-'); + } + } + // Remove leading/trailing hyphens and collapse multiple hyphens. 
+ let out = out.trim_matches('-'); + let mut result = String::new(); + let mut last_hyphen = false; + for ch in out.chars() { + if ch == '-' { + if !last_hyphen { + result.push(ch); + last_hyphen = true; + } + } else { + result.push(ch); + last_hyphen = false; + } + } + if result.is_empty() { + "component".to_string() + } else { + result + } +} + +fn truncate_dns_name(s: &str, max_len: usize) -> String { + if s.len() <= max_len { + s.to_string() + } else { + s[..max_len].trim_end_matches('-').to_string() + } +} + +fn sanitize_label_value(s: &str) -> String { + // Kubernetes label values: max 63 chars, alphanumeric, -, _, . + let mut out = String::new(); + for ch in s.chars() { + if ch.is_ascii_alphanumeric() || ch == '-' || ch == '_' || ch == '.' { + out.push(ch); + } + } + truncate_dns_name(&out, 63) +} + +fn sanitize_port_name(s: &str) -> String { + // Port names: max 15 chars, lowercase alphanumeric and hyphens. + let sanitized = sanitize_dns_name(s); + truncate_dns_name(&sanitized, 15) +} + +fn resolve_provide_endpoint( + s: &Scenario, + component_id: ComponentId, + provide_name: &str, +) -> Result { + let component = s.component(component_id); + + let provide = component.provides.get(provide_name).ok_or_else(|| { + ReporterError::new(format!( + "provide {}.{} not found", + component_label(s, component_id), + provide_name + )) + })?; + + let program = component.program.as_ref().ok_or_else(|| { + ReporterError::new(format!( + "provide {}.{} requires a program, but component has none", + component_label(s, component_id), + provide_name + )) + })?; + + let network = program.network.as_ref().ok_or_else(|| { + ReporterError::new(format!( + "provide {}.{} requires program.network, but none exists", + component_label(s, component_id), + provide_name + )) + })?; + + let endpoint_name = provide.endpoint.as_deref().ok_or_else(|| { + ReporterError::new(format!( + "provide {}.{} is missing an endpoint reference", + component_label(s, component_id), + provide_name + )) + 
})?; + + let endpoint = network + .endpoints + .iter() + .find(|e| e.name == endpoint_name) + .ok_or_else(|| { + ReporterError::new(format!( + "provide {}.{} references unknown endpoint {:?}", + component_label(s, component_id), + provide_name, + endpoint_name + )) + })?; + + Ok(endpoint.port) +} + +fn compose_templates_dfs( + s: &Scenario, + id: ComponentId, + manifests: &[Option>], + parent_schema: Option<&Value>, + parent_template: &rc::RootConfigTemplate, + out: &mut HashMap, +) -> Result<(), String> { + let c = s.component(id); + let m = manifests[id.0].as_ref().expect("manifest should exist"); + let schema = m.config_schema().map(|s| &s.0); + + let this_template = if id == s.root { + if schema.is_some() { + rc::RootConfigTemplate::Root + } else { + rc::RootConfigTemplate::Node(rc::ConfigNode::empty_object()) + } + } else if schema.is_none() { + rc::RootConfigTemplate::Node(rc::ConfigNode::empty_object()) + } else { + let initial = config_template::parse_instance_config_template( + c.config.as_ref(), + parent_schema, + id.0 as u64, + ) + .map_err(|e| e.to_string())?; + let composed = rc::compose_config_template(initial, parent_template) + .map_err(|e| e.to_string())? + .simplify(); + rc::RootConfigTemplate::Node(composed) + }; + + out.insert(id, this_template.clone()); + + for &child in &c.children { + compose_templates_dfs(s, child, manifests, schema, &this_template, out)?; + } + Ok(()) +} + +// ---- Runtime config / helper mode support ---- + +/// Collect all config paths referenced in a RootConfigTemplate. +/// Returns None if the template is Root (indicating all config should be provided). +/// Returns Some(paths) with the specific paths referenced in the template. 
+fn collect_config_refs( + template: &rc::RootConfigTemplate, +) -> Option> { + use std::collections::BTreeSet; + + fn collect_from_node(node: &rc::ConfigNode, acc: &mut BTreeSet) { + match node { + rc::ConfigNode::ConfigRef(path) => { + acc.insert(path.clone()); + } + rc::ConfigNode::StringTemplate(parts) => { + for part in parts { + if let amber_template::TemplatePart::Config { config } = part { + acc.insert(config.clone()); + } + } + } + rc::ConfigNode::Array(items) => { + for item in items { + collect_from_node(item, acc); + } + } + rc::ConfigNode::Object(map) => { + for value in map.values() { + collect_from_node(value, acc); + } + } + _ => {} + } + } + + match template { + rc::RootConfigTemplate::Root => { + // Root template means the component IS the root and receives the entire root config. + // Return None to indicate all config paths should be provided. + None + } + rc::RootConfigTemplate::Node(node) => { + let mut paths = BTreeSet::new(); + collect_from_node(node, &mut paths); + Some(paths) + } + } +} + +/// Attempt to resolve a config interpolation to a static string; otherwise keep it as runtime. +enum ConfigResolution { + Static(String), + Runtime, +} + +/// Try to resolve a config query against a composed template. +/// Returns Static if the value is fully resolved, Runtime if it contains config refs. +fn resolve_config_query_for_program( + template: Option<&rc::ConfigNode>, + query: &str, +) -> Result { + let Some(template) = template else { + return Ok(ConfigResolution::Runtime); + }; + + // Empty query means "the whole config". 
+ if query.is_empty() { + return if !template.contains_runtime() { + let v = template.evaluate_static().map_err(|e| e.to_string())?; + Ok(ConfigResolution::Static( + rc::stringify_for_interpolation(&v).map_err(|e| e.to_string())?, + )) + } else { + Ok(ConfigResolution::Runtime) + }; + } + + // Traverse until we either: + // - reach the node (resolved) + // - hit a runtime insert (ConfigRef) before path ends (runtime) + // - find a missing key (error) + let mut cur = template; + for seg in query.split('.') { + if seg.is_empty() { + return Err(format!("invalid config path {query:?}: empty segment")); + } + match cur { + rc::ConfigNode::Object(map) => { + let Some(next) = map.get(seg) else { + return Err(format!("config.{query} not found (missing key {seg:?})")); + }; + cur = next; + } + rc::ConfigNode::ConfigRef(_) => return Ok(ConfigResolution::Runtime), + _ => { + return Err(format!( + "config.{query} not found (encountered non-object before segment {seg:?})" + )); + } + } + } + + if !cur.contains_runtime() { + let v = cur.evaluate_static().map_err(|e| e.to_string())?; + Ok(ConfigResolution::Static( + rc::stringify_for_interpolation(&v).map_err(|e| e.to_string())?, + )) + } else { + Ok(ConfigResolution::Runtime) + } +} + +/// Render a template string that is known to be fully static. +fn render_template_string_static(ts: &TemplateString) -> Result { + if rc::template_string_is_runtime(ts) { + return Err( + "internal error: attempted to render a runtime template string statically".to_string(), + ); + } + let mut out = String::new(); + for part in ts { + match part { + TemplatePart::Lit { lit } => out.push_str(lit), + TemplatePart::Config { .. } => unreachable!(), + TemplatePart::Binding { .. } => unreachable!(), + } + } + Ok(out) +} + +/// Build ProgramMode for a component by analyzing its entrypoint and env for runtime config refs. 
+#[allow(clippy::too_many_arguments)] +fn build_program_mode( + s: &Scenario, + id: ComponentId, + program: &amber_manifest::Program, + slots: &BTreeMap, + bindings: &BTreeMap, + template_opt: Option<&rc::ConfigNode>, + component_schema: Option<&Value>, + component_template: &rc::RootConfigTemplate, +) -> KubernetesResult { + let mut entrypoint_ts: Vec = Vec::new(); + let mut needs_helper = false; + + for (idx, arg) in program.args.0.iter().enumerate() { + let mut ts: TemplateString = Vec::new(); + for part in &arg.parts { + match part { + InterpolatedPart::Literal(lit) => ts.push(TemplatePart::lit(lit)), + InterpolatedPart::Interpolation { source, query } => match source { + InterpolationSource::Slots => { + let v = resolve_slot_query(slots, query).map_err(|e| { + ReporterError::new(format!( + "failed to resolve slot query in {}: {e}", + component_label(s, id) + )) + })?; + ts.push(TemplatePart::lit(v)); + } + InterpolationSource::Bindings => { + let v = resolve_binding_query(bindings, query).map_err(|e| { + ReporterError::new(format!( + "failed to resolve binding query in {}: {e}", + component_label(s, id) + )) + })?; + ts.push(TemplatePart::lit(v)); + } + InterpolationSource::Config => { + match resolve_config_query_for_program(template_opt, query) + .map_err(ReporterError::new)? 
+ { + ConfigResolution::Static(v) => ts.push(TemplatePart::lit(v)), + ConfigResolution::Runtime => { + ts.push(TemplatePart::config(query.clone())); + needs_helper = true; + } + } + } + other => { + return Err(ReporterError::new(format!( + "unsupported interpolation source {other} in {} \ + program.entrypoint[{idx}]", + component_label(s, id) + ))); + } + }, + _ => { + return Err(ReporterError::new(format!( + "unsupported interpolation part in {} program.entrypoint[{idx}]", + component_label(s, id) + ))); + } + } + } + if ts.is_empty() { + return Err(ReporterError::new(format!( + "internal error: produced empty template for {} program.entrypoint[{idx}]", + component_label(s, id) + ))); + } + entrypoint_ts.push(ts); + } + + // program.env + let mut env_ts: BTreeMap = BTreeMap::new(); + for (k, v) in &program.env { + let mut ts: TemplateString = Vec::new(); + for part in &v.parts { + match part { + InterpolatedPart::Literal(lit) => ts.push(TemplatePart::lit(lit)), + InterpolatedPart::Interpolation { source, query } => match source { + InterpolationSource::Slots => { + let vv = resolve_slot_query(slots, query).map_err(|e| { + ReporterError::new(format!( + "failed to resolve slot query in {}: {e}", + component_label(s, id) + )) + })?; + ts.push(TemplatePart::lit(vv)); + } + InterpolationSource::Bindings => { + let vv = resolve_binding_query(bindings, query).map_err(|e| { + ReporterError::new(format!( + "failed to resolve binding query in {}: {e}", + component_label(s, id) + )) + })?; + ts.push(TemplatePart::lit(vv)); + } + InterpolationSource::Config => { + match resolve_config_query_for_program(template_opt, query) + .map_err(ReporterError::new)? 
+ { + ConfigResolution::Static(vv) => ts.push(TemplatePart::lit(vv)), + ConfigResolution::Runtime => { + ts.push(TemplatePart::config(query.clone())); + needs_helper = true; + } + } + } + other => { + return Err(ReporterError::new(format!( + "unsupported interpolation source {other} in {} program.env.{k}", + component_label(s, id) + ))); + } + }, + _ => { + return Err(ReporterError::new(format!( + "unsupported interpolation part in {} program.env.{k}", + component_label(s, id) + ))); + } + } + } + env_ts.insert(k.clone(), ts); + } + + if needs_helper { + // Build TemplateSpec for the helper. + let spec = TemplateSpec { + program: ProgramTemplateSpec { + entrypoint: entrypoint_ts, + env: env_ts, + }, + }; + + let b64 = base64::engine::general_purpose::STANDARD; + + let spec_json = serde_json::to_vec(&spec).map_err(|e| { + ReporterError::new(format!( + "failed to serialize template spec for {}: {e}", + component_label(s, id) + )) + })?; + let spec_b64 = b64.encode(spec_json); + + // Convert component template to payload format. 
+ let cfg_template_value = component_template.to_json_ir(); + + let template_json = serde_json::to_vec(&cfg_template_value).map_err(|e| { + ReporterError::new(format!( + "failed to serialize component config template for {}: {e}", + component_label(s, id) + )) + })?; + let template_b64 = b64.encode(template_json); + + let schema = component_schema.ok_or_else(|| { + ReporterError::new(format!( + "component {} requires config_schema when using runtime config interpolation", + component_label(s, id) + )) + })?; + + let schema_json = serde_json::to_vec(&rc::canonical_json(schema)).map_err(|e| { + ReporterError::new(format!( + "failed to serialize component config definition for {}: {e}", + component_label(s, id) + )) + })?; + let schema_b64 = b64.encode(schema_json); + + Ok(ProgramMode::Helper { + template_spec_b64: spec_b64, + component_cfg_template_b64: template_b64, + component_schema_b64: schema_b64, + }) + } else { + // Fully resolved: render to concrete entrypoint/env. + let mut rendered_entrypoint: Vec = Vec::new(); + for ts in entrypoint_ts { + rendered_entrypoint + .push(render_template_string_static(&ts).map_err(ReporterError::new)?); + } + + let mut rendered_env: BTreeMap = BTreeMap::new(); + for (k, ts) in env_ts { + rendered_env.insert( + k, + render_template_string_static(&ts).map_err(ReporterError::new)?, + ); + } + + Ok(ProgramMode::Direct { + entrypoint: rendered_entrypoint, + env: rendered_env, + }) + } +} + +/// Generates NetworkPolicy enforcement check resources. +/// +/// This creates a two-phase test within the namespace: +/// 1. An "allowed" server that the client CAN connect to (proves networking works) +/// 2. A "blocked" server that the client should NOT be able to connect to +/// 3. A NetworkPolicy that blocks ingress to the "blocked" server only +/// 4. A client that verifies both conditions +/// +/// The client uses a "poison pill" pattern: +/// - Phase 1: Try to connect to allowed server. 
If this fails, networking is broken - exit with error. +/// - Phase 2: Try to connect to blocked server. If this succeeds, NetworkPolicy isn't enforced - exit with error. +/// - Success: Both checks pass, stay alive. +fn generate_netpol_enforcement_check( + namespace: &str, + labels: &BTreeMap, +) -> Vec<(&'static str, KubernetesResult)> { + let mut check_labels = labels.clone(); + check_labels.insert("amber.io/type".to_string(), "netpol-check".to_string()); + + let server_labels = { + let mut l = check_labels.clone(); + l.insert( + "amber.io/netpol-check-role".to_string(), + "server".to_string(), + ); + l + }; + + let client_labels = { + let mut l = check_labels.clone(); + l.insert( + "amber.io/netpol-check-role".to_string(), + "client".to_string(), + ); + l + }; + + // Create a single server deployment that listens on two ports + let server_deployment = Deployment { + api_version: "apps/v1", + kind: "Deployment", + metadata: ObjectMeta { + name: "amber-netpol-server".to_string(), + namespace: Some(namespace.to_string()), + labels: server_labels.clone(), + ..Default::default() + }, + spec: DeploymentSpec { + replicas: 1, + selector: LabelSelector { + match_labels: { + let mut m = BTreeMap::new(); + m.insert( + "amber.io/netpol-check-role".to_string(), + "server".to_string(), + ); + m + }, + }, + template: PodTemplateSpec { + metadata: ObjectMeta { + labels: server_labels.clone(), + ..Default::default() + }, + spec: PodSpec { + init_containers: Vec::new(), + containers: vec![Container { + name: "server".to_string(), + image: "busybox:1.36".to_string(), + command: vec![ + "/bin/sh".to_string(), + "-c".to_string(), + // Run two servers - one on port 8080 (allowed), one on 8081 (will be blocked) + "while true; do echo 'ready' | nc -l -p 8080 >/dev/null; done & while \ + true; do echo 'ready' | nc -l -p 8081 >/dev/null; done & wait" + .to_string(), + ], + ports: vec![ + ContainerPort { + name: "allowed".to_string(), + container_port: 8080, + protocol: "TCP", + }, + 
ContainerPort { + name: "blocked".to_string(), + container_port: 8081, + protocol: "TCP", + }, + ], + ..Default::default() + }], + volumes: Vec::new(), + restart_policy: None, + }, + }, + }, + }; + + // Service for allowed port (8080) - no NetworkPolicy blocks this + let allowed_service = Service::new( + "amber-netpol-allowed", + namespace, + check_labels.clone(), + { + let mut m = BTreeMap::new(); + m.insert( + "amber.io/netpol-check-role".to_string(), + "server".to_string(), + ); + m + }, + vec![ServicePort { + name: "tcp".to_string(), + port: 8080, + target_port: 8080, + protocol: "TCP", + }], + ); + + // Service for blocked port (8081) - NetworkPolicy will block this + let blocked_service = Service::new( + "amber-netpol-blocked", + namespace, + check_labels.clone(), + { + let mut m = BTreeMap::new(); + m.insert( + "amber.io/netpol-check-role".to_string(), + "server".to_string(), + ); + m + }, + vec![ServicePort { + name: "tcp".to_string(), + port: 8080, + target_port: 8081, + protocol: "TCP", + }], + ); + + // NetworkPolicy - deny ingress to port 8081 only + let deny_policy = NetworkPolicy { + api_version: "networking.k8s.io/v1", + kind: "NetworkPolicy", + metadata: ObjectMeta { + name: "amber-netpol-deny-blocked".to_string(), + namespace: Some(namespace.to_string()), + labels: check_labels.clone(), + ..Default::default() + }, + spec: NetworkPolicySpec { + pod_selector: LabelSelector { + match_labels: { + let mut m = BTreeMap::new(); + m.insert( + "amber.io/netpol-check-role".to_string(), + "server".to_string(), + ); + m + }, + }, + policy_types: vec!["Ingress"], + ingress: vec![ + // Allow ingress on port 8080 only + NetworkPolicyIngressRule { + ports: vec![NetworkPolicyPort { + port: 8080, + protocol: "TCP", + }], + from: Vec::new(), // from anywhere + }, + ], + egress: Vec::new(), + }, + }; + + // Client check script - simple and fast, relies on Kubernetes restart policy + let client_script = r#" +echo "==========================================" +echo 
"Amber NetworkPolicy Enforcement Check" +echo "==========================================" +echo "" + +# Phase 1: Verify we CAN connect to the allowed server +# Kubernetes will restart this pod if it fails, so no retry loop needed +echo "Phase 1: Testing basic connectivity..." +RESPONSE=$(nc amber-netpol-allowed 8080 /dev/null) +if [ "$RESPONSE" != "ready" ]; then + echo "FATAL: Cannot connect to amber-netpol-allowed:8080" + echo "Basic networking is not working. Pod will restart." + exit 1 +fi +echo " SUCCESS: Connected to allowed server and received: $RESPONSE" + +# Phase 2: Verify we CANNOT connect to the blocked server +echo "" +echo "Phase 2: Testing NetworkPolicy enforcement..." +RESPONSE=$(nc -w 2 amber-netpol-blocked 8080 /dev/null || true) +if [ -n "$RESPONSE" ]; then + echo "" + echo "==========================================" + echo "FATAL: NetworkPolicy is NOT enforced!" + echo "==========================================" + echo "" + echo "Your Kubernetes cluster's CNI does not support NetworkPolicy." + echo "Amber scenarios require NetworkPolicy for security isolation." + echo "" + echo "To fix this, either:" + echo " 1. Install a CNI that supports NetworkPolicy:" + echo " - Calico, Cilium, Weave Net, etc." + echo " 2. Re-generate with --disable-networkpolicy-check" + echo "" + exit 1 +fi +echo " SUCCESS: Connection was correctly blocked" + +echo "" +echo "==========================================" +echo "NetworkPolicy enforcement VERIFIED" +echo "==========================================" +echo "" + +# Start server to signal readiness to other pods +echo "Starting readiness server on port 8080..." 
+while true; do echo "ready" | nc -l -p 8080 >/dev/null; done +"#; + + let client_deployment = Deployment { + api_version: "apps/v1", + kind: "Deployment", + metadata: ObjectMeta { + name: "amber-netpol-client".to_string(), + namespace: Some(namespace.to_string()), + labels: client_labels.clone(), + ..Default::default() + }, + spec: DeploymentSpec { + replicas: 1, + selector: LabelSelector { + match_labels: { + let mut m = BTreeMap::new(); + m.insert( + "amber.io/netpol-check-role".to_string(), + "client".to_string(), + ); + m + }, + }, + template: PodTemplateSpec { + metadata: ObjectMeta { + labels: client_labels.clone(), + ..Default::default() + }, + spec: PodSpec { + init_containers: Vec::new(), + containers: vec![Container { + name: "client".to_string(), + image: "busybox:1.36".to_string(), + command: vec![ + "/bin/sh".to_string(), + "-c".to_string(), + client_script.to_string(), + ], + ports: vec![ContainerPort { + name: "ready".to_string(), + container_port: 8080, + protocol: "TCP", + }], + ..Default::default() + }], + volumes: Vec::new(), + restart_policy: None, + }, + }, + }, + }; + + // Client service - allows scenario pods to check if enforcement check passed + let client_service = Service::new( + "amber-netpol-client", + namespace, + client_labels.clone(), + { + let mut m = BTreeMap::new(); + m.insert( + "amber.io/netpol-check-role".to_string(), + "client".to_string(), + ); + m + }, + vec![ServicePort { + name: "http".to_string(), + port: 8080, + target_port: 8080, + protocol: "TCP", + }], + ); + + vec![ + ("server-deployment.yaml", to_yaml(&server_deployment)), + ("allowed-service.yaml", to_yaml(&allowed_service)), + ("blocked-service.yaml", to_yaml(&blocked_service)), + ("deny-policy.yaml", to_yaml(&deny_policy)), + ("client-deployment.yaml", to_yaml(&client_deployment)), + ("client-service.yaml", to_yaml(&client_service)), + ] +} + +fn binding_from_component(from: &BindingFrom) -> &ProvideRef { + match from { + BindingFrom::Component(provide) => 
provide,
+        BindingFrom::Framework(name) => {
+            unreachable!("framework binding framework.{name} should be rejected earlier")
+        }
+    }
+}
+
+fn to_yaml<T: Serialize>(value: &T) -> Result<String, ReporterError> {
+    serde_yaml::to_string(value)
+        .map_err(|e| ReporterError::new(format!("failed to serialize YAML: {e}")))
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/src/reporter/kubernetes/resources.rs b/compiler/src/reporter/kubernetes/resources.rs
new file mode 100644
index 0000000..653059a
--- /dev/null
+++ b/compiler/src/reporter/kubernetes/resources.rs
@@ -0,0 +1,645 @@
+use std::collections::BTreeMap;
+
+use serde::Serialize;
+
+/// Kubernetes object metadata.
+#[derive(Clone, Debug, Default, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ObjectMeta {
+    pub name: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
+    pub labels: BTreeMap<String, String>,
+    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
+    pub annotations: BTreeMap<String, String>,
+}
+
+// ---- Namespace ----
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Namespace {
+    pub api_version: &'static str,
+    pub kind: &'static str,
+    pub metadata: ObjectMeta,
+}
+
+impl Namespace {
+    pub fn new(name: impl Into<String>, labels: BTreeMap<String, String>) -> Self {
+        Self {
+            api_version: "v1",
+            kind: "Namespace",
+            metadata: ObjectMeta {
+                name: name.into(),
+                labels,
+                ..Default::default()
+            },
+        }
+    }
+}
+
+// ---- ConfigMap ----
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ConfigMap {
+    pub api_version: &'static str,
+    pub kind: &'static str,
+    pub metadata: ObjectMeta,
+    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
+    pub data: BTreeMap<String, String>,
+}
+
+impl ConfigMap {
+    pub fn new(
+        name: impl Into<String>,
+        namespace: impl Into<String>,
+        labels: BTreeMap<String, String>,
+        data: BTreeMap<String, String>,
+    ) -> Self {
+        Self {
+            api_version: "v1",
+            kind: "ConfigMap",
+            metadata: ObjectMeta {
+                name: name.into(),
+                namespace:
Some(namespace.into()),
+                labels,
+                ..Default::default()
+            },
+            data,
+        }
+    }
+}
+
+// ---- Secret ----
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Secret {
+    pub api_version: &'static str,
+    pub kind: &'static str,
+    pub metadata: ObjectMeta,
+    #[serde(rename = "type")]
+    pub secret_type: &'static str,
+    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
+    pub string_data: BTreeMap<String, String>,
+}
+
+impl Secret {
+    pub fn new(
+        name: impl Into<String>,
+        namespace: impl Into<String>,
+        labels: BTreeMap<String, String>,
+        string_data: BTreeMap<String, String>,
+    ) -> Self {
+        Self {
+            api_version: "v1",
+            kind: "Secret",
+            metadata: ObjectMeta {
+                name: name.into(),
+                namespace: Some(namespace.into()),
+                labels,
+                ..Default::default()
+            },
+            secret_type: "Opaque",
+            string_data,
+        }
+    }
+}
+
+// ---- Service ----
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Service {
+    pub api_version: &'static str,
+    pub kind: &'static str,
+    pub metadata: ObjectMeta,
+    pub spec: ServiceSpec,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ServiceSpec {
+    pub selector: BTreeMap<String, String>,
+    pub ports: Vec<ServicePort>,
+    #[serde(rename = "type")]
+    pub service_type: &'static str,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ServicePort {
+    pub name: String,
+    pub port: u16,
+    pub target_port: u16,
+    pub protocol: &'static str,
+}
+
+impl Service {
+    pub fn new(
+        name: impl Into<String>,
+        namespace: impl Into<String>,
+        labels: BTreeMap<String, String>,
+        selector: BTreeMap<String, String>,
+        ports: Vec<ServicePort>,
+    ) -> Self {
+        Self {
+            api_version: "v1",
+            kind: "Service",
+            metadata: ObjectMeta {
+                name: name.into(),
+                namespace: Some(namespace.into()),
+                labels,
+                ..Default::default()
+            },
+            spec: ServiceSpec {
+                selector,
+                ports,
+                service_type: "ClusterIP",
+            },
+        }
+    }
+}
+
+// ---- Deployment ----
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Deployment {
+    pub api_version: &'static str,
+    pub kind:
&'static str,
+    pub metadata: ObjectMeta,
+    pub spec: DeploymentSpec,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct DeploymentSpec {
+    pub replicas: u32,
+    pub selector: LabelSelector,
+    pub template: PodTemplateSpec,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct LabelSelector {
+    pub match_labels: BTreeMap<String, String>,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct PodTemplateSpec {
+    pub metadata: ObjectMeta,
+    pub spec: PodSpec,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct PodSpec {
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub init_containers: Vec<Container>,
+    pub containers: Vec<Container>,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub volumes: Vec<Volume>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub restart_policy: Option<&'static str>,
+}
+
+// ---- Probes ----
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Probe {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub http_get: Option<HttpGetAction>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tcp_socket: Option<TcpSocketAction>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub exec: Option<ExecAction>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub initial_delay_seconds: Option<u32>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub period_seconds: Option<u32>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub timeout_seconds: Option<u32>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub failure_threshold: Option<u32>,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct HttpGetAction {
+    pub path: String,
+    pub port: u16,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct TcpSocketAction {
+    pub port: u16,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ExecAction {
+    pub command: Vec<String>,
+}
+
+#[derive(Clone, Debug, Default, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Container {
+    pub name: String,
+    pub image: String,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub command: Vec<String>,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub args: Vec<String>,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub env: Vec<EnvVar>,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub env_from: Vec<EnvFromSource>,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub ports: Vec<ContainerPort>,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub volume_mounts: Vec<VolumeMount>,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct EnvVar {
+    pub name: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub value: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub value_from: Option<EnvVarSource>,
+}
+
+impl EnvVar {
+    pub fn literal(name: impl Into<String>, value: impl Into<String>) -> Self {
+        Self {
+            name: name.into(),
+            value: Some(value.into()),
+            value_from: None,
+        }
+    }
+
+    pub fn from_config_map(
+        name: impl Into<String>,
+        config_map_name: impl Into<String>,
+        key: impl Into<String>,
+    ) -> Self {
+        Self {
+            name: name.into(),
+            value: None,
+            value_from: Some(EnvVarSource {
+                config_map_key_ref: Some(KeyRef {
+                    name: config_map_name.into(),
+                    key: key.into(),
+                    optional: Some(true),
+                }),
+                secret_key_ref: None,
+            }),
+        }
+    }
+
+    pub fn from_secret(
+        name: impl Into<String>,
+        secret_name: impl Into<String>,
+        key: impl Into<String>,
+    ) -> Self {
+        Self {
+            name: name.into(),
+            value: None,
+            value_from: Some(EnvVarSource {
+                config_map_key_ref: None,
+                secret_key_ref: Some(KeyRef {
+                    name: secret_name.into(),
+                    key: key.into(),
+                    optional: Some(true),
+                }),
+            }),
+        }
+    }
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct EnvVarSource {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub config_map_key_ref: Option<KeyRef>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub secret_key_ref: Option<KeyRef>,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")] +pub struct KeyRef { + pub name: String, + pub key: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub optional: Option, +} + +#[derive(Clone, Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct EnvFromSource { + #[serde(skip_serializing_if = "Option::is_none")] + pub config_map_ref: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub secret_ref: Option, +} + +#[derive(Clone, Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct LocalObjectReference { + pub name: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub optional: Option, +} + +#[derive(Clone, Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ContainerPort { + pub name: String, + pub container_port: u16, + pub protocol: &'static str, +} + +#[derive(Clone, Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct VolumeMount { + pub name: String, + pub mount_path: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub read_only: Option, +} + +#[derive(Clone, Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct Volume { + pub name: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub config_map: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub secret: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub empty_dir: Option, +} + +impl Volume { + pub fn config_map(name: impl Into, config_map_name: impl Into) -> Self { + Self { + name: name.into(), + config_map: Some(ConfigMapVolumeSource { + name: config_map_name.into(), + }), + secret: None, + empty_dir: None, + } + } + + pub fn secret(name: impl Into, secret_name: impl Into) -> Self { + Self { + name: name.into(), + config_map: None, + secret: Some(SecretVolumeSource { + secret_name: secret_name.into(), + }), + empty_dir: None, + } + } + + pub fn empty_dir(name: impl Into) -> Self { + Self { + name: name.into(), + config_map: None, + secret: None, + empty_dir: 
Some(EmptyDirVolumeSource {}),
+        }
+    }
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ConfigMapVolumeSource {
+    pub name: String,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct SecretVolumeSource {
+    pub secret_name: String,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct EmptyDirVolumeSource {}
+
+// ---- NetworkPolicy ----
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NetworkPolicy {
+    pub api_version: &'static str,
+    pub kind: &'static str,
+    pub metadata: ObjectMeta,
+    pub spec: NetworkPolicySpec,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NetworkPolicySpec {
+    pub pod_selector: LabelSelector,
+    pub policy_types: Vec<&'static str>,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub ingress: Vec<NetworkPolicyIngressRule>,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub egress: Vec<NetworkPolicyEgressRule>,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NetworkPolicyIngressRule {
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub from: Vec<NetworkPolicyPeer>,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub ports: Vec<NetworkPolicyPort>,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NetworkPolicyEgressRule {
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub to: Vec<NetworkPolicyPeer>,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub ports: Vec<NetworkPolicyPort>,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NetworkPolicyPeer {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub pod_selector: Option<LabelSelector>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub namespace_selector: Option<LabelSelector>,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NetworkPolicyPort {
+    pub protocol: &'static str,
+    pub port: u16,
+}
+
+impl NetworkPolicy {
+    pub fn new(
+        name: impl Into<String>,
+        namespace:
impl Into<String>,
+        labels: BTreeMap<String, String>,
+        pod_selector: BTreeMap<String, String>,
+    ) -> Self {
+        Self {
+            api_version: "networking.k8s.io/v1",
+            kind: "NetworkPolicy",
+            metadata: ObjectMeta {
+                name: name.into(),
+                namespace: Some(namespace.into()),
+                labels,
+                ..Default::default()
+            },
+            spec: NetworkPolicySpec {
+                pod_selector: LabelSelector {
+                    match_labels: pod_selector,
+                },
+                policy_types: vec!["Ingress"],
+                ingress: Vec::new(),
+                egress: Vec::new(),
+            },
+        }
+    }
+
+    pub fn add_ingress_rule(&mut self, rule: NetworkPolicyIngressRule) {
+        self.spec.ingress.push(rule);
+    }
+}
+
+// ---- Job (for NetworkPolicy enforcement check) ----
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Job {
+    pub api_version: &'static str,
+    pub kind: &'static str,
+    pub metadata: ObjectMeta,
+    pub spec: JobSpec,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct JobSpec {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub backoff_limit: Option<u32>,
+    pub template: PodTemplateSpec,
+}
+
+impl Job {
+    pub fn new(
+        name: impl Into<String>,
+        namespace: impl Into<String>,
+        labels: BTreeMap<String, String>,
+        template: PodTemplateSpec,
+    ) -> Self {
+        Self {
+            api_version: "batch/v1",
+            kind: "Job",
+            metadata: ObjectMeta {
+                name: name.into(),
+                namespace: Some(namespace.into()),
+                labels,
+                ..Default::default()
+            },
+            spec: JobSpec {
+                backoff_limit: Some(0),
+                template,
+            },
+        }
+    }
+}
+
+// ---- Kustomization ----
+
+/// Kustomization configuration for runtime config generation.
+/// Used to generate ConfigMaps/Secrets from .env files at deploy time.
+#[derive(Clone, Debug, Default, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Kustomization {
+    pub api_version: String,
+    pub kind: String,
+    /// Resources to include (relative paths to YAML files).
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub resources: Vec<String>,
+    /// ConfigMap generators.
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub config_map_generator: Vec<ConfigMapGenerator>,
+    /// Secret generators.
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub secret_generator: Vec<SecretGenerator>,
+    /// Namespace to apply to all resources.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+}
+
+impl Kustomization {
+    pub fn new() -> Self {
+        Self {
+            api_version: "kustomize.config.k8s.io/v1beta1".to_string(),
+            kind: "Kustomization".to_string(),
+            ..Default::default()
+        }
+    }
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ConfigMapGenerator {
+    pub name: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    /// Generate from .env files.
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    #[serde(rename = "envs")]
+    pub env_files: Vec<String>,
+    /// Literal key=value pairs.
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub literals: Vec<String>,
+    /// Disable hash suffix on generated name.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub options: Option<GeneratorOptions>,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct SecretGenerator {
+    pub name: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    /// Generate from .env files.
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    #[serde(rename = "envs")]
+    pub env_files: Vec<String>,
+    /// Literal key=value pairs.
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub literals: Vec<String>,
+    /// Disable hash suffix on generated name.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub options: Option<GeneratorOptions>,
+}
+
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GeneratorOptions {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub disable_name_suffix_hash: Option<bool>,
+}
diff --git a/compiler/src/reporter/kubernetes/tests.rs b/compiler/src/reporter/kubernetes/tests.rs
new file mode 100644
index 0000000..fd3df51
--- /dev/null
+++ b/compiler/src/reporter/kubernetes/tests.rs
@@ -0,0 +1,159 @@
+use super::*;
+
+#[test]
+fn test_sanitize_dns_name() {
+    assert_eq!(sanitize_dns_name("hello"), "hello");
+    assert_eq!(sanitize_dns_name("Hello-World"), "hello-world");
+    assert_eq!(sanitize_dns_name("hello_world"), "hello-world");
+    assert_eq!(sanitize_dns_name("hello--world"), "hello-world");
+    assert_eq!(sanitize_dns_name("-hello-"), "hello");
+    assert_eq!(sanitize_dns_name("UPPERCASE"), "uppercase");
+    assert_eq!(sanitize_dns_name("with spaces"), "with-spaces");
+    assert_eq!(sanitize_dns_name(""), "component");
+    assert_eq!(sanitize_dns_name("---"), "component");
+}
+
+#[test]
+fn test_truncate_dns_name() {
+    assert_eq!(truncate_dns_name("short", 63), "short");
+    let long = "a".repeat(100);
+    assert_eq!(truncate_dns_name(&long, 63).len(), 63);
+    assert_eq!(truncate_dns_name("hello-", 5), "hello");
+}
+
+#[test]
+fn test_sanitize_label_value() {
+    assert_eq!(sanitize_label_value("hello"), "hello");
+    assert_eq!(sanitize_label_value("hello-world"), "hello-world");
+    assert_eq!(sanitize_label_value("hello_world"), "hello_world");
+    assert_eq!(sanitize_label_value("hello.world"), "hello.world");
+    assert_eq!(sanitize_label_value("hello@world"), "helloworld");
+}
+
+#[test]
+fn test_sanitize_port_name() {
+    assert_eq!(sanitize_port_name("http"), "http");
+    assert_eq!(sanitize_port_name("HTTP"), "http");
+    // Truncated to 15 chars, trailing hyphen stripped
+    assert_eq!(sanitize_port_name("very-long-port-name"), "very-long-port");
+}
+
+#[test]
+fn test_service_name() {
+
assert_eq!(service_name(ComponentId(0), "server"), "c0-server"); + assert_eq!(service_name(ComponentId(1), "My Service"), "c1-my-service"); + assert_eq!(service_name(ComponentId(42), "test"), "c42-test"); +} + +#[test] +fn test_schema_leaf_secret_field() { + let schema = serde_json::json!({ + "type": "object", + "properties": { + "api_key": { + "type": "string", + "secret": true + }, + "log_level": { + "type": "string" + }, + "database": { + "type": "object", + "properties": { + "password": { + "type": "string", + "secret": true + }, + "host": { + "type": "string" + } + } + } + } + }); + + let leaves = rc::collect_leaf_paths(&schema).expect("collect leaf paths"); + let secrets: std::collections::HashSet<_> = leaves + .iter() + .filter(|l| l.secret) + .map(|l| l.path.as_str()) + .collect(); + let non_secrets: std::collections::HashSet<_> = leaves + .iter() + .filter(|l| !l.secret) + .map(|l| l.path.as_str()) + .collect(); + + assert!(secrets.contains("api_key")); + assert!(secrets.contains("database.password")); + assert!(non_secrets.contains("log_level")); + assert!(non_secrets.contains("database.host")); +} + +#[test] +fn test_collect_config_refs() { + use std::collections::BTreeMap; + + use amber_template::TemplatePart; + + // Test with Root template - should return None (all config needed) + let root_template = rc::RootConfigTemplate::Root; + assert_eq!(collect_config_refs(&root_template), None); + + // Test with Node template containing config refs + let mut map = BTreeMap::new(); + map.insert( + "token".to_string(), + rc::ConfigNode::ConfigRef("api.token".to_string()), + ); + map.insert( + "url".to_string(), + rc::ConfigNode::StringTemplate(vec![ + TemplatePart::lit("http://"), + TemplatePart::config("api.host".to_string()), + TemplatePart::lit(":8080"), + ]), + ); + map.insert("port".to_string(), rc::ConfigNode::Number(8080.into())); + + let node_template = rc::RootConfigTemplate::Node(rc::ConfigNode::Object(map)); + let paths = 
collect_config_refs(&node_template).expect("should have paths"); + + assert_eq!(paths.len(), 2); + assert!(paths.contains("api.token")); + assert!(paths.contains("api.host")); + + // Test with nested structure + let mut inner_map = BTreeMap::new(); + inner_map.insert( + "secret".to_string(), + rc::ConfigNode::ConfigRef("db.password".to_string()), + ); + + let mut outer_map = BTreeMap::new(); + outer_map.insert("database".to_string(), rc::ConfigNode::Object(inner_map)); + outer_map.insert( + "static".to_string(), + rc::ConfigNode::String("value".to_string()), + ); + + let nested_template = rc::RootConfigTemplate::Node(rc::ConfigNode::Object(outer_map)); + let paths = collect_config_refs(&nested_template).expect("should have paths"); + + assert_eq!(paths.len(), 1); + assert!(paths.contains("db.password")); + + // Test with array containing config refs + let array_node = rc::ConfigNode::Array(vec![ + rc::ConfigNode::ConfigRef("item1".to_string()), + rc::ConfigNode::String("literal".to_string()), + rc::ConfigNode::ConfigRef("item2".to_string()), + ]); + + let array_template = rc::RootConfigTemplate::Node(array_node); + let paths = collect_config_refs(&array_template).expect("should have paths"); + + assert_eq!(paths.len(), 2); + assert!(paths.contains("item1")); + assert!(paths.contains("item2")); +} diff --git a/compiler/src/reporter/mod.rs b/compiler/src/reporter/mod.rs index 2a4b35d..4c854d1 100644 --- a/compiler/src/reporter/mod.rs +++ b/compiler/src/reporter/mod.rs @@ -6,6 +6,7 @@ use crate::CompileOutput; pub mod docker_compose; pub mod dot; +pub mod kubernetes; pub mod scenario_ir; pub use docker_compose::DockerComposeReporter; diff --git a/config/src/node.rs b/config/src/node.rs index 94a03a8..c7e3ec7 100644 --- a/config/src/node.rs +++ b/config/src/node.rs @@ -213,6 +213,13 @@ pub enum RootConfigTemplate { } impl RootConfigTemplate { + pub fn node(&self) -> Option<&ConfigNode> { + match self { + RootConfigTemplate::Root => None, + 
RootConfigTemplate::Node(node) => Some(node), + } + } + pub fn to_template_payload(&self) -> ConfigTemplatePayload { match self { RootConfigTemplate::Root => ConfigTemplatePayload::Root, diff --git a/docker/amber-compose-helper/Dockerfile b/docker/amber-helper/Dockerfile similarity index 68% rename from docker/amber-compose-helper/Dockerfile rename to docker/amber-helper/Dockerfile index 25bb0a5..a0f915c 100644 --- a/docker/amber-compose-helper/Dockerfile +++ b/docker/amber-helper/Dockerfile @@ -28,7 +28,7 @@ COPY Cargo.toml Cargo.lock ./ COPY cli/Cargo.toml cli/ COPY config/Cargo.toml config/ COPY compiler/Cargo.toml compiler/ -COPY compose-helper/Cargo.toml compose-helper/ +COPY helper/Cargo.toml helper/ COPY json5/Cargo.toml json5/ COPY manifest/Cargo.toml manifest/ COPY resolver/Cargo.toml resolver/ @@ -36,15 +36,15 @@ COPY scenario/Cargo.toml scenario/ COPY template/Cargo.toml template/ COPY node/Cargo.toml node/ -RUN mkdir -p cli/src config/src compiler/src compose-helper/src json5/src manifest/src resolver/src scenario/src template/src node/src && \ - touch cli/src/main.rs config/src/lib.rs compiler/src/lib.rs compose-helper/src/main.rs json5/src/lib.rs manifest/src/lib.rs resolver/src/lib.rs scenario/src/lib.rs template/src/lib.rs node/src/main.rs +RUN mkdir -p cli/src config/src compiler/src helper/src json5/src manifest/src resolver/src scenario/src template/src node/src && \ + touch cli/src/main.rs config/src/lib.rs compiler/src/lib.rs helper/src/main.rs json5/src/lib.rs manifest/src/lib.rs resolver/src/lib.rs scenario/src/lib.rs template/src/lib.rs node/src/main.rs RUN cargo fetch --locked -RUN rm -rf cli/src config/src compiler/src compose-helper/src json5/src manifest/src resolver/src scenario/src template/src node/src +RUN rm -rf cli/src config/src compiler/src helper/src json5/src manifest/src resolver/src scenario/src template/src node/src COPY cli ./cli COPY config ./config COPY compiler ./compiler -COPY compose-helper ./compose-helper +COPY 
helper ./helper COPY json5 ./json5 COPY manifest ./manifest COPY resolver ./resolver @@ -53,8 +53,8 @@ COPY template ./template COPY node ./node RUN target=$(cat /tmp/rust-target) && \ - cargo build -p amber-compose-helper --release --locked --target "${target}" && \ - install -D -m 0755 /app/target/"${target}"/release/amber-compose-helper /out/amber-helper + cargo build -p amber-helper --release --locked --target "${target}" && \ + install -D -m 0755 /app/target/"${target}"/release/amber-helper /out/amber-helper FROM scratch diff --git a/compose-helper/Cargo.toml b/helper/Cargo.toml similarity index 92% rename from compose-helper/Cargo.toml rename to helper/Cargo.toml index 3c564f7..1645960 100644 --- a/compose-helper/Cargo.toml +++ b/helper/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "amber-compose-helper" +name = "amber-helper" version = "0.1.0" edition = "2024" diff --git a/compose-helper/src/lib.rs b/helper/src/lib.rs similarity index 100% rename from compose-helper/src/lib.rs rename to helper/src/lib.rs diff --git a/compose-helper/src/main.rs b/helper/src/main.rs similarity index 97% rename from compose-helper/src/main.rs rename to helper/src/main.rs index 29a3ae8..7c6e653 100644 --- a/compose-helper/src/main.rs +++ b/helper/src/main.rs @@ -4,7 +4,7 @@ use std::{ process::{Command, ExitCode}, }; -use amber_compose_helper::{HelperError, RunPlan, build_run_plan}; +use amber_helper::{HelperError, RunPlan, build_run_plan}; fn main() -> ExitCode { match run_main() { diff --git a/compose-helper/tests/docker_e2e.rs b/helper/tests/docker_e2e.rs similarity index 95% rename from compose-helper/tests/docker_e2e.rs rename to helper/tests/docker_e2e.rs index 510152b..485047e 100644 --- a/compose-helper/tests/docker_e2e.rs +++ b/helper/tests/docker_e2e.rs @@ -28,7 +28,7 @@ fn encode_spec_b64(spec: &TemplateSpec) -> String { fn workspace_root() -> PathBuf { Path::new(env!("CARGO_MANIFEST_DIR")) .parent() - .expect("compose-helper crate should live under the workspace 
root") + .expect("helper crate should live under the workspace root") .to_path_buf() } @@ -42,7 +42,7 @@ fn docker_target_arch() -> &'static str { fn build_helper_image(tag: &str) { let root = workspace_root(); - let dockerfile = root.join("docker/amber-compose-helper/Dockerfile"); + let dockerfile = root.join("docker/amber-helper/Dockerfile"); let status = Command::new("docker") .arg("build") .arg("--build-arg") @@ -73,7 +73,7 @@ fn run_helper_container(image: &str, out_dir: &Path, envs: &[(String, String)]) #[test] #[ignore = "requires docker; run manually or in CI"] fn helper_image_executes_run_plan_in_scratch() { - let tag = "amber-compose-helper:e2e"; + let tag = "amber-helper:e2e"; build_helper_image(tag); let out_dir = tempdir().expect("temp dir should create"); diff --git a/test-scenarios/kubernetes-basic/client.json5 b/test-scenarios/kubernetes-basic/client.json5 new file mode 100644 index 0000000..78bba04 --- /dev/null +++ b/test-scenarios/kubernetes-basic/client.json5 @@ -0,0 +1,32 @@ +{ + manifest_version: "0.1.0", + program: { + image: "busybox:1.36", + args: [ + "sh", + "-eu", + "-c", + "\ + mkdir content\n\ + cd content\n\ + wget '${slots.server.url}/runtime_secret.txt'\n\ + wget '${slots.server.url}/runtime_config.txt'\n\ + wget '${slots.server.url}/static_secret.txt'\n\ + wget '${slots.server.url}/static_config.txt'\n\ + httpd -f -p 8080\n\ + ", + ], + network: { + endpoints: [{ name: "http", port: 8080 }], + }, + }, + slots: { + server: { kind: "http" }, + }, + provides: { + http: { kind: "http", endpoint: "http" }, + }, + exports: { + http: "http", + }, +} diff --git a/test-scenarios/kubernetes-basic/scenario.json5 b/test-scenarios/kubernetes-basic/scenario.json5 new file mode 100644 index 0000000..a1f14cf --- /dev/null +++ b/test-scenarios/kubernetes-basic/scenario.json5 @@ -0,0 +1,30 @@ +{ + manifest_version: "0.1.0", + config_schema: { + type: "object", + properties: { + server_runtime_secret: { type: "string", secret: true }, + 
server_runtime_config: { type: "string" }, + }, + required: ["server_runtime_secret", "server_runtime_config"], + }, + components: { + server: { + manifest: "./server.json5", + config: { + runtime_secret: "${config.server_runtime_secret}", + runtime_config: "${config.server_runtime_config}", + static_secret: "hardcode-this-secret", + static_config: "hardcode-this-config", + }, + }, + client: "./client.json5", + }, + bindings: [ + { to: "#client.server", from: "#server.http" }, + ], + exports: { + server_http: "#server.http", + client_http: "#client.http", + }, +} diff --git a/test-scenarios/kubernetes-basic/server.json5 b/test-scenarios/kubernetes-basic/server.json5 new file mode 100644 index 0000000..edcbe2c --- /dev/null +++ b/test-scenarios/kubernetes-basic/server.json5 @@ -0,0 +1,48 @@ +{ + manifest_version: "0.1.0", + config_schema: { + type: "object", + properties: { + runtime_secret: { type: "string", secret: true }, + runtime_config: { type: "string", }, + static_secret: { type: "string", secret: true }, + static_config: { type: "string", }, + }, + required: [ + "runtime_secret", + "runtime_config", + "static_secret", + "static_config", + ], + }, + program: { + image: "busybox:1.36", + args: [ + "sh", + "-eu", + "-c", + "\ + mkdir content\n\ + cd content\n\ + printf '%s\n' \"$RUNTIME_SECRET\" >runtime_secret.txt\n\ + printf '%s\n' '${config.runtime_config}' >runtime_config.txt\n\ + printf '%s\n' \"$STATIC_SECRET\" >static_secret.txt\n\ + printf '%s\n' '${config.static_config}' >static_config.txt\n\ + httpd -f -p 8080\n\ + ", + ], + env: { + RUNTIME_SECRET: "${config.runtime_secret}", + STATIC_SECRET: "${config.static_secret}", + }, + network: { + endpoints: [{ name: "http", port: 8080 }], + }, + }, + provides: { + http: { kind: "http", endpoint: "http" }, + }, + exports: { + http: "http", + }, +}