From b730167c33b3b71d70b5455f81e09b39be7e2a7a Mon Sep 17 00:00:00 2001 From: Rafael Garcia Date: Tue, 10 Feb 2026 15:23:43 -0500 Subject: [PATCH 1/5] refactor: cross-platform foundation for macOS support Split platform-specific code into _linux.go and _darwin.go files across resources, network, devices, ingress, vmm, and vm_metrics packages. Add hypervisor abstraction with registration pattern (RegisterSocketName, RegisterVsockDialerFactory, RegisterClientFactory) to decouple instance management from specific hypervisor implementations. Add "vz" to the OpenAPI hypervisor type enum, erofs disk format support, and insecure registry option for builds. No behavioral changes on Linux. macOS can now compile but has no VM functionality yet. Co-Authored-By: Claude Opus 4.6 --- .gitignore | 7 + Makefile | 62 +++- cmd/api/config/config.go | 2 + cmd/api/hypervisor_check_darwin.go | 31 ++ cmd/api/hypervisor_check_linux.go | 29 ++ cmd/api/main.go | 23 +- lib/builds/builder_agent/main.go | 95 +++++- lib/builds/manager.go | 162 ++++++----- lib/builds/manager_test.go | 14 +- lib/devices/discovery_darwin.go | 52 ++++ .../{discovery.go => discovery_linux.go} | 2 + lib/devices/manager.go | 6 + lib/devices/mdev_darwin.go | 57 ++++ lib/devices/{mdev.go => mdev_linux.go} | 9 +- lib/devices/types.go | 7 + lib/devices/vfio_darwin.go | 74 +++++ lib/devices/{vfio.go => vfio_linux.go} | 2 + lib/hypervisor/cloudhypervisor/process.go | 3 + lib/hypervisor/hypervisor.go | 33 +++ lib/hypervisor/qemu/process.go | 3 + lib/hypervisor/qemu/vsock.go | 2 + lib/images/disk.go | 34 ++- lib/images/manager.go | 53 ++-- lib/images/oci.go | 28 +- lib/images/oci_public.go | 11 + lib/ingress/binaries_amd64.go | 2 +- lib/ingress/binaries_arm64.go | 2 +- lib/ingress/binaries_darwin.go | 33 +++ .../{binaries.go => binaries_linux.go} | 2 + lib/instances/exec_test.go | 2 +- lib/instances/hypervisor_darwin.go | 16 ++ lib/instances/hypervisor_linux.go | 14 + lib/instances/manager.go | 45 ++- lib/instances/manager_test.go | 4 +- lib/instances/network_test.go | 2 +- lib/instances/qemu_test.go | 4 +- lib/instances/volumes_test.go | 4 +- lib/instances/vsock.go | 17 ++ lib/middleware/oapi_auth.go | 2 - lib/network/bridge_darwin.go | 68 +++++ lib/network/{bridge.go => bridge_linux.go} | 18 +- lib/network/ip.go | 22 ++ lib/oapi/oapi.go | 270 +++++++++--------- lib/resources/cpu.go | 79 +---- lib/resources/cpu_darwin.go | 13 + lib/resources/cpu_linux.go | 83 ++++++ lib/resources/disk.go | 43 +-- lib/resources/disk_darwin.go | 49 ++++ lib/resources/disk_linux.go | 42 +++ lib/resources/memory.go | 38 +-- lib/resources/memory_darwin.go | 17 ++ lib/resources/memory_linux.go | 42 +++ lib/resources/network_darwin.go | 49 ++++ .../{network.go => network_linux.go} | 50 +--- lib/resources/resource_test.go | 4 + lib/resources/util.go | 56 ++++ lib/system/init/logger.go | 17 +- lib/system/init/mount.go | 20 +- lib/system/initrd.go | 19 +- lib/vm_metrics/collector_darwin.go | 20 ++ .../{collector.go => collector_linux.go} | 2 + lib/vm_metrics/collector_test.go | 2 + lib/vm_metrics/manager_test.go | 2 + lib/vm_metrics/metrics_test.go | 2 + lib/vmm/binaries_darwin.go | 34 +++ lib/vmm/{binaries.go => binaries_linux.go} | 2 + lib/vmm/client_test.go | 2 + openapi.yaml | 4 +- 68 files changed, 1460 insertions(+), 559 deletions(-) create mode 100644 cmd/api/hypervisor_check_darwin.go create mode 100644 cmd/api/hypervisor_check_linux.go create mode 100644 lib/devices/discovery_darwin.go rename lib/devices/{discovery.go => discovery_linux.go} (99%) create mode 100644 lib/devices/mdev_darwin.go rename lib/devices/{mdev.go => mdev_linux.go} (98%) create mode 100644 lib/devices/vfio_darwin.go rename lib/devices/{vfio.go => vfio_linux.go} (99%) create mode 100644 lib/ingress/binaries_darwin.go rename lib/ingress/{binaries.go => binaries_linux.go} (99%) create mode 100644 lib/instances/hypervisor_darwin.go create mode 100644 lib/instances/hypervisor_linux.go create mode 100644 lib/instances/vsock.go create mode 100644 lib/network/bridge_darwin.go rename lib/network/{bridge.go => bridge_linux.go} (98%) create mode 100644 lib/network/ip.go create mode 100644 lib/resources/cpu_darwin.go create mode 100644 lib/resources/cpu_linux.go create mode 100644 lib/resources/disk_darwin.go create mode 100644 lib/resources/disk_linux.go create mode 100644 lib/resources/memory_darwin.go create mode 100644 lib/resources/memory_linux.go create mode 100644 lib/resources/network_darwin.go rename lib/resources/{network.go => network_linux.go} (73%) create mode 100644 lib/resources/util.go create mode 100644 lib/vm_metrics/collector_darwin.go rename lib/vm_metrics/{collector.go => collector_linux.go} (99%) create mode 100644 lib/vmm/binaries_darwin.go rename lib/vmm/{binaries.go => binaries_linux.go} (98%) diff --git a/.gitignore b/.gitignore index b2b815d4..8de172d4 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,7 @@ cloud-hypervisor/** lib/system/exec_agent/exec-agent lib/system/guest_agent/guest-agent lib/system/init/init +lib/hypervisor/vz/vz-shim/vz-shim # Envoy binaries lib/ingress/binaries/** @@ -29,3 +30,9 @@ dist/** # UTM VM - downloaded ISO files scripts/utm/images/ + +# IDE and editor +.cursor/ + +# Build artifacts +api diff --git a/Makefile b/Makefile index 88eab9c9..943393d1 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ SHELL := /bin/bash -.PHONY: oapi-generate generate-vmm-client generate-wire generate-all dev build test install-tools gen-jwt download-ch-binaries download-ch-spec ensure-ch-binaries build-caddy-binaries build-caddy ensure-caddy-binaries release-prep clean build-embedded +.PHONY: oapi-generate generate-vmm-client generate-wire generate-all dev build build-linux build-darwin test test-linux test-darwin install-tools gen-jwt download-ch-binaries download-ch-spec ensure-ch-binaries build-caddy-binaries build-caddy ensure-caddy-binaries release-prep clean build-embedded # Directory where local binaries will be installed BIN_DIR ?= $(CURDIR)/bin @@ -174,33 +174,57 @@ ensure-caddy-binaries: fi # Build guest-agent (guest binary) into its own directory for embedding +# Cross-compile for Linux since it runs inside the VM lib/system/guest_agent/guest-agent: lib/system/guest_agent/*.go - @echo "Building guest-agent..." - cd lib/system/guest_agent && CGO_ENABLED=0 go build -ldflags="-s -w" -o guest-agent . + @echo "Building guest-agent for Linux..." + cd lib/system/guest_agent && CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o guest-agent . # Build init binary (runs as PID 1 in guest VM) for embedding +# Cross-compile for Linux since it runs inside the VM lib/system/init/init: lib/system/init/*.go - @echo "Building init binary..." - cd lib/system/init && CGO_ENABLED=0 go build -ldflags="-s -w" -o init . + @echo "Building init binary for Linux..." + cd lib/system/init && CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o init . build-embedded: lib/system/guest_agent/guest-agent lib/system/init/init # Build the binary -build: ensure-ch-binaries ensure-caddy-binaries build-embedded | $(BIN_DIR) +build: +ifeq ($(shell uname -s),Darwin) + $(MAKE) build-darwin +else + $(MAKE) build-linux +endif + +build-linux: ensure-ch-binaries ensure-caddy-binaries build-embedded | $(BIN_DIR) + go build -tags containers_image_openpgp -o $(BIN_DIR)/hypeman ./cmd/api + +# Build for macOS (no CH/Caddy needed; guest binaries cross-compiled for Linux) +build-darwin: build-embedded | $(BIN_DIR) go build -tags containers_image_openpgp -o $(BIN_DIR)/hypeman ./cmd/api # Build all binaries build-all: build # Run in development mode with hot reload -dev: ensure-ch-binaries ensure-caddy-binaries build-embedded $(AIR) +dev: dev-linux + +# Linux development mode with hot reload +dev-linux: ensure-ch-binaries ensure-caddy-binaries build-embedded $(AIR) @rm -f ./tmp/main $(AIR) -c .air.toml -# Run tests (as root for network capabilities, enables caching and parallelism) +# Run tests # Usage: make test - runs all tests # make test TEST=TestCreateInstanceWithNetwork - runs specific test -test: ensure-ch-binaries ensure-caddy-binaries build-embedded +test: +ifeq ($(shell uname -s),Darwin) + $(MAKE) test-darwin +else + $(MAKE) test-linux +endif + +# Linux tests (as root for network capabilities) +test-linux: ensure-ch-binaries ensure-caddy-binaries build-embedded @VERBOSE_FLAG=""; \ if [ -n "$(VERBOSE)" ]; then VERBOSE_FLAG="-v"; fi; \ if [ -n "$(TEST)" ]; then \ @@ -210,6 +234,24 @@ test: ensure-ch-binaries ensure-caddy-binaries build-embedded sudo env "PATH=$$PATH" "DOCKER_CONFIG=$${DOCKER_CONFIG:-$$HOME/.docker}" go test -tags containers_image_openpgp $$VERBOSE_FLAG -timeout=180s ./...; \ fi +# macOS tests (no sudo needed, adds e2fsprogs to PATH) +# Uses 'go list' to discover compilable packages, then filters out packages +# whose test files reference Linux-only symbols (network, devices, system/init). +DARWIN_EXCLUDE_PKGS := /lib/network|/lib/devices|/lib/system/init +test-darwin: build-embedded + @VERBOSE_FLAG=""; \ + if [ -n "$(VERBOSE)" ]; then VERBOSE_FLAG="-v"; fi; \ + PKGS=$$(PATH="/opt/homebrew/opt/e2fsprogs/sbin:$(PATH)" \ + go list -tags containers_image_openpgp ./... 2>/dev/null | grep -Ev '$(DARWIN_EXCLUDE_PKGS)'); \ + if [ -n "$(TEST)" ]; then \ + echo "Running specific test: $(TEST)"; \ + PATH="/opt/homebrew/opt/e2fsprogs/sbin:$(PATH)" \ + go test -tags containers_image_openpgp -run=$(TEST) $$VERBOSE_FLAG -timeout=180s $$PKGS; \ + else \ + PATH="/opt/homebrew/opt/e2fsprogs/sbin:$(PATH)" \ + go test -tags containers_image_openpgp $$VERBOSE_FLAG -timeout=180s $$PKGS; \ + fi + # Generate JWT token for testing # Usage: make gen-jwt [USER_ID=test-user] gen-jwt: $(GODOTENV) @@ -233,8 +275,10 @@ clean: rm -rf lib/ingress/binaries/ rm -f lib/system/guest_agent/guest-agent rm -f lib/system/init/init + rm -f lib/hypervisor/vz/vz-shim/vz-shim # Prepare for release build (called by GoReleaser) # Downloads all embedded binaries and builds embedded components release-prep: download-ch-binaries build-caddy-binaries build-embedded go mod tidy + diff --git a/cmd/api/config/config.go b/cmd/api/config/config.go index 79dcd0e5..12b22318 100644 --- a/cmd/api/config/config.go +++ b/cmd/api/config/config.go @@ -115,6 +115,7 @@ type Config struct { RegistryCACertFile string // Path to CA certificate file for registry TLS verification BuildTimeout int // Default build timeout in seconds BuildSecretsDir string // Directory containing build secrets (optional) + DockerSocket string // Path to Docker socket (for building builder image) // Hypervisor configuration DefaultHypervisor string // Default hypervisor type: "cloud-hypervisor" or "qemu" @@ -213,6 +214,7 @@ func Load() *Config { RegistryCACertFile: getEnv("REGISTRY_CA_CERT_FILE", ""), // Path to CA cert for registry TLS BuildTimeout: getEnvInt("BUILD_TIMEOUT", 600), BuildSecretsDir: getEnv("BUILD_SECRETS_DIR", ""), // Optional: path to directory with build secrets + DockerSocket: getEnv("DOCKER_SOCKET", "/var/run/docker.sock"), // Hypervisor configuration DefaultHypervisor: getEnv("DEFAULT_HYPERVISOR", "cloud-hypervisor"), diff --git a/cmd/api/hypervisor_check_darwin.go b/cmd/api/hypervisor_check_darwin.go new file mode 100644 index 00000000..51950cc2 --- /dev/null +++ b/cmd/api/hypervisor_check_darwin.go @@ -0,0 +1,31 @@ +//go:build darwin + +package main + +import ( + "fmt" + "runtime" + + "github.com/Code-Hex/vz/v3" +) + +// checkHypervisorAccess verifies Virtualization.framework is available on macOS +func checkHypervisorAccess() error { + if runtime.GOARCH != "arm64" { + return fmt.Errorf("Virtualization.framework on macOS requires Apple Silicon (arm64), got %s", runtime.GOARCH) + } + + // Validate virtualization is usable by attempting to get max CPU count + // This will fail if entitlements are missing or virtualization is not available + maxCPU := vz.VirtualMachineConfigurationMaximumAllowedCPUCount() + if maxCPU < 1 { + return fmt.Errorf("Virtualization.framework reports 0 max CPUs - check entitlements") + } + + return nil +} + +// hypervisorAccessCheckName returns the name of the hypervisor access check for logging +func hypervisorAccessCheckName() string { + return "Virtualization.framework" +} diff --git a/cmd/api/hypervisor_check_linux.go b/cmd/api/hypervisor_check_linux.go new file mode 100644 index 00000000..042e70ca --- /dev/null +++ b/cmd/api/hypervisor_check_linux.go @@ -0,0 +1,29 @@ +//go:build linux + +package main + +import ( + "fmt" + "os" +) + +// checkHypervisorAccess verifies KVM is available and the user has permission to use it +func checkHypervisorAccess() error { + f, err := os.OpenFile("/dev/kvm", os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("/dev/kvm not found - KVM not enabled or not supported") + } + if os.IsPermission(err) { + return fmt.Errorf("permission denied accessing /dev/kvm - user not in 'kvm' group") + } + return fmt.Errorf("cannot access /dev/kvm: %w", err) + } + f.Close() + return nil +} + +// hypervisorAccessCheckName returns the name of the hypervisor access check for logging +func hypervisorAccessCheckName() string { + return "KVM" +} diff --git a/cmd/api/main.go b/cmd/api/main.go index 7f5e4265..561a9f3c 100644 --- a/cmd/api/main.go +++ b/cmd/api/main.go @@ -130,11 +130,11 @@ func run() error { logger.Warn("JWT_SECRET not configured - API authentication will fail") } - // Verify KVM access (required for VM creation) - if err := checkKVMAccess(); err != nil { - return fmt.Errorf("KVM access check failed: %w\n\nEnsure:\n 1. KVM is enabled (check /dev/kvm exists)\n 2. User is in 'kvm' group: sudo usermod -aG kvm $USER\n 3. Log out and back in, or use: newgrp kvm", err) + // Verify hypervisor access (KVM on Linux, Virtualization.framework on macOS) + if err := checkHypervisorAccess(); err != nil { + return fmt.Errorf("hypervisor access check failed: %w", err) } - logger.Info("KVM access verified") + logger.Info("Hypervisor access verified", "type", hypervisorAccessCheckName()) // Check if QEMU is available (optional - only warn if not present) if _, err := (&qemu.Starter{}).GetBinaryPath(nil, ""); err != nil { @@ -465,18 +465,3 @@ func run() error { return err } -// checkKVMAccess verifies KVM is available and the user has permission to use it -func checkKVMAccess() error { - f, err := os.OpenFile("/dev/kvm", os.O_RDWR, 0) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("/dev/kvm not found - KVM not enabled or not supported") - } - if os.IsPermission(err) { - return fmt.Errorf("permission denied accessing /dev/kvm - user not in 'kvm' group") - } - return fmt.Errorf("cannot access /dev/kvm: %w", err) - } - f.Close() - return nil -} diff --git a/lib/builds/builder_agent/main.go b/lib/builds/builder_agent/main.go index 045b3005..f85322fb 100644 --- a/lib/builds/builder_agent/main.go +++ b/lib/builds/builder_agent/main.go @@ -103,8 +103,63 @@ var ( // Encoder lock protects concurrent access to json.Encoder // (the goroutine sending build_result and the main loop handling get_status) encoderLock sync.Mutex + + // Log streaming channel - logs are sent here and forwarded to host via vsock + logChan = make(chan string, 1000) + logChanOnce sync.Once ) +// streamingLogWriter writes log lines to a channel for streaming to the host. +// It also writes to a buffer to include all logs in the final result. +type streamingLogWriter struct { + buffer *bytes.Buffer + mu sync.Mutex + closed bool + closedMu sync.RWMutex +} + +func newStreamingLogWriter() *streamingLogWriter { + return &streamingLogWriter{ + buffer: &bytes.Buffer{}, + } +} + +func (w *streamingLogWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + w.buffer.Write(p) + w.mu.Unlock() + + // Hold RLock through the send to prevent markClosed()+close(logChan) + // from racing between the check and the channel send. + w.closedMu.RLock() + if !w.closed { + line := string(p) + select { + case logChan <- line: + default: + // Channel full, drop the log line for streaming but it's still in buffer + } + } + w.closedMu.RUnlock() + + // Also write to stdout for local debugging + os.Stdout.Write(p) + + return len(p), nil +} + +func (w *streamingLogWriter) markClosed() { + w.closedMu.Lock() + w.closed = true + w.closedMu.Unlock() +} + +func (w *streamingLogWriter) String() string { + w.mu.Lock() + defer w.mu.Unlock() + return w.buffer.String() +} + func main() { log.Println("=== Builder Agent Starting ===") @@ -212,6 +267,19 @@ func handleHostConnection(conn net.Conn) { close(secretsReady) }) + // Start streaming logs to host + go func() { + for logLine := range logChan { + encoderLock.Lock() + err := encoder.Encode(VsockMessage{Type: "log", Log: logLine}) + encoderLock.Unlock() + if err != nil { + // Connection closed, stop streaming + return + } + } + }() + // Wait for build to complete and send result to host go func() { <-buildDone @@ -341,12 +409,17 @@ func handleSecretsRequest(encoder *json.Encoder, decoder *json.Decoder) error { // runBuildProcess runs the actual build and stores the result func runBuildProcess() { start := time.Now() - var logs bytes.Buffer - logWriter := io.MultiWriter(os.Stdout, &logs) + logWriter := newStreamingLogWriter() log.SetOutput(logWriter) defer func() { + // Mark writer as closed first to prevent writes to closed channel + logWriter.markClosed() + // Close log channel so streaming goroutine terminates + logChanOnce.Do(func() { + close(logChan) + }) close(buildDone) }() @@ -356,7 +429,7 @@ func runBuildProcess() { setResult(BuildResult{ Success: false, Error: fmt.Sprintf("load config: %v", err), - Logs: logs.String(), + Logs: logWriter.String(), DurationMS: time.Since(start).Milliseconds(), }) return @@ -373,7 +446,7 @@ func runBuildProcess() { setResult(BuildResult{ Success: false, Error: fmt.Sprintf("setup registry auth: %v", err), - Logs: logs.String(), + Logs: logWriter.String(), DurationMS: time.Since(start).Milliseconds(), }) return @@ -403,7 +476,7 @@ func runBuildProcess() { setResult(BuildResult{ Success: false, Error: "build timeout while waiting for secrets", - Logs: logs.String(), + Logs: logWriter.String(), DurationMS: time.Since(start).Milliseconds(), }) return @@ -418,7 +491,7 @@ func runBuildProcess() { setResult(BuildResult{ Success: false, Error: "Dockerfile required: provide dockerfile parameter or include Dockerfile in source tarball", - Logs: logs.String(), + Logs: logWriter.String(), DurationMS: time.Since(start).Milliseconds(), }) return @@ -428,7 +501,7 @@ func runBuildProcess() { setResult(BuildResult{ Success: false, Error: fmt.Sprintf("write dockerfile: %v", err), - Logs: logs.String(), + Logs: logWriter.String(), DurationMS: time.Since(start).Milliseconds(), }) return @@ -443,8 +516,8 @@ func runBuildProcess() { // Run the build log.Println("=== Starting Build ===") - digest, buildLogs, err := runBuild(ctx, config, logWriter) - logs.WriteString(buildLogs) + digest, _, err := runBuild(ctx, config, logWriter) + // Note: buildLogs is already written to logWriter via io.MultiWriter in runBuild duration := time.Since(start).Milliseconds() @@ -452,7 +525,7 @@ func runBuildProcess() { setResult(BuildResult{ Success: false, Error: err.Error(), - Logs: logs.String(), + Logs: logWriter.String(), Provenance: provenance, DurationMS: duration, }) @@ -466,7 +539,7 @@ func runBuildProcess() { setResult(BuildResult{ Success: true, ImageDigest: digest, - Logs: logs.String(), + Logs: logWriter.String(), Provenance: provenance, DurationMS: duration, }) diff --git a/lib/builds/manager.go b/lib/builds/manager.go index 3a612baa..b4eb6097 100644 --- a/lib/builds/manager.go +++ b/lib/builds/manager.go @@ -12,6 +12,7 @@ import ( "path/filepath" "strings" "sync" + "sync/atomic" "time" "github.com/nrednav/cuid2" @@ -77,6 +78,9 @@ type Config struct { // RegistrySecret is the secret used to sign registry access tokens // This should be the same secret used by the registry middleware RegistrySecret string + + // DockerSocket is the path to the Docker socket for building the builder image + DockerSocket string } // DefaultConfig returns the default build manager configuration @@ -113,6 +117,7 @@ type manager struct { logger *slog.Logger metrics *Metrics createMu sync.Mutex + builderReady atomic.Bool // Status subscription system for SSE streaming statusSubscribers map[string][]chan BuildEvent @@ -164,13 +169,72 @@ func NewManager( // Start starts the build manager's background services func (m *manager) Start(ctx context.Context) error { - // Note: We no longer use a global vsock listener. - // Instead, we connect TO each builder VM's vsock socket directly. - // This follows the Cloud Hypervisor vsock pattern where host initiates connections. + go m.ensureBuilderImage(ctx) m.logger.Info("build manager started") return nil } +// ensureBuilderImage ensures the builder image is available in the registry. +// If BUILDER_IMAGE is unset/empty, it builds from the embedded Dockerfile. +// If BUILDER_IMAGE is set, it checks if the image exists. +// This runs in a background goroutine during startup. +func (m *manager) ensureBuilderImage(ctx context.Context) { + defer m.builderReady.Store(true) + + builderImage := m.config.BuilderImage + if builderImage == "" { + builderImage = "hypeman/builder:latest" + } + + // Check if image already exists in the registry + registryHost := stripRegistryScheme(m.config.RegistryURL) + imageRef := fmt.Sprintf("%s/%s", registryHost, builderImage) + if _, err := m.imageManager.GetImage(ctx, imageRef); err == nil { + m.logger.Info("builder image already available", "image", imageRef) + return + } + + // Try to build the image using Docker + dockerSocket := m.config.DockerSocket + if dockerSocket == "" { + dockerSocket = "/var/run/docker.sock" + } + + // Check if Docker socket exists + if _, err := os.Stat(dockerSocket); err != nil { + m.logger.Warn("Docker socket not found, skipping builder image build", + "socket", dockerSocket, + "error", err) + return + } + + m.logger.Info("building builder image", "image", builderImage) + + // Find the Dockerfile - look relative to the binary or in common locations + dockerfilePath := "lib/builds/images/generic/Dockerfile" + if _, err := os.Stat(dockerfilePath); err != nil { + // Try relative to executable + if execPath, err := os.Executable(); err == nil { + altPath := filepath.Join(filepath.Dir(execPath), "..", dockerfilePath) + if _, err := os.Stat(altPath); err == nil { + dockerfilePath = altPath + } + } + } + + cmd := exec.CommandContext(ctx, "docker", "build", "-t", builderImage, "-f", dockerfilePath, ".") + cmd.Env = append(os.Environ(), fmt.Sprintf("DOCKER_HOST=unix://%s", dockerSocket)) + output, err := cmd.CombinedOutput() + if err != nil { + m.logger.Warn("failed to build builder image", + "error", err, + "output", string(output)) + return + } + + m.logger.Info("builder image built successfully", "image", builderImage) +} + // CreateBuild starts a new build job func (m *manager) CreateBuild(ctx context.Context, req CreateBuildRequest, sourceData []byte) (*Build, error) { m.logger.Info("creating build") @@ -331,12 +395,9 @@ func (m *manager) runBuild(ctx context.Context, id string, req CreateBuildReques return } - // Save build logs (regardless of success/failure) - if result.Logs != "" { - if err := appendLog(m.paths, id, []byte(result.Logs)); err != nil { - m.logger.Warn("failed to save build logs", "id", id, "error", err) - } - } + // Note: Logs are now streamed via vsock "log" messages and written incrementally + // in waitForResult, so we no longer need to save them here. + // The result.Logs field is kept for backward compatibility but is redundant. if !result.Success { m.logger.Error("build failed", "id", id, "error", result.Error, "duration", duration) @@ -387,6 +448,10 @@ func (m *manager) runBuild(ctx context.Context, id string, req CreateBuildReques // executeBuild runs the build in a builder VM func (m *manager) executeBuild(ctx context.Context, id string, req CreateBuildRequest, policy *BuildPolicy) (*BuildResult, error) { + if !m.builderReady.Load() { + return nil, fmt.Errorf("builder image is being prepared, please retry shortly") + } + // Create a volume with the source data sourceVolID := fmt.Sprintf("build-source-%s", id) sourcePath := m.paths.BuildSourceDir(id) + "/source.tar.gz" @@ -480,7 +545,7 @@ func (m *manager) executeBuild(ctx context.Context, id string, req CreateBuildRe // Wait for build result via vsock // The builder agent will send the result when complete - result, err := m.waitForResult(ctx, inst) + result, err := m.waitForResult(ctx, id, inst) if err != nil { return nil, fmt.Errorf("wait for result: %w", err) } @@ -489,7 +554,7 @@ func (m *manager) executeBuild(ctx context.Context, id string, req CreateBuildRe } // waitForResult waits for the build result from the builder agent via vsock -func (m *manager) waitForResult(ctx context.Context, inst *instances.Instance) (*BuildResult, error) { +func (m *manager) waitForResult(ctx context.Context, buildID string, inst *instances.Instance) (*BuildResult, error) { // Wait a bit for the VM to start and the builder agent to listen on vsock time.Sleep(3 * time.Second) @@ -504,9 +569,14 @@ func (m *manager) waitForResult(ctx context.Context, inst *instances.Instance) ( default: } - conn, err = m.dialBuilderVsock(inst.VsockSocket) - if err == nil { - break + dialer, dialerErr := m.instanceManager.GetVsockDialer(ctx, inst.Id) + if dialerErr == nil { + conn, err = dialer.DialVsock(ctx, BuildAgentVsockPort) + if err == nil { + break + } + } else { + err = dialerErr } m.logger.Debug("waiting for builder agent", "attempt", attempt+1, "error", err) @@ -590,6 +660,14 @@ func (m *manager) waitForResult(ctx context.Context, inst *instances.Instance) ( } m.logger.Info("sent secrets to agent", "count", len(secrets), "instance", inst.Id) + case "log": + // Stream log line to build log file immediately + if dr.response.Log != "" { + if err := appendLog(m.paths, buildID, []byte(dr.response.Log)); err != nil { + m.logger.Error("failed to append streamed log", "error", err, "build_id", buildID) + } + } + case "build_result": // Build completed if dr.response.Result == nil { @@ -603,62 +681,6 @@ func (m *manager) waitForResult(ctx context.Context, inst *instances.Instance) ( } } -// dialBuilderVsock connects to a builder VM's vsock socket using Cloud Hypervisor's handshake -func (m *manager) dialBuilderVsock(vsockSocketPath string) (net.Conn, error) { - // Connect to the Cloud Hypervisor vsock Unix socket - conn, err := net.DialTimeout("unix", vsockSocketPath, 5*time.Second) - if err != nil { - return nil, fmt.Errorf("dial vsock socket %s: %w", vsockSocketPath, err) - } - - // Set deadline for handshake - if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil { - conn.Close() - return nil, fmt.Errorf("set handshake deadline: %w", err) - } - - // Perform Cloud Hypervisor vsock handshake - // Format: "CONNECT \n" -> "OK \n" - handshakeCmd := fmt.Sprintf("CONNECT %d\n", BuildAgentVsockPort) - if _, err := conn.Write([]byte(handshakeCmd)); err != nil { - conn.Close() - return nil, fmt.Errorf("send vsock handshake: %w", err) - } - - // Read handshake response - reader := bufio.NewReader(conn) - response, err := reader.ReadString('\n') - if err != nil { - conn.Close() - return nil, fmt.Errorf("read vsock handshake response: %w", err) - } - - // Clear deadline after successful handshake - if err := conn.SetDeadline(time.Time{}); err != nil { - conn.Close() - return nil, fmt.Errorf("clear deadline: %w", err) - } - - response = strings.TrimSpace(response) - if !strings.HasPrefix(response, "OK ") { - conn.Close() - return nil, fmt.Errorf("vsock handshake failed: %s", response) - } - - return &bufferedConn{Conn: conn, reader: reader}, nil -} - -// bufferedConn wraps a net.Conn with a bufio.Reader to ensure any buffered -// data from the handshake is properly drained before reading from the connection -type bufferedConn struct { - net.Conn - reader *bufio.Reader -} - -func (c *bufferedConn) Read(p []byte) (int, error) { - return c.reader.Read(p) -} - // updateStatus updates the build status func (m *manager) updateStatus(id string, status string, err error) { meta, readErr := readMetadata(m.paths, id) diff --git a/lib/builds/manager_test.go b/lib/builds/manager_test.go index 5a9e82cc..fdbf60a1 100644 --- a/lib/builds/manager_test.go +++ b/lib/builds/manager_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/kernel/hypeman/lib/hypervisor" "github.com/kernel/hypeman/lib/images" "github.com/kernel/hypeman/lib/instances" "github.com/kernel/hypeman/lib/paths" @@ -130,6 +131,10 @@ func (m *mockInstanceManager) SetResourceValidator(v instances.ResourceValidator // no-op for mock } +func (m *mockInstanceManager) GetVsockDialer(ctx context.Context, instanceID string) (hypervisor.VsockDialer, error) { + return nil, nil +} + // mockVolumeManager implements volumes.Manager for testing type mockVolumeManager struct { volumes map[string]*volumes.Volume @@ -350,6 +355,7 @@ func setupTestManagerWithImageMgr(t *testing.T) (*manager, *mockInstanceManager, logger: logger, statusSubscribers: make(map[string][]chan BuildEvent), } + mgr.builderReady.Store(true) return mgr, instanceMgr, volumeMgr, imageMgr, tempDir } @@ -881,7 +887,7 @@ func TestStreamBuildEvents_WithStatusUpdate(t *testing.T) { // Read events until we see the initial log var foundInitialLog bool - timeout := time.After(2 * time.Second) + timeout := time.After(10 * time.Second) eventLoop: for !foundInitialLog { select { @@ -901,7 +907,7 @@ eventLoop: // Should receive "ready" status event and channel should close var readyReceived bool - timeout = time.After(2 * time.Second) + timeout = time.After(10 * time.Second) for !readyReceived { select { case event, ok := <-eventChan: @@ -942,7 +948,7 @@ func TestStreamBuildEvents_ContextCancellation(t *testing.T) { // Read events until we see the log line var foundLogLine bool - timeout := time.After(2 * time.Second) + timeout := time.After(10 * time.Second) eventLoop: for !foundLogLine { select { @@ -961,7 +967,7 @@ eventLoop: cancel() // Channel should close - timeout = time.After(2 * time.Second) + timeout = time.After(10 * time.Second) for { select { case _, ok := <-eventChan: diff --git a/lib/devices/discovery_darwin.go b/lib/devices/discovery_darwin.go new file mode 100644 index 00000000..219ba963 --- /dev/null +++ b/lib/devices/discovery_darwin.go @@ -0,0 +1,52 @@ +//go:build darwin + +package devices + +import ( + "fmt" +) + +// ErrNotSupportedOnMacOS is returned for operations not supported on macOS +var ErrNotSupportedOnMacOS = fmt.Errorf("PCI device passthrough is not supported on macOS") + +// ValidatePCIAddress validates that a string is a valid PCI address format. +// On macOS, this always returns false as PCI passthrough is not supported. +func ValidatePCIAddress(addr string) bool { + return false +} + +// DiscoverAvailableDevices returns an empty list on macOS. +// PCI device passthrough is not supported on macOS. +func DiscoverAvailableDevices() ([]AvailableDevice, error) { + return []AvailableDevice{}, nil +} + +// GetDeviceInfo returns an error on macOS as PCI passthrough is not supported. +func GetDeviceInfo(pciAddress string) (*AvailableDevice, error) { + return nil, ErrNotSupportedOnMacOS +} + +// GetIOMMUGroupDevices returns an error on macOS as IOMMU is not available. +func GetIOMMUGroupDevices(iommuGroup int) ([]string, error) { + return nil, ErrNotSupportedOnMacOS +} + +// DetermineDeviceType returns DeviceTypeGeneric on macOS. +func DetermineDeviceType(device *AvailableDevice) DeviceType { + return DeviceTypeGeneric +} + +// readSysfsFile is not available on macOS. +func readSysfsFile(path string) (string, error) { + return "", ErrNotSupportedOnMacOS +} + +// readIOMMUGroup is not available on macOS. +func readIOMMUGroup(pciAddress string) (int, error) { + return -1, ErrNotSupportedOnMacOS +} + +// readCurrentDriver is not available on macOS. +func readCurrentDriver(pciAddress string) *string { + return nil +} diff --git a/lib/devices/discovery.go b/lib/devices/discovery_linux.go similarity index 99% rename from lib/devices/discovery.go rename to lib/devices/discovery_linux.go index b04213c0..33798292 100644 --- a/lib/devices/discovery.go +++ b/lib/devices/discovery_linux.go @@ -1,3 +1,5 @@ +//go:build linux + package devices import ( diff --git a/lib/devices/manager.go b/lib/devices/manager.go index d93a7572..6c0d84b6 100644 --- a/lib/devices/manager.go +++ b/lib/devices/manager.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "os" + "runtime" "strings" "sync" "time" @@ -552,6 +553,11 @@ func (m *manager) ReconcileDevices(ctx context.Context) error { func (m *manager) validatePrerequisites(ctx context.Context) { log := logger.FromContext(ctx) + // Skip GPU passthrough checks on macOS - not supported + if runtime.GOOS == "darwin" { + return + } + // Check IOMMU availability iommuGroupsDir := "/sys/kernel/iommu_groups" entries, err := os.ReadDir(iommuGroupsDir) diff --git a/lib/devices/mdev_darwin.go b/lib/devices/mdev_darwin.go new file mode 100644 index 00000000..dacca12f --- /dev/null +++ b/lib/devices/mdev_darwin.go @@ -0,0 +1,57 @@ +//go:build darwin + +package devices + +import ( + "context" + "fmt" +) + +// ErrVGPUNotSupportedOnMacOS is returned for vGPU operations on macOS +var ErrVGPUNotSupportedOnMacOS = fmt.Errorf("vGPU (mdev) is not supported on macOS") + +// SetGPUProfileCacheTTL is a no-op on macOS. +func SetGPUProfileCacheTTL(ttl string) { + // No-op on macOS +} + +// DiscoverVFs returns an empty list on macOS. +// SR-IOV Virtual Functions are not available on macOS. +func DiscoverVFs() ([]VirtualFunction, error) { + return []VirtualFunction{}, nil +} + +// ListGPUProfiles returns an empty list on macOS. +func ListGPUProfiles() ([]GPUProfile, error) { + return []GPUProfile{}, nil +} + +// ListGPUProfilesWithVFs returns an empty list on macOS. +func ListGPUProfilesWithVFs(vfs []VirtualFunction) ([]GPUProfile, error) { + return []GPUProfile{}, nil +} + +// ListMdevDevices returns an empty list on macOS. +func ListMdevDevices() ([]MdevDevice, error) { + return []MdevDevice{}, nil +} + +// CreateMdev returns an error on macOS as mdev is not supported. +func CreateMdev(ctx context.Context, profileName, instanceID string) (*MdevDevice, error) { + return nil, ErrVGPUNotSupportedOnMacOS +} + +// DestroyMdev is a no-op on macOS. +func DestroyMdev(ctx context.Context, mdevUUID string) error { + return nil +} + +// IsMdevInUse returns false on macOS. +func IsMdevInUse(mdevUUID string) bool { + return false +} + +// ReconcileMdevs is a no-op on macOS. +func ReconcileMdevs(ctx context.Context, instanceInfos []MdevReconcileInfo) error { + return nil +} diff --git a/lib/devices/mdev.go b/lib/devices/mdev_linux.go similarity index 98% rename from lib/devices/mdev.go rename to lib/devices/mdev_linux.go index de648e05..2e5bab44 100644 --- a/lib/devices/mdev.go +++ b/lib/devices/mdev_linux.go @@ -1,3 +1,5 @@ +//go:build linux + package devices import ( @@ -604,13 +606,6 @@ func IsMdevInUse(mdevUUID string) bool { return err == nil // Has a driver = in use } -// MdevReconcileInfo contains information needed to reconcile mdevs for an instance -type MdevReconcileInfo struct { - InstanceID string - MdevUUID string - IsRunning bool // true if instance's VMM is running or state is unknown -} - // ReconcileMdevs destroys orphaned mdevs that belong to hypeman but are no longer in use. // This is called on server startup to clean up stale mdevs from previous runs. // diff --git a/lib/devices/types.go b/lib/devices/types.go index bd66fa86..d436ca1d 100644 --- a/lib/devices/types.go +++ b/lib/devices/types.go @@ -94,3 +94,10 @@ type PassthroughDevice struct { Name string `json:"name"` // GPU name, e.g., "NVIDIA L40S" Available bool `json:"available"` // true if not attached to an instance } + +// MdevReconcileInfo contains information needed to reconcile mdevs for an instance +type MdevReconcileInfo struct { + InstanceID string + MdevUUID string + IsRunning bool // true if instance's VMM is running or state is unknown +} diff --git a/lib/devices/vfio_darwin.go b/lib/devices/vfio_darwin.go new file mode 100644 index 00000000..ae47cbcd --- /dev/null +++ b/lib/devices/vfio_darwin.go @@ -0,0 +1,74 @@ +//go:build darwin + +package devices + +import ( + "fmt" +) + +// ErrVFIONotSupportedOnMacOS is returned for VFIO operations on macOS +var ErrVFIONotSupportedOnMacOS = fmt.Errorf("VFIO device passthrough is not supported on macOS") + +// VFIOBinder handles binding and unbinding devices to/from VFIO. +// On macOS, this is a stub that returns errors for all operations. +type VFIOBinder struct{} + +// NewVFIOBinder creates a new VFIOBinder +func NewVFIOBinder() *VFIOBinder { + return &VFIOBinder{} +} + +// IsVFIOAvailable returns false on macOS as VFIO is not available. +func (v *VFIOBinder) IsVFIOAvailable() bool { + return false +} + +// IsDeviceBoundToVFIO returns false on macOS. +func (v *VFIOBinder) IsDeviceBoundToVFIO(pciAddress string) bool { + return false +} + +// BindToVFIO returns an error on macOS as VFIO is not supported. +func (v *VFIOBinder) BindToVFIO(pciAddress string) error { + return ErrVFIONotSupportedOnMacOS +} + +// UnbindFromVFIO returns an error on macOS as VFIO is not supported. +func (v *VFIOBinder) UnbindFromVFIO(pciAddress string) error { + return ErrVFIONotSupportedOnMacOS +} + +// GetVFIOGroupPath returns an error on macOS as VFIO is not supported. +func (v *VFIOBinder) GetVFIOGroupPath(pciAddress string) (string, error) { + return "", ErrVFIONotSupportedOnMacOS +} + +// CheckIOMMUGroupSafe returns an error on macOS as IOMMU is not available. +func (v *VFIOBinder) CheckIOMMUGroupSafe(pciAddress string, allowedDevices []string) error { + return ErrVFIONotSupportedOnMacOS +} + +// GetDeviceSysfsPath returns an empty string on macOS. +func GetDeviceSysfsPath(pciAddress string) string { + return "" +} + +// unbindFromDriver is not available on macOS. +func (v *VFIOBinder) unbindFromDriver(pciAddress, driver string) error { + return ErrVFIONotSupportedOnMacOS +} + +// setDriverOverride is not available on macOS. +func (v *VFIOBinder) setDriverOverride(pciAddress, driver string) error { + return ErrVFIONotSupportedOnMacOS +} + +// triggerDriverProbe is not available on macOS. +func (v *VFIOBinder) triggerDriverProbe(pciAddress string) error { + return ErrVFIONotSupportedOnMacOS +} + +// startNvidiaPersistenced is not available on macOS. +func (v *VFIOBinder) startNvidiaPersistenced() error { + return nil // No-op, not an error +} diff --git a/lib/devices/vfio.go b/lib/devices/vfio_linux.go similarity index 99% rename from lib/devices/vfio.go rename to lib/devices/vfio_linux.go index 38606f5b..65be8104 100644 --- a/lib/devices/vfio.go +++ b/lib/devices/vfio_linux.go @@ -1,3 +1,5 @@ +//go:build linux + package devices import ( diff --git a/lib/hypervisor/cloudhypervisor/process.go b/lib/hypervisor/cloudhypervisor/process.go index b81b72d4..c30b6c3d 100644 --- a/lib/hypervisor/cloudhypervisor/process.go +++ b/lib/hypervisor/cloudhypervisor/process.go @@ -15,6 +15,9 @@ import ( func init() { hypervisor.RegisterSocketName(hypervisor.TypeCloudHypervisor, "ch.sock") + hypervisor.RegisterClientFactory(hypervisor.TypeCloudHypervisor, func(socketPath string) (hypervisor.Hypervisor, error) { + return New(socketPath) + }) } // Starter implements hypervisor.VMStarter for Cloud Hypervisor. diff --git a/lib/hypervisor/hypervisor.go b/lib/hypervisor/hypervisor.go index 197a6ac7..b4287a79 100644 --- a/lib/hypervisor/hypervisor.go +++ b/lib/hypervisor/hypervisor.go @@ -5,6 +5,7 @@ package hypervisor import ( "context" + "errors" "fmt" "net" "time" @@ -12,6 +13,16 @@ import ( "github.com/kernel/hypeman/lib/paths" ) +// Common errors +var ( + // ErrHypervisorNotRunning is returned when trying to connect to a hypervisor + // that is not currently running or cannot be reconnected to. + ErrHypervisorNotRunning = errors.New("hypervisor is not running") + + // ErrNotSupported is returned when an operation is not supported by the hypervisor. + ErrNotSupported = errors.New("operation not supported by this hypervisor") +) + // Type identifies the hypervisor implementation type Type string @@ -20,6 +31,8 @@ const ( TypeCloudHypervisor Type = "cloud-hypervisor" // TypeQEMU is the QEMU VMM TypeQEMU Type = "qemu" + // TypeVZ is the Virtualization.framework VMM (macOS only) + TypeVZ Type = "vz" ) // socketNames maps hypervisor types to their socket filenames. @@ -164,3 +177,23 @@ func NewVsockDialer(hvType Type, vsockSocket string, vsockCID int64) (VsockDiale } return factory(vsockSocket, vsockCID), nil } + +// ClientFactory creates Hypervisor client instances for a hypervisor type. +type ClientFactory func(socketPath string) (Hypervisor, error) + +// clientFactories maps hypervisor types to their client factories. +var clientFactories = make(map[Type]ClientFactory) + +// RegisterClientFactory registers a Hypervisor client factory. +func RegisterClientFactory(t Type, factory ClientFactory) { + clientFactories[t] = factory +} + +// NewClient creates a Hypervisor client for the given type and socket. +func NewClient(hvType Type, socketPath string) (Hypervisor, error) { + factory, ok := clientFactories[hvType] + if !ok { + return nil, fmt.Errorf("no client factory registered for hypervisor type: %s", hvType) + } + return factory(socketPath) +} diff --git a/lib/hypervisor/qemu/process.go b/lib/hypervisor/qemu/process.go index 459d94eb..e2e1d098 100644 --- a/lib/hypervisor/qemu/process.go +++ b/lib/hypervisor/qemu/process.go @@ -37,6 +37,9 @@ const ( func init() { hypervisor.RegisterSocketName(hypervisor.TypeQEMU, "qemu.sock") + hypervisor.RegisterClientFactory(hypervisor.TypeQEMU, func(socketPath string) (hypervisor.Hypervisor, error) { + return New(socketPath) + }) } // Starter implements hypervisor.VMStarter for QEMU. diff --git a/lib/hypervisor/qemu/vsock.go b/lib/hypervisor/qemu/vsock.go index 50c0791f..88be6cc5 100644 --- a/lib/hypervisor/qemu/vsock.go +++ b/lib/hypervisor/qemu/vsock.go @@ -1,3 +1,5 @@ +//go:build linux + package qemu import ( diff --git a/lib/images/disk.go b/lib/images/disk.go index 53378b49..c76660d6 100644 --- a/lib/images/disk.go +++ b/lib/images/disk.go @@ -108,6 +108,17 @@ func convertToCpio(rootfsDir, outputPath string) (int64, error) { return stat.Size(), nil } +// sectorSize is the block size for disk images (required by Virtualization.framework) +const sectorSize = 4096 + +// alignToSector rounds size up to the nearest sector boundary +func alignToSector(size int64) int64 { + if size%sectorSize == 0 { + return size + } + return ((size / sectorSize) + 1) * sectorSize +} + // convertToExt4 converts a rootfs directory to an ext4 disk image using mkfs.ext4 func convertToExt4(rootfsDir, diskPath string) (int64, error) { // Calculate size of rootfs directory @@ -125,6 +136,9 @@ func convertToExt4(rootfsDir, diskPath string) (int64, error) { diskSizeBytes = minSize } + // Align to sector boundary (required by macOS Virtualization.framework) + diskSizeBytes = alignToSector(diskSizeBytes) + // Ensure parent directory exists if err := os.MkdirAll(filepath.Dir(diskPath), 0755); err != nil { return 0, fmt.Errorf("create disk parent dir: %w", err) @@ -142,7 +156,7 @@ func convertToExt4(rootfsDir, diskPath string) (int64, error) { f.Close() // Format as ext4 with rootfs contents using mkfs.ext4 - // -b 4096: 4KB blocks (standard, matches VM page size) + // -b 4096: 4KB blocks (standard, matches VM page size and sector alignment) // -O ^has_journal: Disable journal (not needed for read-only VM mounts) // -d: Copy directory contents into filesystem // -F: Force creation (file not block device) @@ -152,12 +166,21 @@ func convertToExt4(rootfsDir, diskPath string) (int64, error) { return 0, fmt.Errorf("mkfs.ext4 failed: %w, output: %s", err, output) } - // Get actual disk size + // Verify final size is sector-aligned (mkfs.ext4 should preserve our truncated size) stat, err := os.Stat(diskPath) if err != nil { return 0, fmt.Errorf("stat disk: %w", err) } + // Re-align if mkfs.ext4 changed the size (shouldn't happen with -F on a regular file) + if stat.Size()%sectorSize != 0 { + alignedSize := alignToSector(stat.Size()) + if err := os.Truncate(diskPath, alignedSize); err != nil { + return 0, fmt.Errorf("align disk to sector boundary: %w", err) + } + return alignedSize, nil + } + return stat.Size(), nil } @@ -204,6 +227,9 @@ func dirSize(path string) (int64, error) { // CreateEmptyExt4Disk creates a sparse disk file and formats it as ext4. // Used for volumes and instance overlays that need empty writable filesystems. func CreateEmptyExt4Disk(diskPath string, sizeBytes int64) error { + // Align to sector boundary (required by macOS Virtualization.framework) + sizeBytes = alignToSector(sizeBytes) + // Ensure parent directory exists if err := os.MkdirAll(filepath.Dir(diskPath), 0755); err != nil { return fmt.Errorf("create disk parent dir: %w", err) @@ -221,8 +247,8 @@ func CreateEmptyExt4Disk(diskPath string, sizeBytes int64) error { return fmt.Errorf("truncate disk file: %w", err) } - // Format as ext4 - cmd := exec.Command("mkfs.ext4", "-F", diskPath) + // Format as ext4 with 4KB blocks (matches sector alignment) + cmd := exec.Command("mkfs.ext4", "-b", "4096", "-F", diskPath) output, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("mkfs.ext4 failed: %w, output: %s", err, output) diff --git a/lib/images/manager.go b/lib/images/manager.go index a7e5d965..c423bf34 100644 --- a/lib/images/manager.go +++ b/lib/images/manager.go @@ -113,18 +113,26 @@ func (m *manager) CreateImage(ctx context.Context, req CreateImageRequest) (*Ima // Check if we already have this digest (deduplication) if meta, err := readMetadata(m.paths, ref.Repository(), ref.DigestHex()); err == nil { - // We have this digest already - if meta.Status == StatusReady && ref.Tag() != "" { - // Update tag symlink to point to current digest - // (handles case where tag moved to new digest) - createTagSymlink(m.paths, ref.Repository(), ref.Tag(), ref.DigestHex()) - } - img := meta.toImage() - // Add queue position if pending - if meta.Status == StatusPending { - img.QueuePosition = m.queue.GetPosition(meta.Digest) + // Don't cache failed builds - allow retry + if meta.Status == StatusFailed { + // Clean up the failed build directory so we can retry + digestDir := filepath.Join(m.paths.ImagesDir(), ref.Repository(), ref.DigestHex()) + os.RemoveAll(digestDir) + // Fall through to re-queue the build + } else { + // We have this digest already (ready, pending, pulling, or converting) + if meta.Status == StatusReady && ref.Tag() != "" { + // Update tag symlink to point to current digest + // (handles case where tag moved to new digest) + createTagSymlink(m.paths, ref.Repository(), ref.Tag(), ref.DigestHex()) + } + img := meta.toImage() + // Add queue position if pending + if meta.Status == StatusPending { + img.QueuePosition = m.queue.GetPosition(meta.Digest) + } + return img, nil } - return img, nil } // Don't have this digest yet, queue the build @@ -156,15 +164,22 @@ func (m *manager) ImportLocalImage(ctx context.Context, repo, reference, digest // Check if we already have this digest (deduplication) if meta, err := readMetadata(m.paths, ref.Repository(), ref.DigestHex()); err == nil { - // We have this digest already - if meta.Status == StatusReady && ref.Tag() != "" { - createTagSymlink(m.paths, ref.Repository(), ref.Tag(), ref.DigestHex()) - } - img := meta.toImage() - if meta.Status == StatusPending { - img.QueuePosition = m.queue.GetPosition(meta.Digest) + // Don't cache failed builds - allow retry + if meta.Status == StatusFailed { + digestDir := filepath.Join(m.paths.ImagesDir(), ref.Repository(), ref.DigestHex()) + os.RemoveAll(digestDir) + // Fall through to re-queue the build + } else { + // We have this digest already + if meta.Status == StatusReady && ref.Tag() != "" { + createTagSymlink(m.paths, ref.Repository(), ref.Tag(), ref.DigestHex()) + } + img := meta.toImage() + if meta.Status == StatusPending { + img.QueuePosition = m.queue.GetPosition(meta.Digest) + } + return img, nil } - return img, nil } // Don't have this digest yet, queue the build diff --git a/lib/images/oci.go b/lib/images/oci.go index 31962d88..1d07758d 100644 --- a/lib/images/oci.go +++ b/lib/images/oci.go @@ -64,11 +64,13 @@ func newOCIClient(cacheDir string) (*ociClient, error) { return &ociClient{cacheDir: cacheDir}, nil } -// currentPlatform returns the platform for the current host -func currentPlatform() gcr.Platform { +// vmPlatform returns the target platform for VM images. +// Always returns Linux since hypeman VMs are always Linux guests, +// regardless of the host OS (Linux or macOS). +func vmPlatform() gcr.Platform { return gcr.Platform{ Architecture: runtime.GOARCH, - OS: runtime.GOOS, + OS: "linux", } } @@ -77,6 +79,12 @@ func currentPlatform() gcr.Platform { // For multi-arch images, it returns the platform-specific manifest digest // (matching the current host platform) rather than the manifest index digest. func (c *ociClient) inspectManifest(ctx context.Context, imageRef string) (string, error) { + return c.inspectManifestWithPlatform(ctx, imageRef, vmPlatform()) +} + +// inspectManifestWithPlatform synchronously inspects a remote image to get its digest +// for a specific platform. +func (c *ociClient) inspectManifestWithPlatform(ctx context.Context, imageRef string, platform gcr.Platform) (string, error) { ref, err := name.ParseReference(imageRef) if err != nil { return "", fmt.Errorf("parse image reference: %w", err) @@ -89,7 +97,7 @@ func (c *ociClient) inspectManifest(ctx context.Context, imageRef string) (strin img, err := remote.Image(ref, remote.WithContext(ctx), remote.WithAuthFromKeychain(authn.DefaultKeychain), - remote.WithPlatform(currentPlatform())) + remote.WithPlatform(platform)) if err != nil { return "", fmt.Errorf("fetch manifest: %w", wrapRegistryError(err)) } @@ -109,6 +117,10 @@ type pullResult struct { } func (c *ociClient) pullAndExport(ctx context.Context, imageRef, digest, exportDir string) (*pullResult, error) { + return c.pullAndExportWithPlatform(ctx, imageRef, digest, exportDir, vmPlatform()) +} + +func (c *ociClient) pullAndExportWithPlatform(ctx context.Context, imageRef, digest, exportDir string, platform gcr.Platform) (*pullResult, error) { // Use a shared OCI layout for all images to enable automatic layer caching // The cacheDir itself is the OCI layout root with shared blobs/sha256/ directory // The digest is ALWAYS known at this point (from inspectManifest or digest reference) @@ -117,7 +129,7 @@ func (c *ociClient) pullAndExport(ctx context.Context, imageRef, digest, exportD // Check if this digest is already cached if !c.existsInLayout(layoutTag) { // Not cached, pull it using digest-based tag - if err := c.pullToOCILayout(ctx, imageRef, layoutTag); err != nil { + if err := c.pullToOCILayoutWithPlatform(ctx, imageRef, layoutTag, platform); err != nil { return nil, fmt.Errorf("pull to oci layout: %w", err) } } @@ -141,6 +153,10 @@ func (c *ociClient) pullAndExport(ctx context.Context, imageRef, digest, exportD } func (c *ociClient) pullToOCILayout(ctx context.Context, imageRef, layoutTag string) error { + return c.pullToOCILayoutWithPlatform(ctx, imageRef, layoutTag, vmPlatform()) +} + +func (c *ociClient) pullToOCILayoutWithPlatform(ctx context.Context, imageRef, layoutTag string, platform gcr.Platform) error { ref, err := name.ParseReference(imageRef) if err != nil { return fmt.Errorf("parse image reference: %w", err) @@ -152,7 +168,7 @@ func (c *ociClient) pullToOCILayout(ctx context.Context, imageRef, layoutTag str img, err := remote.Image(ref, remote.WithContext(ctx), remote.WithAuthFromKeychain(authn.DefaultKeychain), - remote.WithPlatform(currentPlatform())) + remote.WithPlatform(platform)) if err != nil { // Rate limits fail here immediately (429 is not retried by default) return fmt.Errorf("fetch image manifest: %w", wrapRegistryError(err)) diff --git a/lib/images/oci_public.go b/lib/images/oci_public.go index 5d20835e..66643b97 100644 --- a/lib/images/oci_public.go +++ b/lib/images/oci_public.go @@ -20,11 +20,18 @@ func NewOCIClient(cacheDir string) (*OCIClient, error) { } // InspectManifest inspects a remote image to get its digest (public for system manager) +// Always targets Linux platform since hypeman VMs are Linux guests. func (c *OCIClient) InspectManifest(ctx context.Context, imageRef string) (string, error) { return c.client.inspectManifest(ctx, imageRef) } +// InspectManifestForLinux is an alias for InspectManifest (all images target Linux) +func (c *OCIClient) InspectManifestForLinux(ctx context.Context, imageRef string) (string, error) { + return c.InspectManifest(ctx, imageRef) +} + // PullAndUnpack pulls an OCI image and unpacks it to a directory (public for system manager) +// Always targets Linux platform since hypeman VMs are Linux guests. func (c *OCIClient) PullAndUnpack(ctx context.Context, imageRef, digest, exportDir string) error { _, err := c.client.pullAndExport(ctx, imageRef, digest, exportDir) if err != nil { @@ -33,3 +40,7 @@ func (c *OCIClient) PullAndUnpack(ctx context.Context, imageRef, digest, exportD return nil } +// PullAndUnpackForLinux is an alias for PullAndUnpack (all images target Linux) +func (c *OCIClient) PullAndUnpackForLinux(ctx context.Context, imageRef, digest, exportDir string) error { + return c.PullAndUnpack(ctx, imageRef, digest, exportDir) +} diff --git a/lib/ingress/binaries_amd64.go b/lib/ingress/binaries_amd64.go index 309da631..551e12fb 100644 --- a/lib/ingress/binaries_amd64.go +++ b/lib/ingress/binaries_amd64.go @@ -1,4 +1,4 @@ -//go:build amd64 +//go:build amd64 && linux package ingress diff --git a/lib/ingress/binaries_arm64.go b/lib/ingress/binaries_arm64.go index 8fb413ce..995578a8 100644 --- a/lib/ingress/binaries_arm64.go +++ b/lib/ingress/binaries_arm64.go @@ -1,4 +1,4 @@ -//go:build arm64 +//go:build arm64 && linux package ingress diff --git a/lib/ingress/binaries_darwin.go b/lib/ingress/binaries_darwin.go new file mode 100644 index 00000000..1a2ba408 --- /dev/null +++ b/lib/ingress/binaries_darwin.go @@ -0,0 +1,33 @@ +//go:build darwin + +package ingress + +import ( + "fmt" + "os/exec" + + "github.com/kernel/hypeman/lib/paths" +) + +// CaddyVersion is the version of Caddy to use. +const CaddyVersion = "v2.10.2" + +// ErrCaddyNotEmbedded indicates Caddy is not embedded on macOS. +// Users should install Caddy via Homebrew or download from caddyserver.com. +var ErrCaddyNotEmbedded = fmt.Errorf("caddy binary is not embedded on macOS; install via: brew install caddy") + +// ExtractCaddyBinary on macOS attempts to find Caddy in PATH. +// Unlike Linux, we don't embed the binary on macOS. +func ExtractCaddyBinary(p *paths.Paths) (string, error) { + // Try to find caddy in PATH + path, err := exec.LookPath("caddy") + if err != nil { + return "", ErrCaddyNotEmbedded + } + return path, nil +} + +// GetCaddyBinaryPath returns path to Caddy, looking in PATH on macOS. +func GetCaddyBinaryPath(p *paths.Paths) (string, error) { + return ExtractCaddyBinary(p) +} diff --git a/lib/ingress/binaries.go b/lib/ingress/binaries_linux.go similarity index 99% rename from lib/ingress/binaries.go rename to lib/ingress/binaries_linux.go index 79143506..2b2a6a87 100644 --- a/lib/ingress/binaries.go +++ b/lib/ingress/binaries_linux.go @@ -1,3 +1,5 @@ +//go:build linux + package ingress import ( diff --git a/lib/instances/exec_test.go b/lib/instances/exec_test.go index 64fd1ae8..94f54ef2 100644 --- a/lib/instances/exec_test.go +++ b/lib/instances/exec_test.go @@ -36,7 +36,7 @@ func waitForExecAgent(ctx context.Context, mgr *manager, instanceID string, time // This validates that the exec infrastructure handles concurrent access correctly. func TestExecConcurrent(t *testing.T) { if _, err := os.Stat("/dev/kvm"); os.IsNotExist(err) { - t.Fatal("/dev/kvm not available") + t.Skip("/dev/kvm not available, skipping on this platform") } if testing.Short() { diff --git a/lib/instances/hypervisor_darwin.go b/lib/instances/hypervisor_darwin.go new file mode 100644 index 00000000..183a928e --- /dev/null +++ b/lib/instances/hypervisor_darwin.go @@ -0,0 +1,16 @@ +//go:build darwin + +package instances + +import ( + "github.com/kernel/hypeman/lib/hypervisor" + "github.com/kernel/hypeman/lib/hypervisor/cloudhypervisor" + "github.com/kernel/hypeman/lib/hypervisor/qemu" + "github.com/kernel/hypeman/lib/hypervisor/vz" +) + +func init() { + platformStarters[hypervisor.TypeCloudHypervisor] = cloudhypervisor.NewStarter() + platformStarters[hypervisor.TypeQEMU] = qemu.NewStarter() + platformStarters[hypervisor.TypeVZ] = vz.NewStarter() +} diff --git a/lib/instances/hypervisor_linux.go b/lib/instances/hypervisor_linux.go new file mode 100644 index 00000000..f6abe18c --- /dev/null +++ b/lib/instances/hypervisor_linux.go @@ -0,0 +1,14 @@ +//go:build linux + +package instances + +import ( + "github.com/kernel/hypeman/lib/hypervisor" + "github.com/kernel/hypeman/lib/hypervisor/cloudhypervisor" + "github.com/kernel/hypeman/lib/hypervisor/qemu" +) + +func init() { + platformStarters[hypervisor.TypeCloudHypervisor] = cloudhypervisor.NewStarter() + platformStarters[hypervisor.TypeQEMU] = qemu.NewStarter() +} diff --git a/lib/instances/manager.go b/lib/instances/manager.go index 8411d193..f1551045 100644 --- a/lib/instances/manager.go +++ b/lib/instances/manager.go @@ -7,8 +7,6 @@ import ( "github.com/kernel/hypeman/lib/devices" "github.com/kernel/hypeman/lib/hypervisor" - "github.com/kernel/hypeman/lib/hypervisor/cloudhypervisor" - "github.com/kernel/hypeman/lib/hypervisor/qemu" "github.com/kernel/hypeman/lib/images" "github.com/kernel/hypeman/lib/network" "github.com/kernel/hypeman/lib/paths" @@ -44,6 +42,8 @@ type Manager interface { // SetResourceValidator sets the validator for aggregate resource limit checking. // Called after initialization to avoid circular dependencies. SetResourceValidator(v ResourceValidator) + // GetVsockDialer returns a VsockDialer for the specified instance. + GetVsockDialer(ctx context.Context, instanceID string) (hypervisor.VsockDialer, error) } // ResourceLimits contains configurable resource limits for instances @@ -79,6 +79,9 @@ type manager struct { defaultHypervisor hypervisor.Type // Default hypervisor type when not specified in request } +// platformStarters is populated by platform-specific init functions. +var platformStarters = make(map[hypervisor.Type]hypervisor.VMStarter) + // NewManager creates a new instances manager. // If meter is nil, metrics are disabled. // defaultHypervisor specifies which hypervisor to use when not specified in requests. @@ -88,20 +91,23 @@ func NewManager(p *paths.Paths, imageManager images.Manager, systemManager syste defaultHypervisor = hypervisor.TypeCloudHypervisor } + // Initialize VM starters from platform-specific init functions + vmStarters := make(map[hypervisor.Type]hypervisor.VMStarter, len(platformStarters)) + for hvType, starter := range platformStarters { + vmStarters[hvType] = starter + } + m := &manager{ - paths: p, - imageManager: imageManager, - systemManager: systemManager, - networkManager: networkManager, - deviceManager: deviceManager, - volumeManager: volumeManager, - limits: limits, - instanceLocks: sync.Map{}, - hostTopology: detectHostTopology(), // Detect and cache host topology - vmStarters: map[hypervisor.Type]hypervisor.VMStarter{ - hypervisor.TypeCloudHypervisor: cloudhypervisor.NewStarter(), - hypervisor.TypeQEMU: qemu.NewStarter(), - }, + paths: p, + imageManager: imageManager, + systemManager: systemManager, + networkManager: networkManager, + deviceManager: deviceManager, + volumeManager: volumeManager, + limits: limits, + instanceLocks: sync.Map{}, + hostTopology: detectHostTopology(), // Detect and cache host topology + vmStarters: vmStarters, defaultHypervisor: defaultHypervisor, } @@ -125,14 +131,7 @@ func (m *manager) SetResourceValidator(v ResourceValidator) { // getHypervisor creates a hypervisor client for the given socket and type. // Used for connecting to already-running VMs (e.g., for state queries). func (m *manager) getHypervisor(socketPath string, hvType hypervisor.Type) (hypervisor.Hypervisor, error) { - switch hvType { - case hypervisor.TypeCloudHypervisor: - return cloudhypervisor.New(socketPath) - case hypervisor.TypeQEMU: - return qemu.New(socketPath) - default: - return nil, fmt.Errorf("unsupported hypervisor type: %s", hvType) - } + return hypervisor.NewClient(hvType, socketPath) } // getVMStarter returns the VM starter for the given hypervisor type. diff --git a/lib/instances/manager_test.go b/lib/instances/manager_test.go index 4bfb9b4f..7120903d 100644 --- a/lib/instances/manager_test.go +++ b/lib/instances/manager_test.go @@ -184,7 +184,7 @@ func cleanupOrphanedProcesses(t *testing.T, mgr *manager) { func TestBasicEndToEnd(t *testing.T) { // Require KVM access (don't skip, fail informatively) if _, err := os.Stat("/dev/kvm"); os.IsNotExist(err) { - t.Fatal("/dev/kvm not available - ensure KVM is enabled and user is in 'kvm' group (sudo usermod -aG kvm $USER)") + t.Skip("/dev/kvm not available, skipping on this platform") } manager, tmpDir := setupTestManager(t) // Automatically registers cleanup @@ -1007,7 +1007,7 @@ func TestStorageOperations(t *testing.T) { func TestStandbyAndRestore(t *testing.T) { // Require KVM access (don't skip, fail informatively) if _, err := os.Stat("/dev/kvm"); os.IsNotExist(err) { - t.Fatal("/dev/kvm not available - ensure KVM is enabled and user is in 'kvm' group (sudo usermod -aG kvm $USER)") + t.Skip("/dev/kvm not available, skipping on this platform") } manager, tmpDir := setupTestManager(t) // Automatically registers cleanup diff --git a/lib/instances/network_test.go b/lib/instances/network_test.go index 70181cef..70ac861c 100644 --- a/lib/instances/network_test.go +++ b/lib/instances/network_test.go @@ -254,6 +254,6 @@ func execCommand(ctx context.Context, inst *Instance, command ...string) (string // requireKVMAccess checks for KVM availability func requireKVMAccess(t *testing.T) { if _, err := os.Stat("/dev/kvm"); os.IsNotExist(err) { - t.Fatal("/dev/kvm not available - ensure KVM is enabled and user is in 'kvm' group") + t.Skip("/dev/kvm not available, skipping on this platform") } } diff --git a/lib/instances/qemu_test.go b/lib/instances/qemu_test.go index 4f34384d..98d0095e 100644 --- a/lib/instances/qemu_test.go +++ b/lib/instances/qemu_test.go @@ -171,7 +171,7 @@ func (r *qemuInstanceResolver) ResolveInstance(ctx context.Context, nameOrID str func TestQEMUBasicEndToEnd(t *testing.T) { // Require KVM access if _, err := os.Stat("/dev/kvm"); os.IsNotExist(err) { - t.Fatal("/dev/kvm not available - ensure KVM is enabled and user is in 'kvm' group (sudo usermod -aG kvm $USER)") + t.Skip("/dev/kvm not available, skipping on this platform") } // Require QEMU to be installed @@ -727,7 +727,7 @@ func TestQEMUEntrypointEnvVars(t *testing.T) { func TestQEMUStandbyAndRestore(t *testing.T) { // Require KVM access if _, err := os.Stat("/dev/kvm"); os.IsNotExist(err) { - t.Fatal("/dev/kvm not available - ensure KVM is enabled and user is in 'kvm' group (sudo usermod -aG kvm $USER)") + t.Skip("/dev/kvm not available, skipping on this platform") } // Require QEMU to be installed diff --git a/lib/instances/volumes_test.go b/lib/instances/volumes_test.go index 2dc48143..d1614f8d 100644 --- a/lib/instances/volumes_test.go +++ b/lib/instances/volumes_test.go @@ -42,7 +42,7 @@ func execWithRetry(ctx context.Context, inst *Instance, command []string) (strin func TestVolumeMultiAttachReadOnly(t *testing.T) { // Require KVM if _, err := os.Stat("/dev/kvm"); os.IsNotExist(err) { - t.Fatal("/dev/kvm not available - ensure KVM is enabled and user is in 'kvm' group") + t.Skip("/dev/kvm not available, skipping on this platform") } if testing.Short() { @@ -334,7 +334,7 @@ func createTestTarGz(t *testing.T, files map[string][]byte) *bytes.Buffer { func TestVolumeFromArchive(t *testing.T) { // Require KVM if _, err := os.Stat("/dev/kvm"); os.IsNotExist(err) { - t.Fatal("/dev/kvm not available - ensure KVM is enabled and user is in 'kvm' group") + t.Skip("/dev/kvm not available, skipping on this platform") } if testing.Short() { diff --git a/lib/instances/vsock.go b/lib/instances/vsock.go new file mode 100644 index 00000000..415dcc29 --- /dev/null +++ b/lib/instances/vsock.go @@ -0,0 +1,17 @@ +package instances + +import ( + "context" + + "github.com/kernel/hypeman/lib/hypervisor" +) + +// GetVsockDialer returns a VsockDialer for the specified instance. +func (m *manager) GetVsockDialer(ctx context.Context, instanceID string) (hypervisor.VsockDialer, error) { + inst, err := m.GetInstance(ctx, instanceID) + if err != nil { + return nil, err + } + + return hypervisor.NewVsockDialer(hypervisor.Type(inst.HypervisorType), inst.VsockSocket, inst.VsockCID) +} diff --git a/lib/middleware/oapi_auth.go b/lib/middleware/oapi_auth.go index 6f8a8254..f60b25a1 100644 --- a/lib/middleware/oapi_auth.go +++ b/lib/middleware/oapi_auth.go @@ -209,8 +209,6 @@ func isTokenEndpoint(path string) bool { } -// extractRepoFromPath extracts the repository name from a registry path. -// e.g., "/v2/builds/abc123/manifests/latest" -> "builds/abc123" // extractRepoFromPath extracts the repository name from a registry path. // Uses the docker/distribution router which properly handles repository names // that can contain slashes (e.g., "builds/abc123" from "/v2/builds/abc123/manifests/latest"). diff --git a/lib/network/bridge_darwin.go b/lib/network/bridge_darwin.go new file mode 100644 index 00000000..6eec3940 --- /dev/null +++ b/lib/network/bridge_darwin.go @@ -0,0 +1,68 @@ +//go:build darwin + +package network + +import ( + "context" + + "github.com/kernel/hypeman/lib/logger" +) + +// checkSubnetConflicts is a no-op on macOS as we use NAT networking. +func (m *manager) checkSubnetConflicts(ctx context.Context, subnet string) error { + // NAT networking doesn't conflict with host routes + return nil +} + +// createBridge is a no-op on macOS as we use NAT networking. +// Virtualization.framework provides built-in NAT with NATNetworkDeviceAttachment. +func (m *manager) createBridge(ctx context.Context, name, gateway, subnet string) error { + log := logger.FromContext(ctx) + log.InfoContext(ctx, "macOS: skipping bridge creation (using NAT networking)") + return nil +} + +// setupIPTablesRules is a no-op on macOS as we use NAT networking. +func (m *manager) setupIPTablesRules(ctx context.Context, subnet, bridgeName string) error { + return nil +} + +// setupBridgeHTB is a no-op on macOS as we use NAT networking. +// macOS doesn't use traffic control qdiscs. +func (m *manager) setupBridgeHTB(ctx context.Context, bridgeName string, capacityBps int64) error { + return nil +} + +// createTAPDevice is a no-op on macOS as we use NAT networking. +// Virtualization.framework creates virtual network interfaces internally. +func (m *manager) createTAPDevice(tapName, bridgeName string, isolated bool, downloadBps, uploadBps, uploadCeilBps int64) error { + // On macOS with vz, network devices are created by the VMM itself + return nil +} + +// deleteTAPDevice is a no-op on macOS as we use NAT networking. +func (m *manager) deleteTAPDevice(tapName string) error { + return nil +} + +// queryNetworkState returns a stub network state for macOS. +// On macOS, we use NAT which doesn't have a physical bridge. +func (m *manager) queryNetworkState(bridgeName string) (*Network, error) { + // Return a virtual network representing macOS NAT + // The actual IP will be assigned by Virtualization.framework's DHCP + return &Network{ + Bridge: "nat", + Gateway: "192.168.64.1", // Default macOS vz NAT gateway + Subnet: "192.168.64.0/24", + }, nil +} + +// CleanupOrphanedTAPs is a no-op on macOS as we don't create TAP devices. +func (m *manager) CleanupOrphanedTAPs(ctx context.Context, runningInstanceIDs []string) int { + return 0 +} + +// CleanupOrphanedClasses is a no-op on macOS as we don't use traffic control. +func (m *manager) CleanupOrphanedClasses(ctx context.Context) int { + return 0 +} diff --git a/lib/network/bridge.go b/lib/network/bridge_linux.go similarity index 98% rename from lib/network/bridge.go rename to lib/network/bridge_linux.go index a979c111..952d7dbb 100644 --- a/lib/network/bridge.go +++ b/lib/network/bridge_linux.go @@ -1,3 +1,5 @@ +//go:build linux + package network import ( @@ -15,22 +17,6 @@ import ( "golang.org/x/sys/unix" ) -// DeriveGateway returns the first usable IP in a subnet (used as gateway). -// e.g., 10.100.0.0/16 -> 10.100.0.1 -func DeriveGateway(cidr string) (string, error) { - _, ipNet, err := net.ParseCIDR(cidr) - if err != nil { - return "", fmt.Errorf("parse CIDR: %w", err) - } - - // Gateway is network address + 1 - gateway := make(net.IP, len(ipNet.IP)) - copy(gateway, ipNet.IP) - gateway[len(gateway)-1]++ // Increment last octet - - return gateway.String(), nil -} - // checkSubnetConflicts checks if the configured subnet conflicts with existing routes. // Returns an error if a conflict is detected, with guidance on how to resolve it. func (m *manager) checkSubnetConflicts(ctx context.Context, subnet string) error { diff --git a/lib/network/ip.go b/lib/network/ip.go new file mode 100644 index 00000000..555ad579 --- /dev/null +++ b/lib/network/ip.go @@ -0,0 +1,22 @@ +package network + +import ( + "fmt" + "net" +) + +// DeriveGateway returns the first usable IP in a subnet (used as gateway). +// e.g., 10.100.0.0/16 -> 10.100.0.1 +func DeriveGateway(cidr string) (string, error) { + _, ipNet, err := net.ParseCIDR(cidr) + if err != nil { + return "", fmt.Errorf("parse CIDR: %w", err) + } + + // Gateway is network address + 1 + gateway := make(net.IP, len(ipNet.IP)) + copy(gateway, ipNet.IP) + gateway[len(gateway)-1]++ // Increment last octet + + return gateway.String(), nil +} diff --git a/lib/oapi/oapi.go b/lib/oapi/oapi.go index 935d2d99..de752467 100644 --- a/lib/oapi/oapi.go +++ b/lib/oapi/oapi.go @@ -50,6 +50,7 @@ const ( const ( CreateInstanceRequestHypervisorCloudHypervisor CreateInstanceRequestHypervisor = "cloud-hypervisor" CreateInstanceRequestHypervisorQemu CreateInstanceRequestHypervisor = "qemu" + CreateInstanceRequestHypervisorVz CreateInstanceRequestHypervisor = "vz" ) // Defines values for DeviceType. @@ -82,6 +83,7 @@ const ( const ( InstanceHypervisorCloudHypervisor InstanceHypervisor = "cloud-hypervisor" InstanceHypervisorQemu InstanceHypervisor = "qemu" + InstanceHypervisorVz InstanceHypervisor = "vz" ) // Defines values for InstanceState. @@ -10622,140 +10624,140 @@ var swaggerSpec = []string{ "vXguLNOUMywIiO4IcYZOLq4QjmMeWmNoojWsCZ1mgkT9mg0OvfuwhbD5F8jhJ2xOBWeJ1oXmWFBNPBXP", "wsfgxcvTJ6MnL66DI72TURZaM/3i5avXwVGwNxgMAp+o0zuxBhmfXVydwIp1+xlXaZxNR5J+IBWfWLD3", "7HFQn/hxvl6UkIQLo4/aPlBnVmUHRlyjmN4QNNT9mU3beVZn1Lsw1BLQZouUiDmVPjvzl/yd3u9MkjJt", - "GmKoooQkYk5Evtew+f2SrA9jnkW90pDd4B1JAK2LiXoa+W29VlJgDXvHcUoZaeTv3R+FJ99ycRNzHPV2", - "vjJLZkTpvpeX+MK8qG6mRQCS73/QXdLzWXRLIzUbRfyW6Sl7eI99g/LGOQN6r1eC49//9e/r80IB2Xk2", - "Ti032tl98IXcqMZ/dNde4yJfSJb6l3GV+hdxff77v/7tVvJ9F0GYxs+ownSMvV5dyj9mRM2IKEklt8H6", - "J6MdwufI4Utp+IoDoOy1X2KcfE5EjBceRrgz8HDCfwiqgL7sd0hLNKQ/XsMGdW9OeC0zwoGfE3om5ZnT", - "Y03fli+3mUk+kZ3dc/u425Y3yxuajqZa2Rjhae7AWHWecnlDUwRf9OALs41xbIg3ynTPaMy56g/ZP2aE", - "Idg72GDynoTAp7SFho4vziS6pXEM5g4wgmXeP2SvS6zANJdK/1dkrIvGmUKCJFwRbWsmum89SAZzgcZj", - "gjKG3YFNf8jKULELrOOVBcsNEYzEoxnBERGyJWTMR8h+1AgcWOoES0WE4dBZWoXX6d/PL1HndMFwQkP0", - "d9PrOY+ymKDLLNU0vFWFXnfIUkHmhIGiqxUGasflE8Qz1eOTnhKEuCkm0FluMNrThPmziyt7HiW3+kP2", - "imjAEhZpe5ML5KSERGqGFYo4+7OmWBJVuy2PXwO6n5a7wTxMsyqUd+sQfgGnQHo9cypUhmPNsioal/dQ", - "yBw3ejRUc5pZ1pQtK8oRDquqN7+tpWB6hrPHZb3ZbxwYhaPZOFhz9OrzsecOhzCTiiclTzvq1HwJtOp1", - "qDKPOY97EVYYVIOW+ouZ7vKpVbIwXZlNaeKSo+nY46DSzJAyNKVTPF6oqq69M1jeej+gXf8+UDed6Br0", - "INFI8dVnWnSCXNs2Lmw4/x0pPppPqKfnXGgWzhMqUVg7PrZIq7vopSG15NtFtzOqxaxEDghAwdfnZRuw", - "P2Q9YDlH6DQfIO8271JzVnCUQRcdLkqToODzROPFFsLo+ryPXuez/bNEDCs6J+6Ie4YlGhPCUAbqGYlg", - "fGCn5QlkUvMwquqfW15lTsO3wNTl9l0faVsiwZbva/ROsKIh+NnGtLYeON8wG6VH0gyAlaVOKymx6iTw", - "FZlSqUTtHBB1Xj092dvbe1TXF3Yf9AY7vZ0Hr3cGRwP9/3+2PzL8+gf+vr6Oq/zCei7LHOXk6ux01yon", - "1XHUh3386PD9e6weHdBb+ehDMhbT3/bwnYQE+NnTaeFyRZ1MEtFzrE9jlc/RWvJnNjhSP9s/ulE0gjuR", - "WSV+zOpe65bfIn7Bd4pmz3A2jzCoM8G153ClxS2tR/+q9YMC80u+AevuDqnXsX9K5c1jQfCNtio98lWL", - "Zzkycsfv68q0HTVeIPJeq2ckQoJzNZHGX1BVU3b2H+4f7h3sHw4GnmP7ZSTmIR2FWqq0msDLkzMU4wUR", - "CL5BHTD0IjSO+biKvA/2Dg4fDh7t7LadhzGT2sEh16LcV6hjIfIXFwLm3lQmtbv78GBvb29wcLC732pW", - "VsFrNSmnDFZUh4d7D/d3Dnf3W0HBZ3Y+cWEU9WPhyIOkx2kaU2Nk92RKQjqhIYJADKQ/QJ0ExBLJLb4q", - "TY5xNBJWDfTKA4Vp7AFDyetnBrMtTdRNksWKpjEx72BDWmm6sPJT6MnnIaaMETHKo0w26MkGn6z1jLm1", - "5E1QJYioArpzKkGzKBQiSuLoyFDoWj4Hu1lM7E0THtg1tMSG5/yWiF5M5iQuI4ERR3qyCRcE5XhiNq2y", - "KsrmOKbRiLI086JEIyifZgL0S9MpwmOeKWOqw4aVB4EjM7ARJppdtzuxLXzUS0NrO3NDx18q+ITGnmWA", - "0WrfWpHuXGLP9weXvZ3/A36wlyxeGD5AmTF0Ex6Rfi1OEdq3Xt5F05zyIFFUnt3SmnLXhMc9mlu7DiLW", - "6A4xQ2OCrJg0Tl1wmxSDFAz+kY9hTgROyDibTIgYJR5L66l+j0wD44OiDJ0/rjJNzZzbqlsXlc0BfWuC", - "Qxvj1w76HkuutoxuCZpv/Nv1ipiwhqYoAr1VwraxgQR99CIPy0XPLq4kKtxJHhOv5YHdxWwhtXFiejRB", - "QZSVLTNAztZs+KL40NqwHmaceBmQIwTUmU/TDMjw8lXv7OX1dhKRebcyJ3ABzXhM9Ly3SrrV3MUSFKeL", - "lSOXeZOKbBBDtiWgEqxyCm4NpBK9eqCjuMLxSMZceWbzWr9E8BJ1rp+aM2Q9gy5KK1upfy9BoYLfB16K", - "0RypadhLGLBua1cIfK3bIzFiq7y8yqA+UvmF4NgE8VfxuQhLcxvPb6obzW/WUq/txDfumTt1q0nOxGO7", - "nJyfGsss5ExhyohACVHYXhkonWxDgEXQDXpaGYgwScAnOvmv1WfdDb6bHF1WWf8nSxHA38Tyb4hy00wu", - "npMIJZjRCZHKRrlVRpYzvPvg4MjE10Zksv/goN/v+094lFiknPrCG5/k79ptxbY5H+0Vffbl7Mv24Ruc", - "4bdZy8fg4vj1L8FRsJ1JsR3zEMfbckzZUenv/M/iBTyYP8eUec/+W4Vk08lSKHZle1Mts8zvR3oljIQ5", - "QnLQEtf6Jv2S/IVGzZh+IBHyRkQpPEVa/waM+7LQpy8IYi5u0qhS8HL5mKBFIDP9sNrcdooRtLFjZkzR", - "uIjxXja0PytKX64MelwKeEwJy8Mc49g8hZzNNVX4Yh4rDNy9W9qMWy5uKJuOIurBzn+YlyiigoQKQkrW", - "01CwjdN0PSr6lb+cp7WN37bRWx7p8t05+ec4XKujv5z+7d3/lRcPf9t59/z6+r/nz/52+oL+93V88fKL", - "Qk5WB+591+i7lWdq4GWsRN21RY9zrEKP4jPjUjVAzb5BiqNEf9xHJ2CgHQ1ZDz2niggcH6FhgFPat8Ds", - "hzwZBqhD3uNQma8QZ0h3ZY+Ot/THFybsRn/80dmAn+p9RPaMWFgg5+EcMhtHPMGUbQ3ZkNm+kFuIhEMb", - "/RShEKcqE0TviNY14wUaCxwWZ8PF4F30Eafpp60hA0uUvFdCryDFQuVRvm4E2Gg7K3MoZJuTCM1xnBFp", - "Ldkhy+UHmOa6E4XFlKh+7kIER03tYKYBKF4zg4tqbMPhoOvZR6Tb6Y2MqVSEodwrQSUgL+q4IJXDQYX8", - "DweH688fcxxagX6A3cv3ah1StqAPg8AwtGHGo5lS6frwBeA3hkbQL69fX2gw6H8vkeuogEW+xcYYw2ka", - "UyLNqZqKQSexcUFbge/kzOxuywW9No31Z3GLMIwnMDB6/fwSKSISygz/7oQanBMa6vXB+Q6VMtOoSDE6", - "Pjl/stVvcTEYYJvPf8U+vs5XWDtGcM6tZQsTviic5hq+XXR22tXqlKXQQtGCc9OnXKDYMJiCro/QlSTV", - "KAbYKnPEY3YyXhQeMsPVh8GW6zGtc4oj9CrX73A+lfz2QYEMrsuCLqFbG9hiDnWXeu9W5wrH1dZ+sawN", - "jnCxQtbpDaK4mRWsJn8PxIHmOav7Hjej7bLTUg/mR41i77+5BrK3qS25aSR3NSitFISYB3N/3yjsz4mp", - "djv07OIKIpexHEmGUznjqjk4AyPXBpH3VCq5HMfWKpxgOYa7Kp5MdPaKwMCvGY0tMsYgMqK+jK8eZ/09", - "Yw1+vBjvlVHZXxpabRW0bxRZ3cgQfFHJVd5gfv66MdLfZDqVaGcfMyjLMRcI9tkBzt2AeoJgjqWkU0Yi", - "dHZR3PorHB6u+9qaHu32dw4O+zuDQX9n0Mb9k+BwxdjnxyftBx/sGoP4CI+PwuiITL7A/WQR2ygcOL7F", - "C4mGTiUcBkYHLSmfJbK1amOro73lOPLPCxuvC8F1geGbBIK3i/BecR3/snoRv7Ve8eCfX3Rnn7QVw5fQ", - "2H012sQxSlDIszhif1ZorCnPmAIkshaLJKrIcQDEesVuGL9l1aUb/5im33cZEQt0fX5e8aYKMrHXvVss", - "nKdp4z7wdKNt2F2j3q2dTSnY+i4CrOucsCSBvno4ddn14+I6DNa1cAEV6p/3mJQyA2699yvWVDPeIzIf", - "ZZlP0dGvXITm1dXZaWXDMT7YORwcPuodjncOevvRYKeHd/YOersP8GCyFz7ca0iM0j5M4vMjH6oU2hwR", - "DYAHR5gJYo+ONA3loQvjTKH8kpomzhOtMaKSHmrif8E2fWVUUt0DSNdQv4kXuaq68uMLrAnVfZvCX6u/", - "uJxlSqtB8I2cZQrpv2DKeglW1V/dhaH5I/SCwzd2pl0tKGs2g2mOWTReLDev2xcdGwEiiFRckAgGswzs", - "CD3NmVbO9iyb60hiHw0vtZFSEAW2ZQxqq97b3Qq6gYV60A0MCINu4CCjH80K4QkmH3QDOxFvkGUZb3yO", - "YoJj4GFFEEamaEw/GJLTU6dS0dCYWBh2s4ns7E02Eo2MCG06yjEn+1bM5h85qr4+Rx24d/AXZC0w/ddW", - "fuxTJqH93Uf7jw4e7j46aBW1WExwPTc+gbiT5cmtZc1hmo1cgqiGpZ9cXIHw0YJNZokJk7RrL2w3zThC", - "re1RhoqMU8Xgj/qPysGaEc/GccnTYKO1ISKwTXqwhnOOdzSe08mEvfsQ3uz+Jmiy8/5A7o69xlE+kF+T", - "PCt7x5bMLjLumavG/ng6QCghG0NOXxEJK0CXRCHAn55mWFqi5uEiFuVcYKqFuBex9vf29g4fPththVd2", - "diXCGYH9tzzLczuDEolBS9R5dXmJtksIZ/p0MXSpIFIvztyi8NIZGmaDwR5Bg0p4nbY99nxY0qCwFFhj", - "+54njSC/thqLXZQFOkS95NrMEpV7ob23N3i4/+DwQTsythbPSLxfzWFsO3taLEhI6Lyy8x3wqL4+vkC6", - "dzHBYVXD39nd239w8PBwo1mpjWalBGYyoUptNLHDhwcP9vd2d9rFTvu8pvZWQIVgq7zLQ3QepPDshgcU", - "y6y32yQtfFricqjdyui+IlywHhu2STBocROMSuiVluIQUUcrUWWFtHSbaauNn8HPIvU4TWkntbrYNk5z", - "dVjmBVazMzbhy27xTQw+G+ziDiFSrfhISMgVEUZJ5HhXbvlZXQrCZ2JJUJQRCzmjGwlsAY7N0UCK1QyU", - "VfiQsmk1cHhpwDZmmJnD6nt/MK5t2MZjJP0BGq9FBrAyDl2JcBGq0co7TeXIb1UsdyzINIuxQPVY5BVT", - "loskpuymTe9ykYx5TEOkP6ib8xMex/x2pF/Jv8JatlqtTn8wKk4la+a5mZw9kzYbUhu3WMJf9Sq3alEu", - "IPm3zffbkFe4jQPOG637VBtvJlz3itH3JUSvXqLZ3x00BTU1dFoJZ1oO9d6Ut1uU9VG8i8I+zpNNeI7E", - "zGlNzYKt6sGV9fpWC6daq0K4ljUB1HE+PXdJqQrX0mWhVoK43cFa3XvtZrMtSVgdff/wwcODlre1vkjV", - "XpF59QsU63myQqFu2KnzNlrb4YPDR4/29h882t1IP3IHHQ3703TYUd6fWk6Zms72YAD/22hS5qjDP6WG", - "447qhCr5YT57Qp9WkG5xgaLB6l6V9bzYSWfmVxXwdiruCm3puKJyldKEdchkQsBxNDJw6xWTqQX0tJpD", - "iFMcUrXwWID4FmIcUN6kdhGgRe+1yXpAavtGeKKIgNMImY2Lm3AdNzj6T2PZ1XDhsPWlT5mNm6zIl/VR", - "jQ1pgoKimoeihYPAYITvBPw2Bya6xbLi1dfPoSJRt5QGrn78Y1q0z3LrcD1PdFscbPsus/iT2pa3v7ad", - "JaujoiTXIb5KhDaToNYIIOKojYPdI5E9N2TC9REUNf5gBeDnfTUal69jr7zvXrm7XUjdzcdtl3hv+Tsj", - "wTYfr3SCv8mH9ZupgI92DhbkRd/dCkr4sMmcrzSlPUlcOZDaxVVqEqzb20mo1Bh1SJKqhYvAd5bp1mbn", - "Pcd5h15k/MoxU4NHXyNq+2plmPb/kEQ65SM2N8jaw7WlPW2MjfSrq6f18BVjE9pEAtVwi9r1aKlW1BFY", - "VbPGFI8Bg8/GJU+z+kWqDerUNJn4BeW4AgGuUM06y3WlP620stJMmvfGnK9+YVEfKl01n88EmTW/1gf6", - "mjMqbQD36pkmzD1VQcGeswAygNUgyE30ZT/A6rCPc/w+HwGsZSxRLTefWUcpz+2zx3D3/JXLOEAnrguY", - "Rj3L4uMvq3bksGp5M1aVP3In+F7Cs/xnBUdroq0achZjdFdXWNKsi4SZoGpxqQWCDU4jWBBxnBk0BEkB", - "i4Cfi8Eh2P3TJzBTJx5t9RlhRNAQHV+cAZYkmOGp3rLrcxTTCQkXYUxsrPLS2S5ctX95ctYzlyzyZHhQ", - "vEABQFwWquOLM0iAY8sGBIP+bh9y/fKUMJzS4CjY6+9Aih8NBljiNtxhg0friNJ0CJLsLLIS97FpokEr", - "U86kAc7uYFArQ4GLJCPbv0njYTHitbVSaOr8LMdbLIXgOk3ATv9TN9gf7Gw0n7V5QXzDXjGcqRkX9AOB", - "aT7YEAifNegZM1a1y0RMbMMCZ4OjX6vY+uubT2+6gcySBGsV0YCrgFXKZZMKQyTCiJFbe7nxNz7uo0tj", - "k0CSkKKCmnEZkEizJIwUFv3pB4RFOKNzMmSWE5scL1jATY4EaQ5s4uiraGaGNrtvSJhI9ZhHixp08+62", - "dXegjVQBvHF9jjxhYdpQqMPHHU1eJBlyb0IowjBTRZodkxDphsAh5oS+98bCQ3yv39t9mr9zFV2qvF2r", - "u5SFcRYVArBaScN7x9pUhLApnm6IR194Bi3s/Muh0E7SMB4RE9aaLtSMM/OcjTOmMvM8FvxWEqHlkb2S", - "YcGizea8EpfJn0cTuBZhLnHqMbfNFLc/3pDFp/6QHUeJu3Rr07jiWHKb+8oEKFCJ8mTCQ+bVoOUI635G", - "Y1dSrKaoEuhqGGhROQz081RgrZJlcoZwCAEJ+scycDoGm7kAcbdVn2uIGUp5msVaeYDtMcmxKn3A7TYc", - "x0gB/rhvtRAFmDSsR5JQEJ+t9LfLly8Q8E8orQLNiuhyWANlWvrlSWL1gP0he4LDGTKCEZInDgMaDYOi", - "hMYWCLFMEiObej2QrH+F2kJmmC6N/trv666M0D5Cv340vRxprEmTkeI3hA2DT11UejGlapaN83dvGhbc", - "4Ku5rKA86hiGtOXuA+sVlnizYWaYRYhbBhAvEEYFrZVNsjFlWCya6tHwTDXHu5jr0rZZcZfvYDDYWn+e", - "YZfqUVcqDTWmflqSzrtfTTBZobwsmEq157QYYPYufGTE8R1Ixsc4cle0fqoAa1QAa7uUhDt8bxXA7Y80", - "+mTQNyYmvrImoaFEkZPQKRY4IQqSVP/qx3kILaX6b3f6CL4GY8lXkbdbAk9doX+zhNj7jbWf8ipKgAv7", - "d4B/MG6RoQzGfXRX4+LY5MfN61HeK3SEzXKI2PVbH8+I+hEwbnBXrNQlUvyO+Htf8OcZsSpSAbQaN9uG", - "zPRl07Z+BUIQnEjbi2msbZlLmFPvkjCFoOqg7Nt/nZoN0eVvYz59e4QMCGNbc1Ha1Hi5D1gLRQtL+Mhk", - "Dsm/swl1whlmUyJRx8jP3//1b1c37vd//dvWjfv9X/8Gct+2VVChu7zi4dsj9HdC0h6O6Zy4xUDEJJkT", - "sUB7A1uLA1550vPIIRuyV0Rlgsk83kivC2BiOgSVncF6KMuIRBJACImzJzYQxriYPCaeo2UDyjul6O6S", - "pWtXUFqAlooOB+BkkzKqKI4Rz5TJcQnzgEs5xUTMmoPy4HVv2ZL/dD1/UeS9MtjbMxPckMGYiqEeujNF", - "NE2fqHN5+WSrj0DdN1gBwU5gNxTdWEug/5MnredJhqNUGQpA2fCmUmbGRl/bqW1zF862pqyNzd42ASnm", - "ibZd3WJ+qt0tPG9+uDkvnM8VduoyiTf7wj5/vb6Coq1syq+3zw73lmFu0+QXIPse1iTq2AzHeSKTSi7+", - "74X0d8KASyUcci6MuEmfcmcWzglnk5iGCvXcXGyRydzqqSLIfWEHr+ysEXbrqkfol0XFdiXgrFFo5LFn", - "dyk9aoNuIkaKWwQFrv2UJOtQ55TKkOtvS9jSC3EKgLRALOi0jEXrfDun8HsuclYq5nnZV0eQd+flsUNn", - "rC4b7oApntYY4ndkhLU0H6V7N/cJm6/yXXRVU1Y4gX4s1BzcnRZ01w4hH5rfJ49QVAOb5oKzPLF4E3rZ", - "1OPfcKPtCJ6FXxLhqNpM1KSXKJZlPkXhjIQ3ZkG2uM8qjeDM1f/59nqAyZ++gfS30/8p7lsYjgWsVhmL", - "ZzbnyLezFWGEjUzFr3f8aBHMA2SI0hg7R6pJ54HlgoVbf6gTyDuRDPViPPeIki6yOHaO+DkRqsgiX+an", - "2x+1ftBCT3bUtlIXuXr1vEdYyCEmx4CuUSFxSaO/rrZsNsws5SeatLGvAFQOMZqV0S/YfxM6hfJsjn/a", - "fWrzOf5p96nJ6PinvWOT03HrmyHL4K5Y811rr/cY+bTySqtAA9ZkUjuv0/byVnei8Nkc+puofPkEf2p9", - "bbS+MrhWKn55OYNvqPrZLPHf55wgRzYftOGViz/7g6l8d+t6shhZKvxX8cXbxCZcFJnZbdmw+xcgR3OM", - "K/Pflj7UgiBXagcOdc9OuzbpvkmVnweI35FH1c3jzrVEO+7du1OPkzGdZjyT5YB2qLFAZFGQtsKA75v+", - "WojnRg32B8bSwV2KjjtXUH/i/TdSnesbapi3rWG7Rnl2re5GeS6Oatprz26GP7XnVtpzCVyrtec8j+u3", - "VJ/NIN9Nf3b45gO4vcL8U4O+Cw1aZpMJDSlhqshBtBTVYlOY3cN7Jcw64Uun0RUm3FqDLpIrr1ZOLPJ+", - "j0iEfPC7V5xdorP7GR/LTUR85FTVQhg266o/Gj4M7pY5372Oep9R7Fm5np1fGzSXQ2I+XX81JO/J3YPw", - "3A0ZMlf87q1h6m9RjqhIcSRJTEKFbmc0nME9Ef0b9G+ukeA0fZtfDN06Qs8g/rR8VRUG70giKI4hYzqP", - "TbL/t/MkeXu0nDPi+vwcPjJXREx2iLdHyOWJyGlM6lblex96FTGWCr2wt1k6esMFj2OTnfmthmdpfVv2", - "Rkhxh3bIfLdDGLm1HdIJelu6KPK24aaIQ8Lnepe+E+V3m5Pjm7UojgQAztxZJyxquCWioea/I7Iz8KY+", - "anlfxUzjG19XWZrMcz7N8wtUUBmnaVv0tdMELJ4nyQocRp1SQQCpIp6pv0gVEWHq1VrsbkJu1MGh+UPh", - "G1NdtVJezpSg8IHK3r32giowNaRd5Qrz1zxJAlPrLsG+ShRffu+n3uGywah3pnS556fM2OTaTpXZl+7t", - "1CSHLYEC2Ua81uUr0+APr7m4WjHfGQ2/g6VXzIJCCRkWjRewt0URnvt1aQE2slgZyDu7Li+NuHeNNGJr", - "9/zhaaTAjz84lYRcQMFv6Qrw3Z/ospLFUSL3DlT8KippdZ3Ve31+vtVENKZmdCPJiJ/msA30/MPLFCiC", - "dv+oxdT/xPkCVjkLNUGoRhvd2ayVAoljnunel9KnQmEQuZCKJMZgn2Qx3LyDsHqbwACXC590EVUS0nB3", - "wWVVKnoxZGMy0fIwJUKPrT+H9GyF7eEzay8Vzsn3wtDgj2HXQkZVMOWwaoJarbpImrpkqj7bKc//+tlT", - "egqGarXwikSdmN6YaoJoLlGsH7ZWWrqmKsvXTs/w+ZSV1x3yXbs1OJsj8x+Bw53V2Jqrq3nv2NozUiYW", - "x39go/1sTa7la2LDwpQOdqUClf0hOydK6DZYEBTyOIZ6BEZ/304FD7ehaF6Y0shUz4PJAcNrfp3AiCcX", - "V9DOpIDvDpn+Y7lsW32irvrb2fbLNb4/U7Dzf7CeYxa4iiz8G/7TrbP5UUAjDckGEuXpKk2cpz8VcVuH", - "96fZei/NVjiLzVfTmQocglIsbaVlv4lqy5NtfzQPZ+tO9BUOZ9euWsSPoe3a5PLrhnELvBdEadcUEZMW", - "4O5pkuf5/+/p1S8NOLcEUGLKsQl+KWDqivzRsPvrx8mV4bhRlNyd0pZLufHD0NZdSz47BxeoVobHfSFz", - "g2luJZAAvex9EuUCZyttM1d/Cqrt5aqlq7vWLZf/Mxk+cx9SUTcmrzTWH7K8tJrLMKqtq64zrVBE5Y3p", - "wVpPfeSvgGfsPFsGb8gURyGOQ5N3Pi8FZ8o3ygbr61WpPOI3o7diEM9G5zXwZF6y7D6ZHH6cgN0r10QD", - "jLPq1Mr49Gvb5i6i060w2yA23a3gZ2R6i8j0ErDaVGAxBe0st7KVyPLyGVANqt9QSCVXSr5dXPtnyOuv", - "hx4OTxul9c+I9jtTCIoroWen9z+MvUxzFR69ra2Cni1vVHYNraJgC6JUkJ6r/xIZgFl4GFujXj2pP2Sv", - "Z8T9hagLpSSRraAfLxBlUPDGFcH7s0SCc1VU2G+usmRI5KngybFdzRrjpXU5SN9BzMb5KrqeEng0yZK8", - "WPyzx1D+WpjIPjTBNIa4UgdS8j4kJJKAk1v1MpPeUL+8nuTaWa6I0cwLSYWZVDxxe392ijo4U7w3JUzv", - "RVGzKRV8TqN6zeBKvU7fbMFC/ApG2vQDTaukt7bezTLhVfEW5UWqbMGdAj/d7gQ/xUQ9w7DebW3kOSAq", - "zlGMxZRs/RQl91mUlL1JTm5UJEq7C1HtHEwt/T7f4jJU7ny826tQ1z+OT6SUkfUeJgyY50Zf0x2sHwsF", - "B3cnH+767tX1PfahPyPOwC3du4IOdI8+hHnOQxyjiMxJzFMoRW3aBt0gE7EtrHu0vR3rdjMu1dHh4HAQ", - "fHrz6f8HAAD//wgwYbc14gAA", + "GmKoooQkYk5Evtew+f2SrA9jnkW90pDd4B1JtJibf9C4XczW09Jv8LUSBWt4PI5Tykgjk+/+KIz5loub", + "mOOot/OV+TIjSve9vMQX5kV1Ry0WkBwJgu6Sss+iWxqp2Sjit0xP2cOA7BuUN8650Hu9Ehz//q9/X58X", + "WsjOs3FqWdLO7oMvZEk1JqS79loY+UKy1L+Mq9S/iOvz3//1b7eS77sIwjR+RhXOY4z26lL+MSNqRkRJ", + "NLkN1j8ZFRE+Rw5fSsNXvABl1/0S9+RzImK88HDDnYGHHf5DUAX0Zb9DWqwh/fEaXqh7cxJsmRsO/OzQ", + "MynPnB5r+rbMuc1M8ons7J7bx922DFre0HQ01RrHCE9zL8aqQ5XLG5oi+KIHX5htjGNDvFGme0ZjzlV/", + "yP4xIwzB3sEGk/ckBD6lzTR0fHEm0S2NY7B5gBEsC4Ahe11iBaa5VPq/ImNdNM4UEiThimiDM9F960Ey", + "mAs0HhOUMexObfpDVoaKXWAdryxYbohgJB7NCI6IkC0hYz5C9qNG4MBSJ1gqIgyHztIqvE7/fn6JOqcL", + "hhMaor+bXs95lMUEXWappuGtKvS6Q5YKMicMtF2tNVA7Lp8gnqken/SUIMRNMYHOcqvRHinMn11c2UMp", + "udUfsldEA5awSBudXCAnJSRSM6xQxNmfNcWSqNptefwa0P203A3mYZpVobxbh/ALOArS65lToTIca5ZV", + "Ubu8J0PmzNGjppojzbK6bFlRjnBYVV36bc0F0zMcQC4rz34LwSgczRbCmvNXn6M99zqEmVQ8KbnbUafm", + "UKBV10OVecx53IuwwqAatNRfzHSXj66ShenKbEoTlxxNxx4vlWaGlKEpneLxQlUV7p3B8tb7Ae3694G6", + "6VjXoAeJRoqvPtiiE+TatvFjwyHwSPHRfEI9PedCs/CgUInC2hmyRVrdRS8NqSXfLrqdUS1mJXJAAAq+", + "Pi8bgv0h6wHLOUKn+QB5t3mXmrOCtwy66HBRmgQFxycaL7YQRtfnffQ6n+2fJWJY0Tlx59wzLNGYEIYy", + "UM9IBOMDOy1PIJOah1FV/9zyKnMkvgX2Lrfv+kgbFAm2fF+jd4IVDcHZNqa19cAhh9koPZJmAKwsdVpJ", + "iVXHga/IlEolaoeBqPPq6cne3t6jur6w+6A32OntPHi9Mzga6P//s/254dc/9ff1dVzlF9Z9WeYoJ1dn", + "p7tWOamOoz7s40eH799j9eiA3spHH5KxmP62h+8kLsDPnk4LvyvqZJKInmN9Gqt83taSU7PBm/rZTtKN", + "QhLcscwq8WNW91q3/BZBDL6jNHuQs3mYQZ0Jrj2MKy1uaT36V60fFJhfchBYn3dIvd79UypvHguCb7RV", + "6ZGvWjzLkZE7fodXpu2o8QKR91o9IxESnKuJNP6Cqpqys/9w/3DvYP9wMPCc3S8jMQ/pKNRSpdUEXp6c", + "oRgviEDwDeqAoRehcczHVeR9sHdw+HDwaGe37TyMmdQODrkW5b5CHQuRv7g4MPemMqnd3YcHe3t7g4OD", + "3f1Ws7IKXqtJOWWwojo83Hu4v3O4u98KCj6z84mLpaifDUceJD1O05gaI7snUxLSCQ0RRGMg/QHqJCCW", + "SG7xVWlyjKORsGqgVx4oTGMPGEquPzOYbWlCb5IsVjSNiXkHG9JK04WVn0JPPjcxZYyIUR5qskFPNgJl", + "rWfMrSVvgiqRRBXQnVMJmkWhEFESR0eGQtfyOdjNYmJvmvDArqElNjznt0T0YjIncRkJjDjSk024ICjH", + "E7NplVVRNscxjUaUpZkXJRpB+TQToF+aThEe80wZUx02rDwInJuBjTDR7LrdsW3hqF4aWtuZGzr+UsEn", + "NPYsA4xW+9aKdOcSe74/uOzt/B/wg71k8cLwAcqMoZvwiPRrwYrQvvXyLprmlEeKovLsltaUuyY87tHc", + "2nUQsUZ3iBkaE2TFpHHqgtukGKRg8I98DHMicELG2WRCxCjxWFpP9XtkGhgfFGXo/HGVaWrm3Fbduqhs", + "DuhbExzaQL920PdYcrVldEvQfOPfrlfExDY0hRLorRK2jY0m6KMXeWwuenZxJVHhTvKYeC1P7S5mC6mN", + "E9OjiQyirGyZAXK2ZsMXxYfWhvUw48TLgBwhoM58mmZAhpevemcvr7eTiMy7lTmBC2jGY6LnvVXSreYu", + "oKA4YqwcucybVGSDGLItAZVglVNwayCV6NUDHcUVjkcy5sozm9f6JYKXqHP91Bwk6xl0UVrZSv17CQoV", + "/D7wUozmSE3DXsKAdVu7QuBr3R6JEVvl5VUG9ZHKLwTHJpK/is9FbJrbeH5T3Wh+s5Z6bSe+cc/cqVtN", + "ciYe2+Xk/NRYZiFnClNGBEqIwvbeQOl4G6Isgm7Q08pAhEkCPtHJf60+8G7w3eTossr6P1kKA/4mln9D", + "qJtmcvGcRCjBjE6IVDbUrTKynOHdBwdHJsg2IpP9Bwf9ft9/wqPEIuXUF+P4JH/Xbiu2zflor+izL2df", + "tg/f4CC/zVo+BhfHr38JjoLtTIrtmIc43pZjyo5Kf+d/Fi/gwfw5pswbANAqLptOluKxK9ubapllfj/S", + "K2EkzBGSg5a41jfpl+QvNGrG9AOJkDcsSuEp0vo3YNyXxT99QSRzcZ1GlSKYy8cELaKZ6YfV5rZTjKCN", + "HTNjisZFoPeyof1ZofpyZeTjUtRjSlge6xjH5inkbK6pwhf4WGHg7t3SZtxycUPZdBRRD3b+w7xEERUk", + "VBBXsp6Ggm2cputR0a/85TytbRC3DeHySJfvzsk/x+FaHf3l9G/v/q+8ePjbzrvn19f/PX/2t9MX9L+v", + "44uXXxRysjp677uG4K08UwMvYyX0ri16nGMVehSfGZeqAWr2DVIcJfrjPjoBA+1oyHroOVVE4PgIDQOc", + "0r4FZj/kyTBAHfIeh8p8hThDuit7dLylP74wYTf644/OBvxU7yOyZ8TCAjkP55DZOOIJpmxryIbM9oXc", + "QiQc2uinCIU4VZkgeke0rhkv0FjgsDgbLgbvoo84TT9tDRlYouS9EnoFKRYqD/V1I8BG21mZQyHbnERo", + "juOMSGvJDlkuP8A0150oLKZE9XMXIjhqagczDUDxmhlcVGMbDgddzz4i3U5vZEylIgzlXgkqAXlRxwWp", + "HA4q5H84OFx//pjj0Ar0A+xevlzrkLIFfRgEhqENMx7NlErXhy8AvzE0gn55/fpCg0H/e4lcRwUs8i02", + "xhhO05gSaU7VVAw6iY0L2gp8J2dmd1su6LVprD+LW4RhPIGB0evnl0gRkVBm+Hcn1OCc0FCvD853qJSZ", + "RkWK0fHJ+ZOtfovbwQDbfP4r9vF1vsLaMYJzbi1bmPBF4TTX8O2is9OuVqcshRaKFpybPuUCxYbBFHR9", + "hK4kqUYxwFaZIx6zk/Gi8JAZrj4MtlyPaZ1THKFXuX6H86nkVxAKZHBdFnQJ3drAFnOou9R7tzpXOK62", + "9otlbXCEixWyTm8Qxc2sYDX5eyAONM9Z3fe4GW2XnZZ6MD9qFHv/zTWQvU1tyU3DuatBaaUgxDyi+/uG", + "Yn9OYLXboWcXVxC+jOVIMpzKGVfNwRkYuTaIvKdSyeU4tlbhBMuB3FXxZEK0VwQGfs2QbJExBpER9WV8", + "m2Dr7xlw8OMFeq8Mzf7S+GqrpX2j8OpGruALTa4yCPPz1w2U/ibTqYQ8+zhCWZi5aLDPjnLuBtQTCXMs", + "JZ0yEqGzi+L+X+H1cN3X1vRot79zcNjfGQz6O4M2PqAEhyvGPj8+aT/4YNdYxUd4fBRGR2TyBT4oi9hG", + "68DxLV5INHR64TAwimhJAy2RrdUdW53vLQeTf17seF0SrosO3yQavF2Y94qL+ZfVK/mtlYsH//yi2/uk", + "rSy+hMbuq9Em3lGCQp7FEfuzQmNNecYeIJE1WyRRRbYDINYrdsP4Lasu3TjJNP2+y4hYoOvz84pLVZCJ", + "vfjdYuE8TRv3gacbbcPuGh1v7WxKEdd3EWVd54QlCfTVY6rL/h8X3GGwroUfqNABvWellBlw671fsaaa", + "BR+R+SjLfIqOfuXCNK+uzk4rG47xwc7h4PBR73C8c9DbjwY7Pbyzd9DbfYAHk73w4V5DipT2sRKfH/5Q", + "pdDmsGgAPHjDTCR7dKRpKI9fGGcK5TfVNHGeaI0RlZRREwQMBuoro5fqHkC6hvpNvMj11ZUfX2BNqO7b", + "FP5a/cXlLFNaDYJv5CxTSP8FU9ZLsPr+6i4MzR+hFxy+sTPtakFZMxxMc8yi8WK5ed3I6NgwEEGk4oJE", + "MJhlYEfoac60crZn2VxHEvtoeKkNl4JQsC1jVVsd3+5W0A0s1INuYEAYdAMHGf1oVghPMPmgG9iJeCMt", + "y3jj8xYTHAMPKyIxMkVj+sGQnJ46lYqGxs7CsJtNZGevs5FoZERo03mOOd63Yjb/yFH19TnqwOWDvyBr", + "hum/tvKznzIJ7e8+2n908HD30UGr0MViguu58QkEnyxPbi1rDtNs5FJFNSz95OIKhI8WbDJLTKykXXth", + "u2nGEWptjzJU5J4qBn/Uf1SO2Ix4No5L7gYbsg1hgW0ShTUcdryj8ZxOJuzdh/Bm9zdBk533B3J37DWO", + "8oH8muRZ2UW2ZHaRcc9cOvYH1QFCCdkYd/qKSFgBuiQKAf70NMPSEjWPGbEo56JTLcS9iLW/t7d3+PDB", + "biu8srMrEc4I7L/lWZ7bGZRIDFqizqvLS7RdQjjTpwukSwWRenHmKoWXztAwGwz2CBpUYuy07bHnw5IG", + "haXAGtv3PGkE+bXVWOyiLNAh9CXXZpao3Avtvb3Bw/0Hhw/akbG1eEbi/WoOY9vZI2NBQkLnlZ3vgFv1", + "9fEF0r2LCQ6rGv7O7t7+g4OHhxvNSm00KyUwkwlVaqOJHT48eLC/t7vTLoDa5zq1VwMqBFvlXR6i8yCF", + "Zzc8oFhmvd0maeHTEpfj7VaG+BUxg/UAsU0iQovrYFRCr7QUjIg6WokqK6SlK01bbfwMfhapx2lKQKnV", + "xbbBmqtjMy+wmp2xCV/2jW9i8NmIF3cSkWrFR0JqrogwSiLHu3LLz+pSEEMTS4KijFjIGd1IYAtwbM4H", + "UqxmoKzCh5RNq9HDSwO2McPMHFZf/oNxbcM2HiPpj9J4LTKAlfHqSoSLeI1WLmoqR36rYrljQaZZjAWq", + "BySvmLJcJDFlN216l4tkzGMaIv1B3Zyf8DjmtyP9Sv4V1rLVanX6g1FxNFkzz83k7MG02ZDauMUS/qpX", + "uVULdQHJv22+34YMw20ccN6Q3afaeDMxu1eMvi8hevUmzf7uoCmyqaHTSkzTcrz3przdoqyP4l0o9nGe", + "ccJzLmaObGoWbFUPrqzXt1o42loVx7WsCaCO8+m5m0pVuJZuDLUSxO1O1+reazebbUnC6uj7hw8eHrS8", + "svVFqvaKHKxfoFjPkxUKdcNOnbfR2g4fHD56tLf/4NHuRvqRO+ho2J+mw47y/tQSy9R0tgcD+N9GkzJH", + "Hf4pNRx3VCdUSRLz2RP6tIJ0i1sUDVb3qvznxU46M7+qgLdTcVdoS8cVlauUMKxDJhMCjqORgVuvmEwt", + "qqfVHEKc4pCqhccCxLcQ6IDyJrXbAC16r03WA1LbN8ITRQScRshsXFyH67jB0X8ay66GC4etb37KbNxk", + "Rb6sj2psSBMZFNU8FC0cBAYjfMfgtzkw0S2WFa++fg4VibqlhHD14x/Ton2+W4frecrb4mDbd6PFn962", + "vP217SxZHRUluQ7xVSK0mQS1RgBhR20c7B6J7LkmE64Po6jxBysAP++r0bh8J3vlpffKBe5C6m4+brsU", + "fMvfGQm2+XilE/xNPqxfTwV8tHOwIC/67lZQwodN5nylKfdJ4gqD1G6vUpNq3V5RQqXGqEOSVC1cGL6z", + "TLc2O+85zjv0IuNXDpwaPPoaodtXK2O1/4dk0ykfsblB1h6uLe1pY4CkX109rYevGJvQZhOohlvU7khL", + "taKiwKrqNaaMDBh8Njh5mtVvU21QsabJxC8ox5UKcCVr1lmuK/1ppZWVZtK8N+Z89QvL+1Dp6vp8Jsis", + "+bU+2tecUWkDuFdPN2EuqwoK9pwFkAGsBkFuoi/7AVaHfZzj9/kIYC1jiWoJ+sw6Shlvnz2GC+ivXNoB", + "OnFdwDTqqRYff1ndI4dVy5uxqhCSO8H3Ep7lPys4WhNt1ZCzGKO7utaSZl0kzARVi0stEGxwGsGCiOPM", + "oCFIClgE/FwMDhHvnz6BmTrxaKvPCCOChuj44gywJMEMT/WWXZ+jmE5IuAhjYgOWl8524b79y5Oznrlp", + "kWfEgzIGCgDiUlEdX5xBFhxbQCAY9Hf7kPWXp4ThlAZHwV5/B/L8aDDAErfhIhs8WkeUpkOQZGeRlbiP", + "TRMNWplyJg1wdgeDWkEKXGQa2f5NGg+LEa+tlUJT8Wc53mIpDtdpAnb6n7rB/mBno/msTQ7iG/aK4UzN", + "uKAfCEzzwYZA+KxBz5ixql1OYmIbFjgbHP1axdZf33x60w1kliRYq4gGXAWsUi6bVBgiEUaM3Nobjr/x", + "cR9dGpsEMoUUtdSMy4BEmiVhpLDoTz8gLMIZnZMhs5zYJHrBAq5zJEhzYBNMX0UzM7TZfUPCRKrHPFrU", + "oJt3t627A22kCuCNK3XkWQvThpIdPu5okiPJkHuzQhGGmSpy7ZisSDcEDjEn9L03IB7ie/3e7tP8navt", + "UuXtWt2lLIyzqBCA1Zoa3ovWpjaEzfN0Qzz6wjNoYedfDoV2kobxiJiw1nShZpyZ52ycMZWZ57Hgt5II", + "LY/svQwLFm025zW5TBI9msDdCHOTU4+5baa4/fGGLD71h+w4StzNW5vLFceS2wRYJkCBSpRnFB4yrwYt", + "R1j3Mxq74mI1RZVAV8NAi8phoJ+nAmuVLJMzhEMISNA/loHTMdjMBYi7rfpcQ8xQytMs1soDbI/JkFXp", + "A6644ThGCvDHfauFKMCkYT2ShIL4bKW/Xb58gYB/QpEVaFZEl8MaKNPSL88UqwfsD9kTHM6QEYyQQXEY", + "0GgYFMU0tkCIZZIY2dTrgWT9K1QZMsN0afTXfl93ZYT2Efr1o+nlSGNNmowUvyFsGHzqotKLKVWzbJy/", + "e9Ow4AZfzWUF5VHHMKQtdylYr7DEmw0zwyxC3DKAeIEwKmitbJKNKcNi0VSZhmeqOd7F3Jm2zYoLfQeD", + "wdb68wy7VI+6UmmoMfXTknTe/WqCyQrlZcFUqkKnxQCzF+IjI47vQDI+xpG7p/VTBVijAljbpSTc4Xur", + "AG5/pNEng74xMfGVNQkNxYqchE6xwAlRkKn6Vz/OQ2gp1X+700fwNRhLvoq83RJ46gr9myXE3m+sApXX", + "UwJc2L8D/INxizRlMO6juxoXxyZJbl6Z8l6hI2yWQ8Su3/p4RtSPgHGDu2KlLpvid8Tf+4I/z4hVkQqg", + "1bjZNqSnL5u29SsQguBE2l5MY23LXMKcepeEKQT1B2Xf/uvUbIgufxvz6dsjZEAY2+qL0ubHy33AWiha", + "WMJHJn1I/p3NqhPOMJsSiTpGfv7+r3+7CnK//+vftoLc7//6N5D7tq2HCt3ltQ/fHqG/E5L2cEznxC0G", + "IibJnIgF2hvYghzwypOjRw7ZkL0iKhNM5vFGel0AE9MhqOwM1kNZRiSSAELInj2xgTDGxeQx8RwtG1De", + "KUV3lyxdu4LSArRUdDgAJ5uUUUVxjHimTKJLmAdcyikmYtYclAeve8uW/Kfr+Ysi75XB3p6Z4IYMxtQO", + "9dCdKadp+kSdy8snW30E6r7BCgh2Aruh6MZaAv2fPGk9TzIcpcpQAMqGN5XSMzb62k5tm7twtjWlbmz2", + "tgnIM0+07eoW81PtbuF588PNeeF8rrBTl0682Rf2+ev1lRZtZVN+vX12uLcMc5srvwDZ97AmUcemOc6z", + "mVQS8n8vpL8TBlyq45BzYcRNDpU7s3BOOJvENFSo5+Ziy03mVk8VQe4LO3hlZ42wW1c9Qr8sKrYrAWeN", + "QiOPPbtL6VEbdBMxUtwiKHDtpyRZhzqnVIZcf1vCll6IUwCkBWJBp2UsWufbOYXfc5GzUjHPC8A6grw7", + "L48dOmN12XAHTPG0xhC/IyOspfko3bu5T9h8le+iK52ywgn0Y6Hm4O60oLt2CPnQ/D55hKIa2DQXnOXZ", + "xZvQy+Yf/4YbbUfwLPySCEfVZqImvUSxLPMpCmckvDELshV+VmkEZ64I0LfXA0wS9Q2kv53+T3HfwnAs", + "YLXKWDyzOUe+na0II2xkKn6940eLYB4gQ5TG2DlSTToPLBcs3PpDnUDeiWSoV+S5R5R0kcWxc8TPiVBF", + "KvkyP93+qPWDFnqyo7aVusjVq+c9wkIOMTkGdI0Kicsc/XW1ZbNhZik/0aSNfQWgcojRrIx+wf6b0CmU", + "Z3P80+5Tm8/xT7tPTUbHP+0dm5yOW98MWQZ3xZrvWnu9x8inlVdaBRqwJpPfeZ22l7e6E4XPJtLfROXL", + "J/hT62uj9ZXBtVLxy2safEPVz6aK/z7nBDmy+aANr1z82R9M5btb15PFyFL1v4ov3iY24aJIz25rh92/", + "ADmaY1yZ/7b0oRYEuVI7cKh7dtq1mfdNvvw8QPyOPKpuHneuJdpx796depyM6TTjmSwHtEOhBSKLqrQV", + "Bnzf9NdCPDdqsD8wlg7uUnTcuYL6E++/kepc31DDvG0h2zXKs2t1N8pzcVTTXnt2M/ypPbfSnkvgWq09", + "53lcv6X6bAb5bvqzwzcfwO0V5p8a9F1o0DKbTGhICVNFDqKlqBabwuwe3ith1glfOo2uMOHWGnSRXHm1", + "cmKR93tEIuSD373i7BKd3c/4WG4i4iOnqhbCsFlX/dHwYXC3zPnuddT7jGLPykXt/NqguRwS8+n6qyF5", + "T+4ehOduyJC5CnhvDVN/i3JERYojSWISKnQ7o+EM7ono36B/c40Ep+nb/GLo1hF6BvGn5auqMHhHEkFx", + "DBnTeWyS/b+dJ8nbo+WcEdfn5/CRuSJiskO8PUIuT0ROY1K3Kt/70KuIsVTohb3N0tEbLngcm+zMbzU8", + "S+vbsjdCiju0Q+a7HcLIre2QTtDb0kWRtw03RRwSPte79J0ov9ucHN+sRXEkAHDmzjphUcMtEQ01/x2R", + "nYE39VHL+ypmGt/4usrSZJ7zaZ5foILKOE3boq+dJmDxPElW4DDqlAoCSBXxTP1FqogIU7TWYncTcqMO", + "Ds0fCt+YEquVGnOmBIUPVPbutRdUgSkk7SpXmL/mSRKYgncJ9lWi+PJ7P/UOlw1GvTOlyz0/ZcYm13aq", + "zL50b6cmOWwJFMg24rUuX5kGf3jNxdWK+c5o+B0svWIWFErIsGi8gL0tivDcr0sLsJHFykDe2XV5acS9", + "a6QRW7vnD08jBX78wakk5AKqfktXgO/+RJeVLI4SuXeg4ldRSavrrN7r8/OtJqIxhaMbSUb8NIdtoOcf", + "XqZAEbT7Ry2m/ifOF7DKWagJQjXa6M5mrRRIHPNM976UPhUKg8iFVCQxBvski+HmHYTV2wQGuFz4pIuo", + "kpCGuwsuq1LRiyEbk4mWhykRemz9OaRnK2wPn1l7qXBOvheGBn8MuxYyqoIph1UT1GrVRdLUJVP12U55", + "/tfPntJTMFSrhVck6sT0xlQTRHOJYv2wtdLSNVVZvnZ6hs+nrLzukO/arcHZHJn/CBzurMbWXF3Ne8fW", + "npEysTj+AxvtZ2tyLV8TGxamdLArFajsD9k5UUK3wYKgkMcx1CMw+vt2Kni4DUXzwpRGpnoeTA4YXvPr", + "BEY8ubiCdiYFfHfI9B/LZdvqE3XV3862X67x/ZmCnf+D9RyzwFVk4d/wn26dzY8CGmlINpAoT1dp4jz9", + "qYjbOrw/zdZ7abbCWWy+ms5U4BCUYmkrLftNVFuebPujeThbd6KvcDi7dtUifgxt1yaXXzeMW+C9IEq7", + "poiYtAB3T5M8z/9/T69+acC5JYASU45N8EsBU1fkj4bdXz9OrgzHjaLk7pS2XMqNH4a27lry2Tm4QLUy", + "PO4LmRtMcyuBBOhl75MoFzhbaZu5+lNQbS9XLV3dtW65/J/J8Jn7kIq6MXmlsf6Q5aXVXIZRbV11nWmF", + "IipvTA/WeuojfwU8Y+fZMnhDpjgKcRyavPN5KThTvlE2WF+vSuURvxm9FYN4NjqvgSfzkmX3yeTw4wTs", + "XrkmGmCcVadWxqdf2zZ3EZ1uhdkGseluBT8j01tEppeA1aYCiyloZ7mVrUSWl8+AalD9hkIquVLy7eLa", + "P0Nefz30cHjaKK1/RrTfmUJQXAk9O73/Yexlmqvw6G1tFfRseaOya2gVBVsQpYL0XP2XyADMwsPYGvXq", + "Sf0hez0j7i9EXSgliWwF/XiBKIOCN64I3p8lEpyrosJ+c5UlQyJPBU+O7WrWGC+ty0H6DmI2zlfR9ZTA", + "o0mW5MXinz2G8tfCRPahCaYxxJU6kJL3ISGRBJzcqpeZ9Ib65fUk185yRYxmXkgqzKTiidv7s1PUwZni", + "vSlhei+Kmk2p4HMa1WsGV+p1+mYLFuJXMNKmH2haJb219W6WCa+KtygvUmUL7hT46XYn+Ckm6hmG9W5r", + "I88BUXGOYiymZOunKLnPoqTsTXJyoyJR2l2Iaudgaun3+RaXoXLn491ehbr+cXwipYys9zBhwDw3+pru", + "YP1YKDi4O/lw13evru+xD/0ZcQZu6d4VdKB79CHMcx7iGEVkTmKeQilq0zboBpmIbWHdo+3tWLebcamO", + "DgeHg+DTm0//PwAA//+YHpPWP+IAAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/lib/resources/cpu.go b/lib/resources/cpu.go index 883cbff7..edac6e50 100644 --- a/lib/resources/cpu.go +++ b/lib/resources/cpu.go @@ -1,12 +1,7 @@ package resources import ( - "bufio" "context" - "fmt" - "os" - "strconv" - "strings" ) // CPUResource implements Resource for CPU discovery and tracking. @@ -15,7 +10,7 @@ type CPUResource struct { instanceLister InstanceLister } -// NewCPUResource discovers host CPU capacity from /proc/cpuinfo. +// NewCPUResource discovers host CPU capacity. func NewCPUResource() (*CPUResource, error) { capacity, err := detectCPUCapacity() if err != nil { @@ -59,78 +54,6 @@ func (c *CPUResource) Allocated(ctx context.Context) (int64, error) { return total, nil } -// detectCPUCapacity reads /proc/cpuinfo to determine total vCPU count. -// Returns threads × cores × sockets. -func detectCPUCapacity() (int64, error) { - file, err := os.Open("/proc/cpuinfo") - if err != nil { - return 0, fmt.Errorf("open /proc/cpuinfo: %w", err) - } - defer file.Close() - - var ( - siblings int - physicalIDs = make(map[int]bool) - hasSiblings bool - hasPhysicalID bool - ) - - scanner := bufio.NewScanner(file) - for scanner.Scan() { - line := scanner.Text() - - parts := strings.SplitN(line, ":", 2) - if len(parts) != 2 { - continue - } - - key := strings.TrimSpace(parts[0]) - value := strings.TrimSpace(parts[1]) - - switch key { - case "siblings": - if !hasSiblings { - siblings, _ = strconv.Atoi(value) - hasSiblings = true - } - case "physical id": - physicalID, _ := strconv.Atoi(value) - physicalIDs[physicalID] = true - hasPhysicalID = true - } - } - - if err := scanner.Err(); err != nil { - return 0, err - } - - // Calculate total vCPUs - if hasSiblings && hasPhysicalID { - // siblings = threads per socket, physicalIDs = number of sockets - sockets := len(physicalIDs) - if sockets < 1 { - sockets = 1 - } - return int64(siblings * sockets), nil - } - - // Fallback: count processor entries - file.Seek(0, 0) - scanner = bufio.NewScanner(file) - count := 0 - for scanner.Scan() { - if strings.HasPrefix(scanner.Text(), "processor") { - count++ - } - } - if count > 0 { - return int64(count), nil - } - - // Ultimate fallback - return 1, nil -} - // isActiveState returns true if the instance state indicates it's consuming resources. func isActiveState(state string) bool { switch state { diff --git a/lib/resources/cpu_darwin.go b/lib/resources/cpu_darwin.go new file mode 100644 index 00000000..8931af85 --- /dev/null +++ b/lib/resources/cpu_darwin.go @@ -0,0 +1,13 @@ +//go:build darwin + +package resources + +import ( + "runtime" +) + +// detectCPUCapacity returns the number of logical CPUs on macOS. +// Uses runtime.NumCPU() which calls sysctl on macOS. +func detectCPUCapacity() (int64, error) { + return int64(runtime.NumCPU()), nil +} diff --git a/lib/resources/cpu_linux.go b/lib/resources/cpu_linux.go new file mode 100644 index 00000000..606cd718 --- /dev/null +++ b/lib/resources/cpu_linux.go @@ -0,0 +1,83 @@ +//go:build linux + +package resources + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// detectCPUCapacity reads /proc/cpuinfo to determine total vCPU count. +// Returns threads × cores × sockets. +func detectCPUCapacity() (int64, error) { + file, err := os.Open("/proc/cpuinfo") + if err != nil { + return 0, fmt.Errorf("open /proc/cpuinfo: %w", err) + } + defer file.Close() + + var ( + siblings int + physicalIDs = make(map[int]bool) + hasSiblings bool + hasPhysicalID bool + ) + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + + parts := strings.SplitN(line, ":", 2) + if len(parts) != 2 { + continue + } + + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + + switch key { + case "siblings": + if !hasSiblings { + siblings, _ = strconv.Atoi(value) + hasSiblings = true + } + case "physical id": + physicalID, _ := strconv.Atoi(value) + physicalIDs[physicalID] = true + hasPhysicalID = true + } + } + + if err := scanner.Err(); err != nil { + return 0, err + } + + // Calculate total vCPUs + if hasSiblings && hasPhysicalID { + // siblings = threads per socket, physicalIDs = number of sockets + sockets := len(physicalIDs) + if sockets < 1 { + sockets = 1 + } + return int64(siblings * sockets), nil + } + + // Fallback: count processor entries + file.Seek(0, 0) + scanner = bufio.NewScanner(file) + count := 0 + for scanner.Scan() { + if strings.HasPrefix(scanner.Text(), "processor") { + count++ + } + } + if count > 0 { + return int64(count), nil + } + + // Ultimate fallback + return 1, nil +} diff --git a/lib/resources/disk.go b/lib/resources/disk.go index 2b6bf76d..087438d9 100644 --- a/lib/resources/disk.go +++ b/lib/resources/disk.go @@ -3,11 +3,8 @@ package resources import ( "context" "strings" - "syscall" "github.com/c2h5oh/datasize" - "github.com/kernel/hypeman/cmd/api/config" - "github.com/kernel/hypeman/lib/paths" ) // DiskResource implements Resource for disk space discovery and tracking. @@ -19,48 +16,17 @@ type DiskResource struct { volumeLister VolumeLister } -// NewDiskResource discovers disk capacity for the data directory. -// If cfg.DiskLimit is set, uses that as capacity; otherwise auto-detects via statfs. -func NewDiskResource(cfg *config.Config, p *paths.Paths, instLister InstanceLister, imgLister ImageLister, volLister VolumeLister) (*DiskResource, error) { - var capacity int64 - - if cfg.DiskLimit != "" { - // Parse configured limit - var ds datasize.ByteSize - if err := ds.UnmarshalText([]byte(cfg.DiskLimit)); err != nil { - return nil, err - } - capacity = int64(ds.Bytes()) - } else { - // Auto-detect from filesystem - var stat syscall.Statfs_t - if err := syscall.Statfs(cfg.DataDir, &stat); err != nil { - return nil, err - } - // Total space = blocks * block size - capacity = int64(stat.Blocks) * int64(stat.Bsize) - } - - return &DiskResource{ - capacity: capacity, - dataDir: cfg.DataDir, - instanceLister: instLister, - imageLister: imgLister, - volumeLister: volLister, - }, nil -} - // Type returns the resource type. func (d *DiskResource) Type() ResourceType { return ResourceDisk } -// Capacity returns the total disk space in bytes. +// Capacity returns the disk capacity in bytes. func (d *DiskResource) Capacity() int64 { return d.capacity } -// Allocated returns total disk space used by images, OCI cache, volumes, and overlays. +// Allocated returns currently allocated disk space. func (d *DiskResource) Allocated(ctx context.Context) (int64, error) { breakdown, err := d.GetBreakdown(ctx) if err != nil { @@ -73,13 +39,12 @@ func (d *DiskResource) Allocated(ctx context.Context) (int64, error) { func (d *DiskResource) GetBreakdown(ctx context.Context) (*DiskBreakdown, error) { var breakdown DiskBreakdown - // Get image sizes (exported rootfs disks) + // Get image sizes if d.imageLister != nil { imageBytes, err := d.imageLister.TotalImageBytes(ctx) if err == nil { breakdown.Images = imageBytes } - // Get OCI layer cache size ociCacheBytes, err := d.imageLister.TotalOCICacheBytes(ctx) if err == nil { breakdown.OCICache = ociCacheBytes @@ -94,7 +59,7 @@ func (d *DiskResource) GetBreakdown(ctx context.Context) (*DiskBreakdown, error) } } - // Get overlay sizes from instances (rootfs overlays + volume overlays) + // Get overlay sizes from instances if d.instanceLister != nil { instances, err := d.instanceLister.ListInstanceAllocations(ctx) if err == nil { diff --git a/lib/resources/disk_darwin.go b/lib/resources/disk_darwin.go new file mode 100644 index 00000000..8a5d32f3 --- /dev/null +++ b/lib/resources/disk_darwin.go @@ -0,0 +1,49 @@ +//go:build darwin + +package resources + +import ( + "os" + + "github.com/c2h5oh/datasize" + "github.com/kernel/hypeman/cmd/api/config" + "github.com/kernel/hypeman/lib/paths" + "golang.org/x/sys/unix" +) + +// NewDiskResource discovers disk capacity on macOS. +func NewDiskResource(cfg *config.Config, p *paths.Paths, instLister InstanceLister, imgLister ImageLister, volLister VolumeLister) (*DiskResource, error) { + var capacity int64 + + if cfg.DiskLimit != "" { + // Parse configured limit + var ds datasize.ByteSize + if err := ds.UnmarshalText([]byte(cfg.DiskLimit)); err != nil { + return nil, err + } + capacity = int64(ds.Bytes()) + } else { + // Auto-detect from filesystem using statfs + var stat unix.Statfs_t + dataDir := cfg.DataDir + if err := unix.Statfs(dataDir, &stat); err != nil { + // Fallback: try to stat the root if data dir doesn't exist yet + if os.IsNotExist(err) { + if err := unix.Statfs("/", &stat); err != nil { + return nil, err + } + } else { + return nil, err + } + } + capacity = int64(stat.Blocks) * int64(stat.Bsize) + } + + return &DiskResource{ + capacity: capacity, + dataDir: cfg.DataDir, + instanceLister: instLister, + imageLister: imgLister, + volumeLister: volLister, + }, nil +} diff --git a/lib/resources/disk_linux.go b/lib/resources/disk_linux.go new file mode 100644 index 00000000..e6cc8fb1 --- /dev/null +++ b/lib/resources/disk_linux.go @@ -0,0 +1,42 @@ +//go:build linux + +package resources + +import ( + "syscall" + + "github.com/c2h5oh/datasize" + "github.com/kernel/hypeman/cmd/api/config" + "github.com/kernel/hypeman/lib/paths" +) + +// NewDiskResource discovers disk capacity for the data directory. +// If cfg.DiskLimit is set, uses that as capacity; otherwise auto-detects via statfs. +func NewDiskResource(cfg *config.Config, p *paths.Paths, instLister InstanceLister, imgLister ImageLister, volLister VolumeLister) (*DiskResource, error) { + var capacity int64 + + if cfg.DiskLimit != "" { + // Parse configured limit + var ds datasize.ByteSize + if err := ds.UnmarshalText([]byte(cfg.DiskLimit)); err != nil { + return nil, err + } + capacity = int64(ds.Bytes()) + } else { + // Auto-detect from filesystem + var stat syscall.Statfs_t + if err := syscall.Statfs(cfg.DataDir, &stat); err != nil { + return nil, err + } + // Total space = blocks * block size + capacity = int64(stat.Blocks) * int64(stat.Bsize) + } + + return &DiskResource{ + capacity: capacity, + dataDir: cfg.DataDir, + instanceLister: instLister, + imageLister: imgLister, + volumeLister: volLister, + }, nil +} diff --git a/lib/resources/memory.go b/lib/resources/memory.go index 52cebd78..0e334cff 100644 --- a/lib/resources/memory.go +++ b/lib/resources/memory.go @@ -1,12 +1,7 @@ package resources import ( - "bufio" "context" - "fmt" - "os" - "strconv" - "strings" ) // MemoryResource implements Resource for memory discovery and tracking. @@ -15,7 +10,7 @@ type MemoryResource struct { instanceLister InstanceLister } -// NewMemoryResource discovers host memory capacity from /proc/meminfo. +// NewMemoryResource discovers host memory capacity. func NewMemoryResource() (*MemoryResource, error) { capacity, err := detectMemoryCapacity() if err != nil { @@ -58,34 +53,3 @@ func (m *MemoryResource) Allocated(ctx context.Context) (int64, error) { } return total, nil } - -// detectMemoryCapacity reads /proc/meminfo to determine total memory. -func detectMemoryCapacity() (int64, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return 0, err - } - defer file.Close() - - scanner := bufio.NewScanner(file) - for scanner.Scan() { - line := scanner.Text() - if strings.HasPrefix(line, "MemTotal:") { - // Format: "MemTotal: 16384000 kB" - fields := strings.Fields(line) - if len(fields) >= 2 { - kb, err := strconv.ParseInt(fields[1], 10, 64) - if err != nil { - return 0, fmt.Errorf("parse MemTotal: %w", err) - } - return kb * 1024, nil // Convert KB to bytes - } - } - } - - if err := scanner.Err(); err != nil { - return 0, err - } - - return 0, fmt.Errorf("MemTotal not found in /proc/meminfo") -} diff --git a/lib/resources/memory_darwin.go b/lib/resources/memory_darwin.go new file mode 100644 index 00000000..01989aa9 --- /dev/null +++ b/lib/resources/memory_darwin.go @@ -0,0 +1,17 @@ +//go:build darwin + +package resources + +import ( + "golang.org/x/sys/unix" +) + +// detectMemoryCapacity returns total physical memory on macOS using sysctl. +func detectMemoryCapacity() (int64, error) { + // Use sysctl to get hw.memsize + memsize, err := unix.SysctlUint64("hw.memsize") + if err != nil { + return 0, err + } + return int64(memsize), nil +} diff --git a/lib/resources/memory_linux.go b/lib/resources/memory_linux.go new file mode 100644 index 00000000..1ed59d26 --- /dev/null +++ b/lib/resources/memory_linux.go @@ -0,0 +1,42 @@ +//go:build linux + +package resources + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// detectMemoryCapacity reads /proc/meminfo to determine total memory. +func detectMemoryCapacity() (int64, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return 0, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "MemTotal:") { + // Format: "MemTotal: 16384000 kB" + fields := strings.Fields(line) + if len(fields) >= 2 { + kb, err := strconv.ParseInt(fields[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("parse MemTotal: %w", err) + } + return kb * 1024, nil // Convert KB to bytes + } + } + } + + if err := scanner.Err(); err != nil { + return 0, err + } + + return 0, fmt.Errorf("MemTotal not found in /proc/meminfo") +} diff --git a/lib/resources/network_darwin.go b/lib/resources/network_darwin.go new file mode 100644 index 00000000..4e662975 --- /dev/null +++ b/lib/resources/network_darwin.go @@ -0,0 +1,49 @@ +//go:build darwin + +package resources + +import ( + "context" + + "github.com/kernel/hypeman/cmd/api/config" +) + +// NetworkResource implements Resource for network bandwidth discovery and tracking. +// On macOS, network rate limiting is not supported. +type NetworkResource struct { + capacity int64 // bytes per second (set to high value on macOS) + instanceLister InstanceLister +} + +// NewNetworkResource creates a network resource on macOS. +// Network capacity detection and rate limiting are not supported on macOS. +func NewNetworkResource(ctx context.Context, cfg *config.Config, instLister InstanceLister) (*NetworkResource, error) { + // Default to 10 Gbps as a reasonable high limit on macOS + // Network rate limiting is not enforced on macOS + return &NetworkResource{ + capacity: 10 * 1024 * 1024 * 1024 / 8, // 10 Gbps in bytes/sec + instanceLister: instLister, + }, nil +} + +// Type returns the resource type. +func (n *NetworkResource) Type() ResourceType { + return ResourceNetwork +} + +// Capacity returns the network capacity in bytes per second. +func (n *NetworkResource) Capacity() int64 { + return n.capacity +} + +// Allocated returns currently allocated network bandwidth. +// On macOS, this is always 0 as rate limiting is not supported. +func (n *NetworkResource) Allocated(ctx context.Context) (int64, error) { + return 0, nil +} + +// AvailableFor returns available network bandwidth. +// On macOS, this always returns the full capacity. +func (n *NetworkResource) AvailableFor(ctx context.Context, requested int64) (int64, error) { + return n.capacity, nil +} diff --git a/lib/resources/network.go b/lib/resources/network_linux.go similarity index 73% rename from lib/resources/network.go rename to lib/resources/network_linux.go index 41ba3d8e..6fa285f1 100644 --- a/lib/resources/network.go +++ b/lib/resources/network_linux.go @@ -1,3 +1,5 @@ +//go:build linux + package resources import ( @@ -7,7 +9,6 @@ import ( "strconv" "strings" - "github.com/c2h5oh/datasize" "github.com/kernel/hypeman/cmd/api/config" "github.com/kernel/hypeman/lib/logger" "github.com/vishvananda/netlink" @@ -139,50 +140,3 @@ func getInterfaceSpeed(iface string) (int64, error) { return speed, nil } - -// ParseBandwidth parses a bandwidth string like "10Gbps", "1GB/s", "125MB/s". -// Handles both bit-based (bps) and byte-based (/s) formats. -// Returns bytes per second. -func ParseBandwidth(limit string) (int64, error) { - limit = strings.TrimSpace(limit) - limit = strings.ToLower(limit) - - // Handle bps variants (bits per second) - if strings.HasSuffix(limit, "bps") { - // Remove "bps" suffix - numPart := strings.TrimSuffix(limit, "bps") - numPart = strings.TrimSpace(numPart) - - // Check for multiplier prefix - var multiplier int64 = 1 - if strings.HasSuffix(numPart, "g") { - multiplier = 1000 * 1000 * 1000 - numPart = strings.TrimSuffix(numPart, "g") - } else if strings.HasSuffix(numPart, "m") { - multiplier = 1000 * 1000 - numPart = strings.TrimSuffix(numPart, "m") - } else if strings.HasSuffix(numPart, "k") { - multiplier = 1000 - numPart = strings.TrimSuffix(numPart, "k") - } - - bits, err := strconv.ParseInt(strings.TrimSpace(numPart), 10, 64) - if err != nil { - return 0, fmt.Errorf("invalid number: %s", numPart) - } - - // Convert bits to bytes - return (bits * multiplier) / 8, nil - } - - // Handle byte-based variants (e.g., "125MB/s", "1GB") - limit = strings.TrimSuffix(limit, "/s") - limit = strings.TrimSuffix(limit, "ps") - - var ds datasize.ByteSize - if err := ds.UnmarshalText([]byte(limit)); err != nil { - return 0, fmt.Errorf("parse as bytes: %w", err) - } - - return int64(ds.Bytes()), nil -} diff --git a/lib/resources/resource_test.go b/lib/resources/resource_test.go index 7868d9b3..3fb9d66a 100644 --- a/lib/resources/resource_test.go +++ b/lib/resources/resource_test.go @@ -2,6 +2,7 @@ package resources import ( "context" + "runtime" "testing" "github.com/kernel/hypeman/cmd/api/config" @@ -353,6 +354,9 @@ func TestGetFullStatus_ReturnsAllResourceAllocations(t *testing.T) { // TestNetworkResource_Allocated verifies network allocation tracking // uses max(download, upload) since they share the physical link. func TestNetworkResource_Allocated(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("network rate limiting not supported on this platform") + } cfg := &config.Config{ DataDir: t.TempDir(), NetworkLimit: "1Gbps", // 125MB/s diff --git a/lib/resources/util.go b/lib/resources/util.go new file mode 100644 index 00000000..619037c8 --- /dev/null +++ b/lib/resources/util.go @@ -0,0 +1,56 @@ +package resources + +import ( + "fmt" + "strconv" + "strings" + + "github.com/c2h5oh/datasize" +) + +// ParseBandwidth parses a bandwidth string like "10Gbps", "1GB/s", "125MB/s". +// Handles both bit-based (bps) and byte-based (/s) formats. +// Returns bytes per second. +func ParseBandwidth(limit string) (int64, error) { + limit = strings.TrimSpace(limit) + limit = strings.ToLower(limit) + + // Handle bps variants (bits per second) + if strings.HasSuffix(limit, "bps") { + // Remove "bps" suffix + numPart := strings.TrimSuffix(limit, "bps") + numPart = strings.TrimSpace(numPart) + + // Check for multiplier prefix + var multiplier int64 = 1 + if strings.HasSuffix(numPart, "g") { + multiplier = 1000 * 1000 * 1000 + numPart = strings.TrimSuffix(numPart, "g") + } else if strings.HasSuffix(numPart, "m") { + multiplier = 1000 * 1000 + numPart = strings.TrimSuffix(numPart, "m") + } else if strings.HasSuffix(numPart, "k") { + multiplier = 1000 + numPart = strings.TrimSuffix(numPart, "k") + } + + bits, err := strconv.ParseInt(strings.TrimSpace(numPart), 10, 64) + if err != nil { + return 0, fmt.Errorf("invalid number: %s", numPart) + } + + // Convert bits to bytes + return (bits * multiplier) / 8, nil + } + + // Handle byte-based variants (e.g., "125MB/s", "1GB") + limit = strings.TrimSuffix(limit, "/s") + limit = strings.TrimSuffix(limit, "ps") + + var ds datasize.ByteSize + if err := ds.UnmarshalText([]byte(limit)); err != nil { + return 0, fmt.Errorf("parse as bytes: %w", err) + } + + return int64(ds.Bytes()), nil +} diff --git a/lib/system/init/logger.go b/lib/system/init/logger.go index 6d0a5217..588c8bfb 100644 --- a/lib/system/init/logger.go +++ b/lib/system/init/logger.go @@ -17,12 +17,17 @@ func NewLogger() *Logger { l := &Logger{} // Open serial console for output - // ttyS0 for x86_64, ttyAMA0 for ARM64 (PL011 UART) - if f, err := os.OpenFile("/dev/ttyAMA0", os.O_WRONLY, 0); err == nil { - l.console = f - } else if f, err := os.OpenFile("/dev/ttyS0", os.O_WRONLY, 0); err == nil { - l.console = f - } else { + // hvc0 for Virtualization.framework (vz) on macOS + // ttyAMA0 for ARM64 PL011 UART (cloud-hypervisor) + // ttyS0 for x86_64 (QEMU, cloud-hypervisor) + consoles := []string{"/dev/hvc0", "/dev/ttyAMA0", "/dev/ttyS0"} + for _, console := range consoles { + if f, err := os.OpenFile(console, os.O_WRONLY, 0); err == nil { + l.console = f + break + } + } + if l.console == nil { // Fallback to stdout l.console = os.Stdout } diff --git a/lib/system/init/mount.go b/lib/system/init/mount.go index 50ebc079..07894d01 100644 --- a/lib/system/init/mount.go +++ b/lib/system/init/mount.go @@ -49,16 +49,20 @@ func mountEssentials(log *Logger) error { log.Info("mount", "mounted devpts/shm") // Set up serial console now that /dev is mounted - // ttyS0 for x86_64, ttyAMA0 for ARM64 (PL011 UART) - if _, err := os.Stat("/dev/ttyAMA0"); err == nil { - log.SetConsole("/dev/ttyAMA0") - redirectToConsole("/dev/ttyAMA0") - } else if _, err := os.Stat("/dev/ttyS0"); err == nil { - log.SetConsole("/dev/ttyS0") - redirectToConsole("/dev/ttyS0") + // hvc0 for Virtualization.framework (vz) on macOS + // ttyAMA0 for ARM64 PL011 UART (cloud-hypervisor) + // ttyS0 for x86_64 (QEMU, cloud-hypervisor) + consoles := []string{"/dev/hvc0", "/dev/ttyAMA0", "/dev/ttyS0"} + for _, console := range consoles { + if _, err := os.Stat(console); err == nil { + log.SetConsole(console) + redirectToConsole(console) + log.Info("mount", "using console "+console) + break + } } - log.Info("mount", "redirected to serial console") + log.Info("mount", "console setup complete") return nil } diff --git a/lib/system/initrd.go b/lib/system/initrd.go index e3891bdf..3ef4c103 100644 --- a/lib/system/initrd.go +++ b/lib/system/initrd.go @@ -35,14 +35,14 @@ func (m *manager) buildInitrd(ctx context.Context, arch string) (string, error) return "", fmt.Errorf("create oci client: %w", err) } - // Inspect Alpine base to get digest - digest, err := ociClient.InspectManifest(ctx, alpineBaseImage) + // Inspect Alpine base to get digest (always use Linux platform since this is for guest VMs) + digest, err := ociClient.InspectManifestForLinux(ctx, alpineBaseImage) if err != nil { return "", fmt.Errorf("inspect alpine manifest: %w", err) } - // Pull and unpack Alpine base - if err := ociClient.PullAndUnpack(ctx, alpineBaseImage, digest, rootfsDir); err != nil { + // Pull and unpack Alpine base (always use Linux platform since this is for guest VMs) + if err := ociClient.PullAndUnpackForLinux(ctx, alpineBaseImage, digest, rootfsDir); err != nil { return "", fmt.Errorf("pull alpine base: %w", err) } @@ -71,7 +71,7 @@ func (m *manager) buildInitrd(ctx context.Context, arch string) (string, error) } // Download and add kernel headers tarball (for DKMS support) - if err := downloadKernelHeaders(arch, rootfsDir); err != nil { + if err := downloadKernelHeaders(ctx, arch, rootfsDir); err != nil { return "", fmt.Errorf("download kernel headers: %w", err) } @@ -162,7 +162,7 @@ func computeInitrdHash(arch string) string { } // downloadKernelHeaders downloads kernel headers tarball and adds it to the initrd rootfs -func downloadKernelHeaders(arch, rootfsDir string) error { +func downloadKernelHeaders(ctx context.Context, arch, rootfsDir string) error { url, ok := KernelHeaderURLs[DefaultKernelVersion][arch] if !ok { // No headers available for this arch, skip (non-fatal) @@ -178,7 +178,12 @@ func downloadKernelHeaders(arch, rootfsDir string) error { }, } - resp, err := client.Get(url) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return fmt.Errorf("create request: %w", err) + } + + resp, err := client.Do(req) if err != nil { return fmt.Errorf("http get: %w", err) } diff --git a/lib/vm_metrics/collector_darwin.go b/lib/vm_metrics/collector_darwin.go new file mode 100644 index 00000000..1b2a1405 --- /dev/null +++ b/lib/vm_metrics/collector_darwin.go @@ -0,0 +1,20 @@ +//go:build darwin + +package vm_metrics + +import "fmt" + +// ReadProcStat is not available on macOS (/proc does not exist). +func ReadProcStat(pid int) (uint64, error) { + return 0, fmt.Errorf("read proc stat: not supported on macOS") +} + +// ReadProcStatm is not available on macOS (/proc does not exist). +func ReadProcStatm(pid int) (rssBytes, vmsBytes uint64, err error) { + return 0, 0, fmt.Errorf("read proc statm: not supported on macOS") +} + +// ReadTAPStats is not available on macOS (/sys does not exist). +func ReadTAPStats(tapName string) (rxBytes, txBytes uint64, err error) { + return 0, 0, fmt.Errorf("read TAP stats: not supported on macOS") +} diff --git a/lib/vm_metrics/collector.go b/lib/vm_metrics/collector_linux.go similarity index 99% rename from lib/vm_metrics/collector.go rename to lib/vm_metrics/collector_linux.go index f9ba89b9..507627d3 100644 --- a/lib/vm_metrics/collector.go +++ b/lib/vm_metrics/collector_linux.go @@ -1,3 +1,5 @@ +//go:build linux + package vm_metrics import ( diff --git a/lib/vm_metrics/collector_test.go b/lib/vm_metrics/collector_test.go index 6905a35b..5c4ca688 100644 --- a/lib/vm_metrics/collector_test.go +++ b/lib/vm_metrics/collector_test.go @@ -1,3 +1,5 @@ +//go:build linux + package vm_metrics import ( diff --git a/lib/vm_metrics/manager_test.go b/lib/vm_metrics/manager_test.go index 851771e7..67a94c45 100644 --- a/lib/vm_metrics/manager_test.go +++ b/lib/vm_metrics/manager_test.go @@ -1,3 +1,5 @@ +//go:build linux + package vm_metrics import ( diff --git a/lib/vm_metrics/metrics_test.go b/lib/vm_metrics/metrics_test.go index 65bee0d3..c14b4a0b 100644 --- a/lib/vm_metrics/metrics_test.go +++ b/lib/vm_metrics/metrics_test.go @@ -1,3 +1,5 @@ +//go:build linux + package vm_metrics import ( diff --git a/lib/vmm/binaries_darwin.go b/lib/vmm/binaries_darwin.go new file mode 100644 index 00000000..370c027c --- /dev/null +++ b/lib/vmm/binaries_darwin.go @@ -0,0 +1,34 @@ +//go:build darwin + +package vmm + +import ( + "fmt" + + "github.com/kernel/hypeman/lib/paths" +) + +// CHVersion represents Cloud Hypervisor version +type CHVersion string + +const ( + V48_0 CHVersion = "v48.0" + V49_0 CHVersion = "v49.0" +) + +// SupportedVersions lists supported Cloud Hypervisor versions. +// On macOS, Cloud Hypervisor is not supported (use vz instead). +var SupportedVersions = []CHVersion{} + +// ErrNotSupportedOnMacOS indicates Cloud Hypervisor is not available on macOS +var ErrNotSupportedOnMacOS = fmt.Errorf("cloud-hypervisor is not supported on macOS; use vz hypervisor instead") + +// ExtractBinary is not supported on macOS +func ExtractBinary(p *paths.Paths, version CHVersion) (string, error) { + return "", ErrNotSupportedOnMacOS +} + +// GetBinaryPath is not supported on macOS +func GetBinaryPath(p *paths.Paths, version CHVersion) (string, error) { + return "", ErrNotSupportedOnMacOS +} diff --git a/lib/vmm/binaries.go b/lib/vmm/binaries_linux.go similarity index 98% rename from lib/vmm/binaries.go rename to lib/vmm/binaries_linux.go index 319884a2..73064a41 100644 --- a/lib/vmm/binaries.go +++ b/lib/vmm/binaries_linux.go @@ -1,3 +1,5 @@ +//go:build linux + package vmm import ( diff --git a/lib/vmm/client_test.go b/lib/vmm/client_test.go index 2162551a..33febedb 100644 --- a/lib/vmm/client_test.go +++ b/lib/vmm/client_test.go @@ -1,3 +1,5 @@ +//go:build linux + package vmm import ( diff --git a/openapi.yaml b/openapi.yaml index d87dedd5..4d13dcef 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -176,7 +176,7 @@ components: $ref: "#/components/schemas/VolumeMount" hypervisor: type: string - enum: [cloud-hypervisor, qemu] + enum: [cloud-hypervisor, qemu, vz] description: Hypervisor to use for this instance. Defaults to server configuration. example: cloud-hypervisor skip_kernel_headers: @@ -306,7 +306,7 @@ components: example: false hypervisor: type: string - enum: [cloud-hypervisor, qemu] + enum: [cloud-hypervisor, qemu, vz] description: Hypervisor running this instance example: cloud-hypervisor From 83ca39ee6fc9a814f57c4b0610a30e778537a450 Mon Sep 17 00:00:00 2001 From: Rafael Garcia Date: Tue, 10 Feb 2026 16:44:29 -0500 Subject: [PATCH 2/5] fix(builds): prevent log loss and push builder image to registry - Restore persisting result.Logs to disk after build completion, since streamed log lines can be dropped when the bounded channel overflows - Add docker tag + push after building the builder image locally so it is available in the registry for builder VMs to pull - Synchronize log streaming with build result delivery by waiting for logsDone channel before sending build_result, preventing the host from closing the connection before all logs are delivered Co-Authored-By: Claude Opus 4.6 --- lib/builds/builder_agent/main.go | 6 ++++++ lib/builds/manager.go | 33 +++++++++++++++++++++++++++++--- lib/builds/storage.go | 12 ++++++++++++ 3 files changed, 48 insertions(+), 3 deletions(-) diff --git a/lib/builds/builder_agent/main.go b/lib/builds/builder_agent/main.go index f85322fb..a6bca84d 100644 --- a/lib/builds/builder_agent/main.go +++ b/lib/builds/builder_agent/main.go @@ -268,7 +268,9 @@ func handleHostConnection(conn net.Conn) { }) // Start streaming logs to host + logsDone := make(chan struct{}) go func() { + defer close(logsDone) for logLine := range logChan { encoderLock.Lock() err := encoder.Encode(VsockMessage{Type: "log", Log: logLine}) @@ -283,6 +285,10 @@ func handleHostConnection(conn net.Conn) { // Wait for build to complete and send result to host go func() { <-buildDone + // Wait for all buffered log messages to be sent before sending the result. + // This prevents the host from receiving build_result before all logs, + // which would cause it to close the connection and lose remaining logs. + <-logsDone buildResultLock.Lock() result := buildResult diff --git a/lib/builds/manager.go b/lib/builds/manager.go index b4eb6097..43f8463c 100644 --- a/lib/builds/manager.go +++ b/lib/builds/manager.go @@ -233,6 +233,28 @@ func (m *manager) ensureBuilderImage(ctx context.Context) { } m.logger.Info("builder image built successfully", "image", builderImage) + + // Tag the image with the registry prefix so it can be pushed + tagCmd := exec.CommandContext(ctx, "docker", "tag", builderImage, imageRef) + tagCmd.Env = append(os.Environ(), fmt.Sprintf("DOCKER_HOST=unix://%s", dockerSocket)) + if tagOutput, err := tagCmd.CombinedOutput(); err != nil { + m.logger.Warn("failed to tag builder image for registry", + "error", err, + "output", string(tagOutput)) + return + } + + // Push the image to the registry so builder VMs can pull it + pushCmd := exec.CommandContext(ctx, "docker", "push", imageRef) + pushCmd.Env = append(os.Environ(), fmt.Sprintf("DOCKER_HOST=unix://%s", dockerSocket)) + if pushOutput, err := pushCmd.CombinedOutput(); err != nil { + m.logger.Warn("failed to push builder image to registry", + "error", err, + "output", string(pushOutput)) + return + } + + m.logger.Info("builder image pushed to registry", "image", imageRef) } // CreateBuild starts a new build job @@ -395,9 +417,14 @@ func (m *manager) runBuild(ctx context.Context, id string, req CreateBuildReques return } - // Note: Logs are now streamed via vsock "log" messages and written incrementally - // in waitForResult, so we no longer need to save them here. - // The result.Logs field is kept for backward compatibility but is redundant. + // Save complete build logs from result.Logs as the authoritative log file. + // Streamed "log" messages may have dropped lines due to channel overflow, + // so we overwrite with the complete buffer to ensure no logs are lost. + if result.Logs != "" { + if err := writeLog(m.paths, id, []byte(result.Logs)); err != nil { + m.logger.Warn("failed to save build logs", "id", id, "error", err) + } + } if !result.Success { m.logger.Error("build failed", "id", id, "error", result.Error, "duration", duration) diff --git a/lib/builds/storage.go b/lib/builds/storage.go index 7f65fccc..24b97588 100644 --- a/lib/builds/storage.go +++ b/lib/builds/storage.go @@ -191,6 +191,18 @@ func appendLog(p *paths.Paths, id string, data []byte) error { return nil } +// writeLog writes the complete build log file, replacing any existing content. +// This is used to persist the authoritative complete logs from result.Logs, +// which may contain lines that were dropped during streaming due to channel overflow. +func writeLog(p *paths.Paths, id string, data []byte) error { + if err := ensureLogsDir(p, id); err != nil { + return err + } + + logPath := p.BuildLog(id) + return os.WriteFile(logPath, data, 0644) +} + // readLog reads the build log file func readLog(p *paths.Paths, id string) ([]byte, error) { logPath := p.BuildLog(id) From 365821845c0a7b52419c262d698bf57f33228706 Mon Sep 17 00:00:00 2001 From: Rafael Garcia Date: Tue, 10 Feb 2026 17:47:38 -0500 Subject: [PATCH 3/5] fix(builds): embed Dockerfile, import builder image via OCI layout Rework builder image provisioning: - Embed generic/Dockerfile with go:embed instead of reading from filesystem - Remove hardcoded "hypeman/builder:latest" default; if BUILDER_IMAGE is unset, build from embedded Dockerfile (dev mode) - After docker build+save, write image directly into OCI layout cache and call ImportLocalImage, bypassing docker push entirely - Move RecoverPendingBuilds after ensureBuilderImage to prevent race where recovered builds fail with "builder image is being prepared" Also fix review feedback: - Add sector alignment to erofs disk conversion (macOS VF compat) - Remove redundant ForLinux OCI client aliases and update callers Co-Authored-By: Claude Opus 4.6 --- lib/builds/manager.go | 228 +++++++++++++++++++++++++++++---------- lib/images/disk.go | 9 ++ lib/images/oci_public.go | 14 +-- lib/system/initrd.go | 8 +- 4 files changed, 184 insertions(+), 75 deletions(-) diff --git a/lib/builds/manager.go b/lib/builds/manager.go index 43f8463c..a23b77e4 100644 --- a/lib/builds/manager.go +++ b/lib/builds/manager.go @@ -3,6 +3,7 @@ package builds import ( "bufio" "context" + _ "embed" "encoding/json" "fmt" "log/slog" @@ -15,6 +16,9 @@ import ( "sync/atomic" "time" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/layout" + "github.com/google/go-containerregistry/pkg/v1/tarball" "github.com/nrednav/cuid2" "github.com/kernel/hypeman/lib/images" "github.com/kernel/hypeman/lib/instances" @@ -23,6 +27,9 @@ import ( "go.opentelemetry.io/otel/metric" ) +//go:embed images/generic/Dockerfile +var builderDockerfile []byte + // Manager interface for the build system type Manager interface { // Start starts the build manager's background services (vsock handler, etc.) @@ -87,7 +94,6 @@ type Config struct { func DefaultConfig() Config { return Config{ MaxConcurrentBuilds: 2, - BuilderImage: "hypeman/builder:latest", RegistryURL: "localhost:8080", DefaultTimeout: 600, // 10 minutes } @@ -161,100 +167,204 @@ func NewManager( m.metrics = metrics } - // Recover any pending builds from disk - m.RecoverPendingBuilds() - return m, nil } // Start starts the build manager's background services func (m *manager) Start(ctx context.Context) error { - go m.ensureBuilderImage(ctx) + go func() { + m.ensureBuilderImage(ctx) + // Recover pending builds only after the builder image is ready, + // otherwise recovered builds fail with "builder image is being prepared". + m.RecoverPendingBuilds() + }() m.logger.Info("build manager started") return nil } -// ensureBuilderImage ensures the builder image is available in the registry. -// If BUILDER_IMAGE is unset/empty, it builds from the embedded Dockerfile. -// If BUILDER_IMAGE is set, it checks if the image exists. +// ensureBuilderImage ensures the builder image is available in the image store. +// +// If BUILDER_IMAGE is set, it checks whether the image is already in the store +// and attempts to pull it from a remote registry if not. +// +// If BUILDER_IMAGE is unset/empty, it builds the image from the embedded Dockerfile +// using Docker, imports the result directly into the OCI layout cache (no docker push), +// and triggers ext4 conversion via ImportLocalImage. +// // This runs in a background goroutine during startup. func (m *manager) ensureBuilderImage(ctx context.Context) { defer m.builderReady.Store(true) - builderImage := m.config.BuilderImage - if builderImage == "" { - builderImage = "hypeman/builder:latest" + if m.config.BuilderImage != "" { + // Explicit builder image configured - check if already available + if _, err := m.imageManager.GetImage(ctx, m.config.BuilderImage); err == nil { + m.logger.Info("builder image already available", "image", m.config.BuilderImage) + return + } + + // Not in store - try to pull it from remote registry + m.logger.Info("pulling builder image", "image", m.config.BuilderImage) + if _, err := m.imageManager.CreateImage(ctx, images.CreateImageRequest{ + Name: m.config.BuilderImage, + }); err != nil { + m.logger.Warn("failed to pull builder image", "image", m.config.BuilderImage, "error", err) + return + } + if err := m.waitForBuilderImageReady(ctx, m.config.BuilderImage); err != nil { + m.logger.Warn("builder image failed to become ready", "image", m.config.BuilderImage, "error", err) + } + return } - // Check if image already exists in the registry - registryHost := stripRegistryScheme(m.config.RegistryURL) - imageRef := fmt.Sprintf("%s/%s", registryHost, builderImage) - if _, err := m.imageManager.GetImage(ctx, imageRef); err == nil { - m.logger.Info("builder image already available", "image", imageRef) + // No builder image configured - build from embedded Dockerfile + m.logger.Info("building builder image from embedded Dockerfile") + imageRef, err := m.buildBuilderFromDockerfile(ctx) + if err != nil { + m.logger.Warn("failed to build builder image", "error", err) return } + m.config.BuilderImage = imageRef + m.logger.Info("builder image ready", "image", imageRef) +} - // Try to build the image using Docker +// buildBuilderFromDockerfile builds the builder image from the embedded Dockerfile +// and imports it into the image store without using docker push. +// +// The flow is: +// 1. Write embedded Dockerfile to a temp directory +// 2. Build with Docker (uses cwd as context for COPY directives) +// 3. Export with docker save to a tarball +// 4. Load tarball with go-containerregistry and write to the shared OCI layout cache +// 5. Call ImportLocalImage to trigger ext4 conversion +// 6. Wait for the image to be ready +// +// This is intended for development; in production, set BUILDER_IMAGE to a pre-built image. +func (m *manager) buildBuilderFromDockerfile(ctx context.Context) (string, error) { dockerSocket := m.config.DockerSocket if dockerSocket == "" { dockerSocket = "/var/run/docker.sock" } - - // Check if Docker socket exists if _, err := os.Stat(dockerSocket); err != nil { - m.logger.Warn("Docker socket not found, skipping builder image build", - "socket", dockerSocket, - "error", err) - return + return "", fmt.Errorf("Docker socket not found at %s: %w", dockerSocket, err) } - m.logger.Info("building builder image", "image", builderImage) + dockerEnv := append(os.Environ(), fmt.Sprintf("DOCKER_HOST=unix://%s", dockerSocket)) - // Find the Dockerfile - look relative to the binary or in common locations - dockerfilePath := "lib/builds/images/generic/Dockerfile" - if _, err := os.Stat(dockerfilePath); err != nil { - // Try relative to executable - if execPath, err := os.Executable(); err == nil { - altPath := filepath.Join(filepath.Dir(execPath), "..", dockerfilePath) - if _, err := os.Stat(altPath); err == nil { - dockerfilePath = altPath - } - } + // Write embedded Dockerfile to temp dir + tmpDir, err := os.MkdirTemp("", "hypeman-builder-*") + if err != nil { + return "", fmt.Errorf("create temp dir: %w", err) } + defer os.RemoveAll(tmpDir) - cmd := exec.CommandContext(ctx, "docker", "build", "-t", builderImage, "-f", dockerfilePath, ".") - cmd.Env = append(os.Environ(), fmt.Sprintf("DOCKER_HOST=unix://%s", dockerSocket)) - output, err := cmd.CombinedOutput() + dockerfilePath := filepath.Join(tmpDir, "Dockerfile") + if err := os.WriteFile(dockerfilePath, builderDockerfile, 0644); err != nil { + return "", fmt.Errorf("write Dockerfile: %w", err) + } + + // Build with Docker (context is cwd = repo root in development) + localTag := fmt.Sprintf("hypeman-builder-tmp:%d", time.Now().Unix()) + m.logger.Info("building builder image with Docker", "tag", localTag) + + buildCmd := exec.CommandContext(ctx, "docker", "build", "-t", localTag, "-f", dockerfilePath, ".") + buildCmd.Env = dockerEnv + if output, err := buildCmd.CombinedOutput(); err != nil { + return "", fmt.Errorf("docker build: %s: %w", string(output), err) + } + defer func() { + rmCmd := exec.Command("docker", "rmi", localTag) + rmCmd.Env = dockerEnv + rmCmd.Run() + }() + + // Export image to tarball (avoids docker push) + tarPath := filepath.Join(tmpDir, "builder.tar") + saveCmd := exec.CommandContext(ctx, "docker", "save", "-o", tarPath, localTag) + saveCmd.Env = dockerEnv + if output, err := saveCmd.CombinedOutput(); err != nil { + return "", fmt.Errorf("docker save: %s: %w", string(output), err) + } + + // Load tarball as a v1.Image + img, err := tarball.ImageFromPath(tarPath, nil) if err != nil { - m.logger.Warn("failed to build builder image", - "error", err, - "output", string(output)) - return + return "", fmt.Errorf("load image tarball: %w", err) } - m.logger.Info("builder image built successfully", "image", builderImage) + // Get image digest + digestHash, err := img.Digest() + if err != nil { + return "", fmt.Errorf("get image digest: %w", err) + } + digest := digestHash.String() // "sha256:abc123..." + digestHex := digestHash.Hex // "abc123..." - // Tag the image with the registry prefix so it can be pushed - tagCmd := exec.CommandContext(ctx, "docker", "tag", builderImage, imageRef) - tagCmd.Env = append(os.Environ(), fmt.Sprintf("DOCKER_HOST=unix://%s", dockerSocket)) - if tagOutput, err := tagCmd.CombinedOutput(); err != nil { - m.logger.Warn("failed to tag builder image for registry", - "error", err, - "output", string(tagOutput)) - return + // Write directly to the shared OCI layout cache. + // This is the same cache used by the image manager's OCI client, so when + // ImportLocalImage triggers buildImage → pullAndExport, it will find the + // layers already cached and skip the network pull entirely. + cacheDir := m.paths.SystemOCICache() + layoutPath, err := layout.FromPath(cacheDir) + if err != nil { + layoutPath, err = layout.Write(cacheDir, empty.Index) + if err != nil { + return "", fmt.Errorf("create OCI layout: %w", err) + } } - // Push the image to the registry so builder VMs can pull it - pushCmd := exec.CommandContext(ctx, "docker", "push", imageRef) - pushCmd.Env = append(os.Environ(), fmt.Sprintf("DOCKER_HOST=unix://%s", dockerSocket)) - if pushOutput, err := pushCmd.CombinedOutput(); err != nil { - m.logger.Warn("failed to push builder image to registry", - "error", err, - "output", string(pushOutput)) - return + if err := layoutPath.AppendImage(img, layout.WithAnnotations(map[string]string{ + "org.opencontainers.image.ref.name": digestHex, + })); err != nil { + return "", fmt.Errorf("add image to OCI layout: %w", err) + } + + m.logger.Info("builder image added to OCI cache", "digest", digest) + + // Import into the image store (triggers async ext4 conversion). + // The repo includes the registry host so the image reference is consistent + // with how other images are stored and looked up. + registryHost := stripRegistryScheme(m.config.RegistryURL) + repo := registryHost + "/internal/builder" + reference := "latest" + imageRef := repo + ":" + reference + + if _, err := m.imageManager.ImportLocalImage(ctx, repo, reference, digest); err != nil { + return "", fmt.Errorf("import builder image: %w", err) + } + + // Wait for ext4 conversion to complete + if err := m.waitForBuilderImageReady(ctx, imageRef); err != nil { + return "", fmt.Errorf("builder image conversion: %w", err) + } + + return imageRef, nil +} + +// waitForBuilderImageReady polls the image manager until the image is ready. +func (m *manager) waitForBuilderImageReady(ctx context.Context, imageRef string) error { + const maxAttempts = 240 + const pollInterval = 500 * time.Millisecond + + for attempt := 0; attempt < maxAttempts; attempt++ { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + img, err := m.imageManager.GetImage(ctx, imageRef) + if err == nil { + switch img.Status { + case images.StatusReady: + return nil + case images.StatusFailed: + return fmt.Errorf("image conversion failed") + } + } + time.Sleep(pollInterval) } - m.logger.Info("builder image pushed to registry", "image", imageRef) + return fmt.Errorf("timeout waiting for builder image after %v", time.Duration(maxAttempts)*pollInterval) } // CreateBuild starts a new build job diff --git a/lib/images/disk.go b/lib/images/disk.go index c76660d6..c7be831a 100644 --- a/lib/images/disk.go +++ b/lib/images/disk.go @@ -206,6 +206,15 @@ func convertToErofs(rootfsDir, diskPath string) (int64, error) { return 0, fmt.Errorf("stat disk: %w", err) } + // Align to sector boundary (required by macOS Virtualization.framework) + if stat.Size()%sectorSize != 0 { + alignedSize := alignToSector(stat.Size()) + if err := os.Truncate(diskPath, alignedSize); err != nil { + return 0, fmt.Errorf("align erofs disk to sector boundary: %w", err) + } + return alignedSize, nil + } + return stat.Size(), nil } diff --git a/lib/images/oci_public.go b/lib/images/oci_public.go index 66643b97..a7a7c53f 100644 --- a/lib/images/oci_public.go +++ b/lib/images/oci_public.go @@ -19,18 +19,13 @@ func NewOCIClient(cacheDir string) (*OCIClient, error) { return &OCIClient{client: client}, nil } -// InspectManifest inspects a remote image to get its digest (public for system manager) +// InspectManifest inspects a remote image to get its digest (public for system manager). // Always targets Linux platform since hypeman VMs are Linux guests. func (c *OCIClient) InspectManifest(ctx context.Context, imageRef string) (string, error) { return c.client.inspectManifest(ctx, imageRef) } -// InspectManifestForLinux is an alias for InspectManifest (all images target Linux) -func (c *OCIClient) InspectManifestForLinux(ctx context.Context, imageRef string) (string, error) { - return c.InspectManifest(ctx, imageRef) -} - -// PullAndUnpack pulls an OCI image and unpacks it to a directory (public for system manager) +// PullAndUnpack pulls an OCI image and unpacks it to a directory (public for system manager). // Always targets Linux platform since hypeman VMs are Linux guests. func (c *OCIClient) PullAndUnpack(ctx context.Context, imageRef, digest, exportDir string) error { _, err := c.client.pullAndExport(ctx, imageRef, digest, exportDir) @@ -39,8 +34,3 @@ func (c *OCIClient) PullAndUnpack(ctx context.Context, imageRef, digest, exportD } return nil } - -// PullAndUnpackForLinux is an alias for PullAndUnpack (all images target Linux) -func (c *OCIClient) PullAndUnpackForLinux(ctx context.Context, imageRef, digest, exportDir string) error { - return c.PullAndUnpack(ctx, imageRef, digest, exportDir) -} diff --git a/lib/system/initrd.go b/lib/system/initrd.go index 3ef4c103..22f64713 100644 --- a/lib/system/initrd.go +++ b/lib/system/initrd.go @@ -35,14 +35,14 @@ func (m *manager) buildInitrd(ctx context.Context, arch string) (string, error) return "", fmt.Errorf("create oci client: %w", err) } - // Inspect Alpine base to get digest (always use Linux platform since this is for guest VMs) - digest, err := ociClient.InspectManifestForLinux(ctx, alpineBaseImage) + // Inspect Alpine base to get digest + digest, err := ociClient.InspectManifest(ctx, alpineBaseImage) if err != nil { return "", fmt.Errorf("inspect alpine manifest: %w", err) } - // Pull and unpack Alpine base (always use Linux platform since this is for guest VMs) - if err := ociClient.PullAndUnpackForLinux(ctx, alpineBaseImage, digest, rootfsDir); err != nil { + // Pull and unpack Alpine base + if err := ociClient.PullAndUnpack(ctx, alpineBaseImage, digest, rootfsDir); err != nil { return "", fmt.Errorf("pull alpine base: %w", err) } From 011e86164a655eff3f2b2f87d24a92eb5a58976d Mon Sep 17 00:00:00 2001 From: Rafael Garcia Date: Tue, 10 Feb 2026 18:46:35 -0500 Subject: [PATCH 4/5] test(images): add OCI layout roundtrip and import tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add three tests verifying the builder image import pipeline: - TestDockerSaveTarballToOCILayoutRoundtrip: full pipeline from docker save tarball → load → OCI layout → existsInLayout → extractMetadata → unpackLayers with rootfs verification - TestDockerSaveToOCILayoutCacheHit: verifies pullAndExport skips remote pull when image exists in OCI layout cache (uses bogus registry URL that would fail if pull was attempted) - TestImportLocalImageFromOCICache: end-to-end integration test simulating buildBuilderFromDockerfile's flow: write to OCI cache → ImportLocalImage → async build → verify GetImage metadata and GetDiskPath returns valid ext4 disk Co-Authored-By: Claude Opus 4.6 --- lib/images/manager_test.go | 75 +++++++++++++ lib/images/oci_test.go | 213 +++++++++++++++++++++++++++++++++++++ 2 files changed, 288 insertions(+) diff --git a/lib/images/manager_test.go b/lib/images/manager_test.go index 312cef82..65445676 100644 --- a/lib/images/manager_test.go +++ b/lib/images/manager_test.go @@ -8,6 +8,8 @@ import ( "testing" "time" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/layout" "github.com/kernel/hypeman/lib/paths" "github.com/stretchr/testify/require" ) @@ -345,6 +347,79 @@ func countFiles(dir string) (int, error) { return len(entries), nil } +// TestImportLocalImageFromOCICache is an integration test that simulates the full +// builder image import flow used by buildBuilderFromDockerfile: +// +// 1. Create a synthetic Docker image (simulates docker build output) +// 2. Write it to the OCI layout cache with digest annotation (simulates buildBuilderFromDockerfile) +// 3. Call ImportLocalImage (what buildBuilderFromDockerfile calls after writing to cache) +// 4. Wait for the image to become ready (async build pipeline) +// 5. Verify GetImage returns correct metadata (entrypoint, workdir, env) +// 6. Verify GetDiskPath returns path to a valid ext4 disk file +// +// This proves the end-to-end flow: OCI cache write → ImportLocalImage → buildImage +// → pullAndExport (cache hit) → ExportRootfs → ready. +func TestImportLocalImageFromOCICache(t *testing.T) { + dataDir := t.TempDir() + p := paths.New(dataDir) + mgr, err := NewManager(p, 1, nil) + require.NoError(t, err) + + ctx := context.Background() + + // Step 1: Create synthetic Docker image + img := createTestDockerImage(t) + + imgDigest, err := img.Digest() + require.NoError(t, err) + digestStr := imgDigest.String() // "sha256:abc123..." + layoutTag := digestToLayoutTag(digestStr) + + // Step 2: Write to OCI layout cache (same path the image manager uses) + cacheDir := p.SystemOCICache() + require.NoError(t, os.MkdirAll(cacheDir, 0755)) + + path, err := layout.Write(cacheDir, empty.Index) + require.NoError(t, err) + + err = path.AppendImage(img, layout.WithAnnotations(map[string]string{ + "org.opencontainers.image.ref.name": layoutTag, + })) + require.NoError(t, err) + t.Logf("Wrote image to OCI cache: digest=%s, layoutTag=%s", digestStr, layoutTag) + + // Step 3: Call ImportLocalImage (what buildBuilderFromDockerfile does) + imported, err := mgr.ImportLocalImage(ctx, "localhost:8080/internal/builder", "latest", digestStr) + require.NoError(t, err) + require.NotNil(t, imported) + require.Equal(t, "localhost:8080/internal/builder:latest", imported.Name) + t.Logf("ImportLocalImage returned: name=%s, status=%s, digest=%s", imported.Name, imported.Status, imported.Digest) + + // Step 4: Wait for the async build pipeline to complete + waitForReady(t, mgr, ctx, imported.Name) + + // Step 5: Verify GetImage returns correct metadata + ready, err := mgr.GetImage(ctx, imported.Name) + require.NoError(t, err) + require.Equal(t, StatusReady, ready.Status) + require.Equal(t, digestStr, ready.Digest) + require.Equal(t, []string{"/usr/local/bin/guest-agent"}, ready.Entrypoint) + require.Equal(t, "/app", ready.WorkingDir) + require.Contains(t, ready.Env, "PATH") + require.NotNil(t, ready.SizeBytes) + require.Greater(t, *ready.SizeBytes, int64(0)) + t.Logf("Image ready: entrypoint=%v, workdir=%s, size=%d", ready.Entrypoint, ready.WorkingDir, *ready.SizeBytes) + + // Step 6: Verify GetDiskPath returns path to a valid disk file + diskPath, err := GetDiskPath(p, imported.Name, digestStr) + require.NoError(t, err) + diskStat, err := os.Stat(diskPath) + require.NoError(t, err, "disk file should exist at %s", diskPath) + require.False(t, diskStat.IsDir()) + require.Greater(t, diskStat.Size(), int64(0), "disk file should not be empty") + t.Logf("Disk path verified: %s (%d bytes)", diskPath, diskStat.Size()) +} + // waitForReady waits for an image build to complete func waitForReady(t *testing.T, mgr Manager, ctx context.Context, imageName string) { for i := 0; i < 600; i++ { diff --git a/lib/images/oci_test.go b/lib/images/oci_test.go index 592da9ac..51005bf6 100644 --- a/lib/images/oci_test.go +++ b/lib/images/oci_test.go @@ -1,14 +1,24 @@ package images import ( + "archive/tar" + "bytes" + "compress/gzip" "context" "crypto/sha256" "encoding/hex" "encoding/json" + "io" "os" "path/filepath" "testing" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/layout" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/tarball" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -188,3 +198,206 @@ func TestConvertToOCIMediaTypePassesThroughBuildKitType(t *testing.T) { assert.Equal(t, "application/vnd.oci.image.config.v1+json", convertToOCIMediaType("application/vnd.docker.container.image.v1+json")) } + +// createTestDockerImage builds a synthetic Docker image using go-containerregistry. +// This simulates what "docker build + docker save" produces without requiring Docker. +// The image contains a fake builder binary and config file, with Docker v2 mediatypes +// (matching what docker save outputs). +func createTestDockerImage(t *testing.T) v1.Image { + t.Helper() + + // Build a gzipped tar layer with test files + var layerBuf bytes.Buffer + gzw := gzip.NewWriter(&layerBuf) + tw := tar.NewWriter(gzw) + + files := []struct { + name string + content string + mode int64 + isDir bool + }{ + {name: "usr/", isDir: true, mode: 0755}, + {name: "usr/local/", isDir: true, mode: 0755}, + {name: "usr/local/bin/", isDir: true, mode: 0755}, + {name: "usr/local/bin/guest-agent", content: "fake-builder-binary-v1", mode: 0755}, + {name: "etc/", isDir: true, mode: 0755}, + {name: "etc/builder.json", content: `{"version":"1.0"}`, mode: 0644}, + {name: "app/", isDir: true, mode: 0755}, + } + + for _, f := range files { + if f.isDir { + require.NoError(t, tw.WriteHeader(&tar.Header{ + Name: f.name, + Typeflag: tar.TypeDir, + Mode: f.mode, + })) + } else { + require.NoError(t, tw.WriteHeader(&tar.Header{ + Name: f.name, + Size: int64(len(f.content)), + Typeflag: tar.TypeReg, + Mode: f.mode, + })) + _, err := tw.Write([]byte(f.content)) + require.NoError(t, err) + } + } + require.NoError(t, tw.Close()) + require.NoError(t, gzw.Close()) + + layerBytes := layerBuf.Bytes() + + // Create layer from bytes + layer, err := tarball.LayerFromOpener(func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(layerBytes)), nil + }) + require.NoError(t, err) + + // Start with empty image and add our layer + img, err := mutate.AppendLayers(empty.Image, layer) + require.NoError(t, err) + + // Set config (entrypoint, env, workdir) - matches what a real builder image would have + img, err = mutate.Config(img, v1.Config{ + Entrypoint: []string{"/usr/local/bin/guest-agent"}, + Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + WorkingDir: "/app", + }) + require.NoError(t, err) + + return img +} + +// TestDockerSaveTarballToOCILayoutRoundtrip tests the exact pipeline used by +// buildBuilderFromDockerfile: docker save tarball → load via go-containerregistry +// → write to OCI layout cache → verify existsInLayout + extractMetadata + unpackLayers. +// +// This simulates: +// 1. docker build → docker save (we use go-containerregistry to create the tarball) +// 2. tarball.ImageFromPath (load the docker save output) +// 3. layout.AppendImage with digest annotation (write to OCI cache) +// 4. existsInLayout (cache hit detection) +// 5. extractOCIMetadata (read config from cache) +// 6. unpackLayers (unpack rootfs from cache) +func TestDockerSaveTarballToOCILayoutRoundtrip(t *testing.T) { + // Step 1: Create a synthetic Docker image (simulates docker build output) + img := createTestDockerImage(t) + + // Step 2: Save as docker save tarball (simulates docker save) + tarPath := filepath.Join(t.TempDir(), "image.tar") + tag, err := name.NewTag("localhost:5000/test/builder:latest") + require.NoError(t, err) + require.NoError(t, tarball.WriteToFile(tarPath, tag, img)) + + // Step 3: Load from tarball (this is what buildBuilderFromDockerfile does) + loadedImg, err := tarball.ImageFromPath(tarPath, nil) + require.NoError(t, err) + + // Get digest (used as OCI layout tag) + imgDigest, err := loadedImg.Digest() + require.NoError(t, err) + digestStr := imgDigest.String() // "sha256:abc123..." + layoutTag := digestToLayoutTag(digestStr) + t.Logf("Image digest: %s, layoutTag: %s", digestStr, layoutTag) + + // Step 4: Write to OCI layout (simulates the layout.AppendImage in buildBuilderFromDockerfile) + cacheDir := t.TempDir() + path, err := layout.Write(cacheDir, empty.Index) + require.NoError(t, err) + + err = path.AppendImage(loadedImg, layout.WithAnnotations(map[string]string{ + "org.opencontainers.image.ref.name": layoutTag, + })) + require.NoError(t, err) + + // Step 5: Create OCI client and verify existsInLayout (cache hit detection) + client, err := newOCIClient(cacheDir) + require.NoError(t, err) + assert.True(t, client.existsInLayout(layoutTag), "image should exist in layout after AppendImage") + + // Step 6: Verify extractOCIMetadata reads correct config + meta, err := client.extractOCIMetadata(layoutTag) + require.NoError(t, err) + assert.Equal(t, []string{"/usr/local/bin/guest-agent"}, meta.Entrypoint) + assert.Equal(t, "/app", meta.WorkingDir) + assert.Contains(t, meta.Env, "PATH") + + // Step 7: Verify unpackLayers produces correct rootfs + // umoci's UnpackRootfs extracts directly into the target directory + unpackDir := filepath.Join(t.TempDir(), "unpack") + err = client.unpackLayers(context.Background(), layoutTag, unpackDir) + require.NoError(t, err) + + // Verify expected files exist in unpacked rootfs + agentPath := filepath.Join(unpackDir, "usr", "local", "bin", "guest-agent") + agentContent, err := os.ReadFile(agentPath) + require.NoError(t, err, "guest-agent binary should exist in unpacked rootfs") + assert.Equal(t, "fake-builder-binary-v1", string(agentContent)) + + builderJSON := filepath.Join(unpackDir, "etc", "builder.json") + jsonContent, err := os.ReadFile(builderJSON) + require.NoError(t, err, "builder.json should exist in unpacked rootfs") + assert.Equal(t, `{"version":"1.0"}`, string(jsonContent)) + + appDir := filepath.Join(unpackDir, "app") + stat, err := os.Stat(appDir) + require.NoError(t, err, "/app directory should exist") + assert.True(t, stat.IsDir()) + + t.Log("Full roundtrip verified: docker save tarball → OCI layout → existsInLayout → extractMetadata → unpackLayers") +} + +// TestDockerSaveToOCILayoutCacheHit verifies that pullAndExport correctly +// detects a cache hit when the image has already been written to OCI layout +// (via AppendImage), skipping the remote pull entirely. This is the exact +// flow when buildBuilderFromDockerfile writes to cache and then ImportLocalImage +// triggers buildImage → pullAndExport. +func TestDockerSaveToOCILayoutCacheHit(t *testing.T) { + // Create synthetic image and write to OCI layout + img := createTestDockerImage(t) + + imgDigest, err := img.Digest() + require.NoError(t, err) + digestStr := imgDigest.String() + layoutTag := digestToLayoutTag(digestStr) + + cacheDir := t.TempDir() + path, err := layout.Write(cacheDir, empty.Index) + require.NoError(t, err) + + err = path.AppendImage(img, layout.WithAnnotations(map[string]string{ + "org.opencontainers.image.ref.name": layoutTag, + })) + require.NoError(t, err) + + // Create OCI client pointing at same cache dir + client, err := newOCIClient(cacheDir) + require.NoError(t, err) + + // Call pullAndExport with a bogus imageRef — since the digest is already cached, + // it should NOT attempt a remote pull and should succeed from cache alone + exportDir := filepath.Join(t.TempDir(), "export") + result, err := client.pullAndExport( + context.Background(), + "localhost:9999/nonexistent/image:v1", // would fail if it tried to pull + digestStr, + exportDir, + ) + require.NoError(t, err, "pullAndExport should succeed from cache without remote pull") + require.NotNil(t, result) + + // Verify metadata was extracted + assert.Equal(t, []string{"/usr/local/bin/guest-agent"}, result.Metadata.Entrypoint) + assert.Equal(t, "/app", result.Metadata.WorkingDir) + assert.Equal(t, digestStr, result.Digest) + + // Verify rootfs was unpacked (umoci extracts directly into exportDir) + agentPath := filepath.Join(exportDir, "usr", "local", "bin", "guest-agent") + content, err := os.ReadFile(agentPath) + require.NoError(t, err) + assert.Equal(t, "fake-builder-binary-v1", string(content)) + + t.Log("Cache hit verified: pullAndExport skipped remote pull and used OCI layout cache") +} From 964eacb4eb80b9f57b09f07d256c7c031cba18f8 Mon Sep 17 00:00:00 2001 From: Rafael Garcia Date: Tue, 10 Feb 2026 19:57:37 -0500 Subject: [PATCH 5/5] refactor(resources): unify disk resource into single cross-platform file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Merge disk_darwin.go and disk_linux.go into disk.go since both implementations are nearly identical — the only difference was syscall.Statfs (linux) vs unix.Statfs (darwin). Use unix.Statfs from golang.org/x/sys/unix which works on both platforms. Addresses review feedback from @sjmiller. Co-Authored-By: Claude Opus 4.6 --- lib/resources/disk.go | 33 ++++++++++++++++++++++++ lib/resources/disk_darwin.go | 49 ------------------------------------ lib/resources/disk_linux.go | 42 ------------------------------- 3 files changed, 33 insertions(+), 91 deletions(-) delete mode 100644 lib/resources/disk_darwin.go delete mode 100644 lib/resources/disk_linux.go diff --git a/lib/resources/disk.go b/lib/resources/disk.go index 087438d9..d431ec20 100644 --- a/lib/resources/disk.go +++ b/lib/resources/disk.go @@ -5,6 +5,9 @@ import ( "strings" "github.com/c2h5oh/datasize" + "github.com/kernel/hypeman/cmd/api/config" + "github.com/kernel/hypeman/lib/paths" + "golang.org/x/sys/unix" ) // DiskResource implements Resource for disk space discovery and tracking. @@ -16,6 +19,36 @@ type DiskResource struct { volumeLister VolumeLister } +// NewDiskResource discovers disk capacity for the data directory. +// If cfg.DiskLimit is set, uses that as capacity; otherwise auto-detects via statfs. +func NewDiskResource(cfg *config.Config, p *paths.Paths, instLister InstanceLister, imgLister ImageLister, volLister VolumeLister) (*DiskResource, error) { + var capacity int64 + + if cfg.DiskLimit != "" { + // Parse configured limit + var ds datasize.ByteSize + if err := ds.UnmarshalText([]byte(cfg.DiskLimit)); err != nil { + return nil, err + } + capacity = int64(ds.Bytes()) + } else { + // Auto-detect from filesystem + var stat unix.Statfs_t + if err := unix.Statfs(cfg.DataDir, &stat); err != nil { + return nil, err + } + capacity = int64(stat.Blocks) * int64(stat.Bsize) + } + + return &DiskResource{ + capacity: capacity, + dataDir: cfg.DataDir, + instanceLister: instLister, + imageLister: imgLister, + volumeLister: volLister, + }, nil +} + // Type returns the resource type. func (d *DiskResource) Type() ResourceType { return ResourceDisk diff --git a/lib/resources/disk_darwin.go b/lib/resources/disk_darwin.go deleted file mode 100644 index 8a5d32f3..00000000 --- a/lib/resources/disk_darwin.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build darwin - -package resources - -import ( - "os" - - "github.com/c2h5oh/datasize" - "github.com/kernel/hypeman/cmd/api/config" - "github.com/kernel/hypeman/lib/paths" - "golang.org/x/sys/unix" -) - -// NewDiskResource discovers disk capacity on macOS. -func NewDiskResource(cfg *config.Config, p *paths.Paths, instLister InstanceLister, imgLister ImageLister, volLister VolumeLister) (*DiskResource, error) { - var capacity int64 - - if cfg.DiskLimit != "" { - // Parse configured limit - var ds datasize.ByteSize - if err := ds.UnmarshalText([]byte(cfg.DiskLimit)); err != nil { - return nil, err - } - capacity = int64(ds.Bytes()) - } else { - // Auto-detect from filesystem using statfs - var stat unix.Statfs_t - dataDir := cfg.DataDir - if err := unix.Statfs(dataDir, &stat); err != nil { - // Fallback: try to stat the root if data dir doesn't exist yet - if os.IsNotExist(err) { - if err := unix.Statfs("/", &stat); err != nil { - return nil, err - } - } else { - return nil, err - } - } - capacity = int64(stat.Blocks) * int64(stat.Bsize) - } - - return &DiskResource{ - capacity: capacity, - dataDir: cfg.DataDir, - instanceLister: instLister, - imageLister: imgLister, - volumeLister: volLister, - }, nil -} diff --git a/lib/resources/disk_linux.go b/lib/resources/disk_linux.go deleted file mode 100644 index e6cc8fb1..00000000 --- a/lib/resources/disk_linux.go +++ /dev/null @@ -1,42 +0,0 @@ -//go:build linux - -package resources - -import ( - "syscall" - - "github.com/c2h5oh/datasize" - "github.com/kernel/hypeman/cmd/api/config" - "github.com/kernel/hypeman/lib/paths" -) - -// NewDiskResource discovers disk capacity for the data directory. -// If cfg.DiskLimit is set, uses that as capacity; otherwise auto-detects via statfs. -func NewDiskResource(cfg *config.Config, p *paths.Paths, instLister InstanceLister, imgLister ImageLister, volLister VolumeLister) (*DiskResource, error) { - var capacity int64 - - if cfg.DiskLimit != "" { - // Parse configured limit - var ds datasize.ByteSize - if err := ds.UnmarshalText([]byte(cfg.DiskLimit)); err != nil { - return nil, err - } - capacity = int64(ds.Bytes()) - } else { - // Auto-detect from filesystem - var stat syscall.Statfs_t - if err := syscall.Statfs(cfg.DataDir, &stat); err != nil { - return nil, err - } - // Total space = blocks * block size - capacity = int64(stat.Blocks) * int64(stat.Bsize) - } - - return &DiskResource{ - capacity: capacity, - dataDir: cfg.DataDir, - instanceLister: instLister, - imageLister: imgLister, - volumeLister: volLister, - }, nil -}