Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 27 additions & 4 deletions cmd/api/api/instances.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,10 +104,27 @@ func (s *ApiService) CreateInstance(ctx context.Context, request oapi.CreateInst
if vol.Readonly != nil {
readonly = *vol.Readonly
}
overlay := false
if vol.Overlay != nil {
overlay = *vol.Overlay
}
var overlaySize int64
if vol.OverlaySize != nil && *vol.OverlaySize != "" {
var overlaySizeBytes datasize.ByteSize
if err := overlaySizeBytes.UnmarshalText([]byte(*vol.OverlaySize)); err != nil {
return oapi.CreateInstance400JSONResponse{
Code: "invalid_overlay_size",
Message: fmt.Sprintf("invalid overlay_size for volume %s: %v", vol.VolumeId, err),
}, nil
}
overlaySize = int64(overlaySizeBytes)
}
volumes[i] = instances.VolumeAttachment{
VolumeID: vol.VolumeId,
MountPath: vol.MountPath,
Readonly: readonly,
VolumeID: vol.VolumeId,
MountPath: vol.MountPath,
Readonly: readonly,
Overlay: overlay,
OverlaySize: overlaySize,
}
}
}
Expand Down Expand Up @@ -461,11 +478,17 @@ func instanceToOAPI(inst instances.Instance) oapi.Instance {
if len(inst.Volumes) > 0 {
oapiVolumes := make([]oapi.VolumeAttachment, len(inst.Volumes))
for i, vol := range inst.Volumes {
oapiVolumes[i] = oapi.VolumeAttachment{
oapiVol := oapi.VolumeAttachment{
VolumeId: vol.VolumeID,
MountPath: vol.MountPath,
Readonly: lo.ToPtr(vol.Readonly),
}
if vol.Overlay {
oapiVol.Overlay = lo.ToPtr(true)
overlaySizeStr := datasize.ByteSize(vol.OverlaySize).HR()
oapiVol.OverlaySize = lo.ToPtr(overlaySizeStr)
}
oapiVolumes[i] = oapiVol
}
oapiInst.Volumes = &oapiVolumes
}
Expand Down
18 changes: 13 additions & 5 deletions cmd/api/api/volumes.go
Original file line number Diff line number Diff line change
Expand Up @@ -140,12 +140,20 @@ func volumeToOAPI(vol volumes.Volume) oapi.Volume {
SizeGb: vol.SizeGb,
CreatedAt: vol.CreatedAt,
}
if vol.AttachedTo != nil {
oapiVol.AttachedTo = vol.AttachedTo
}
if vol.MountPath != nil {
oapiVol.MountPath = vol.MountPath

// Convert attachments
if len(vol.Attachments) > 0 {
attachments := make([]oapi.VolumeAttachmentInfo, len(vol.Attachments))
for i, att := range vol.Attachments {
attachments[i] = oapi.VolumeAttachmentInfo{
InstanceId: att.InstanceID,
MountPath: att.MountPath,
Readonly: att.Readonly,
}
}
oapiVol.Attachments = &attachments
}

return oapiVol
}

26 changes: 18 additions & 8 deletions lib/instances/configdisk.go
Original file line number Diff line number Diff line change
Expand Up @@ -107,22 +107,32 @@ GUEST_DNS="%s"

// Build volume mounts section
// Volumes are attached as /dev/vdd, /dev/vde, etc. (after vda=rootfs, vdb=overlay, vdc=config)
// For overlay volumes, two devices are used: base + overlay disk
// Format: device:path:mode[:overlay_device]
volumeSection := ""
if len(inst.Volumes) > 0 {
var volumeLines strings.Builder
volumeLines.WriteString("\n# Volume mounts (device:path:readonly)\n")
volumeLines.WriteString("\n# Volume mounts (device:path:mode[:overlay_device])\n")
volumeLines.WriteString("VOLUME_MOUNTS=\"")
deviceIdx := 0 // Track device index (starts at 'd' = vdd)
for i, vol := range inst.Volumes {
// Device naming: vdd, vde, vdf, ...
device := fmt.Sprintf("/dev/vd%c", 'd'+i)
readonly := "rw"
if vol.Readonly {
readonly = "ro"
}
device := fmt.Sprintf("/dev/vd%c", 'd'+deviceIdx)
if i > 0 {
volumeLines.WriteString(" ")
}
volumeLines.WriteString(fmt.Sprintf("%s:%s:%s", device, vol.MountPath, readonly))
if vol.Overlay {
// Overlay mode: base device + overlay device
overlayDevice := fmt.Sprintf("/dev/vd%c", 'd'+deviceIdx+1)
volumeLines.WriteString(fmt.Sprintf("%s:%s:overlay:%s", device, vol.MountPath, overlayDevice))
deviceIdx += 2 // Overlay uses 2 devices
} else {
mode := "rw"
if vol.Readonly {
mode = "ro"
}
volumeLines.WriteString(fmt.Sprintf("%s:%s:%s", device, vol.MountPath, mode))
deviceIdx++ // Regular volume uses 1 device
}
}
volumeLines.WriteString("\"\n")
volumeSection = volumeLines.String()
Expand Down
65 changes: 51 additions & 14 deletions lib/instances/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -263,18 +263,14 @@ func (m *manager) createInstance(
if len(req.Volumes) > 0 {
log.DebugContext(ctx, "validating volumes", "id", id, "count", len(req.Volumes))
for _, volAttach := range req.Volumes {
// Check volume exists and is not attached
vol, err := m.volumeManager.GetVolume(ctx, volAttach.VolumeID)
// Check volume exists
_, err := m.volumeManager.GetVolume(ctx, volAttach.VolumeID)
if err != nil {
log.ErrorContext(ctx, "volume not found", "id", id, "volume_id", volAttach.VolumeID, "error", err)
return nil, fmt.Errorf("volume %s: %w", volAttach.VolumeID, err)
}
if vol.AttachedTo != nil {
log.ErrorContext(ctx, "volume already attached", "id", id, "volume_id", volAttach.VolumeID, "attached_to", *vol.AttachedTo)
return nil, fmt.Errorf("volume %s is already attached to instance %s", volAttach.VolumeID, *vol.AttachedTo)
}

// Mark volume as attached
// Mark volume as attached (AttachVolume handles multi-attach validation)
if err := m.volumeManager.AttachVolume(ctx, volAttach.VolumeID, volumes.AttachVolumeRequest{
InstanceID: id,
MountPath: volAttach.MountPath,
Expand All @@ -287,8 +283,17 @@ func (m *manager) createInstance(
// Add volume cleanup to stack
volumeID := volAttach.VolumeID // capture for closure
cu.Add(func() {
m.volumeManager.DetachVolume(ctx, volumeID)
m.volumeManager.DetachVolume(ctx, volumeID, id)
})

// Create overlay disk for volumes with overlay enabled
if volAttach.Overlay {
log.DebugContext(ctx, "creating volume overlay disk", "id", id, "volume_id", volAttach.VolumeID, "size", volAttach.OverlaySize)
if err := m.createVolumeOverlayDisk(id, volAttach.VolumeID, volAttach.OverlaySize); err != nil {
log.ErrorContext(ctx, "failed to create volume overlay disk", "id", id, "volume_id", volAttach.VolumeID, "error", err)
return nil, fmt.Errorf("create volume overlay disk %s: %w", volAttach.VolumeID, err)
}
}
}
// Store volume attachments in metadata
stored.Volumes = req.Volumes
Expand Down Expand Up @@ -377,8 +382,16 @@ func validateCreateRequest(req CreateInstanceRequest) error {

// validateVolumeAttachments validates volume attachment requests
func validateVolumeAttachments(volumes []VolumeAttachment) error {
if len(volumes) > MaxVolumesPerInstance {
return fmt.Errorf("cannot attach more than %d volumes per instance", MaxVolumesPerInstance)
// Count total devices needed (each overlay volume needs 2 devices: base + overlay)
totalDevices := 0
for _, vol := range volumes {
totalDevices++
if vol.Overlay {
totalDevices++ // Overlay needs an additional device
}
}
if totalDevices > MaxVolumesPerInstance {
return fmt.Errorf("cannot attach more than %d volume devices per instance (overlay volumes count as 2)", MaxVolumesPerInstance)
}

seenPaths := make(map[string]bool)
Expand All @@ -401,6 +414,16 @@ func validateVolumeAttachments(volumes []VolumeAttachment) error {
return fmt.Errorf("duplicate mount path %q", cleanPath)
}
seenPaths[cleanPath] = true

// Validate overlay mode requirements
if vol.Overlay {
if !vol.Readonly {
return fmt.Errorf("volume %s: overlay mode requires readonly=true", vol.VolumeID)
}
if vol.OverlaySize <= 0 {
return fmt.Errorf("volume %s: overlay_size is required when overlay=true", vol.VolumeID)
}
}
}

return nil
Expand Down Expand Up @@ -556,12 +579,26 @@ func (m *manager) buildVMConfig(inst *Instance, imageInfo *images.Image, netConf
}

// Add attached volumes as additional disks
// For overlay volumes, add both base (readonly) and overlay disk
for _, volAttach := range inst.Volumes {
volumePath := m.volumeManager.GetVolumePath(volAttach.VolumeID)
disks = append(disks, vmm.DiskConfig{
Path: &volumePath,
Readonly: ptr(volAttach.Readonly),
})
if volAttach.Overlay {
// Base volume is always read-only when overlay is enabled
disks = append(disks, vmm.DiskConfig{
Path: &volumePath,
Readonly: ptr(true),
})
// Overlay disk is writable
overlayPath := m.paths.InstanceVolumeOverlay(inst.Id, volAttach.VolumeID)
disks = append(disks, vmm.DiskConfig{
Path: &overlayPath,
})
} else {
disks = append(disks, vmm.DiskConfig{
Path: &volumePath,
Readonly: ptr(volAttach.Readonly),
})
}
}

// Serial console configuration
Expand Down
2 changes: 1 addition & 1 deletion lib/instances/delete.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ func (m *manager) deleteInstance(
if len(inst.Volumes) > 0 {
log.DebugContext(ctx, "detaching volumes", "id", id, "count", len(inst.Volumes))
for _, volAttach := range inst.Volumes {
if err := m.volumeManager.DetachVolume(ctx, volAttach.VolumeID); err != nil {
if err := m.volumeManager.DetachVolume(ctx, volAttach.VolumeID, id); err != nil {
// Log error but continue with cleanup
log.WarnContext(ctx, "failed to detach volume, continuing with cleanup", "id", id, "volume_id", volAttach.VolumeID, "error", err)
}
Expand Down
10 changes: 5 additions & 5 deletions lib/instances/manager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ func TestCreateAndDeleteInstance(t *testing.T) {

// Verify volume file exists and is not attached
assert.FileExists(t, p.VolumeData(vol.Id))
assert.Nil(t, vol.AttachedTo, "Volume should not be attached yet")
assert.Empty(t, vol.Attachments, "Volume should not be attached yet")

// Create instance with real nginx image and attached volume
req := CreateInstanceRequest{
Expand Down Expand Up @@ -267,9 +267,9 @@ func TestCreateAndDeleteInstance(t *testing.T) {
// Verify volume shows as attached
vol, err = volumeManager.GetVolume(ctx, vol.Id)
require.NoError(t, err)
require.NotNil(t, vol.AttachedTo, "Volume should be attached")
assert.Equal(t, inst.Id, *vol.AttachedTo)
assert.Equal(t, "/mnt/data", *vol.MountPath)
require.Len(t, vol.Attachments, 1, "Volume should be attached")
assert.Equal(t, inst.Id, vol.Attachments[0].InstanceID)
assert.Equal(t, "/mnt/data", vol.Attachments[0].MountPath)

// Verify directories exist
assert.DirExists(t, p.InstanceDir(inst.Id))
Expand Down Expand Up @@ -444,7 +444,7 @@ func TestCreateAndDeleteInstance(t *testing.T) {
// Verify volume is detached but still exists
vol, err = volumeManager.GetVolume(ctx, vol.Id)
require.NoError(t, err)
assert.Nil(t, vol.AttachedTo, "Volume should be detached after instance deletion")
assert.Empty(t, vol.Attachments, "Volume should be detached after instance deletion")
assert.FileExists(t, p.VolumeData(vol.Id), "Volume file should still exist")

// Delete volume
Expand Down
66 changes: 65 additions & 1 deletion lib/instances/resource_limits_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ func TestValidateVolumeAttachments_MaxVolumes(t *testing.T) {

err := validateVolumeAttachments(volumes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "cannot attach more than 23 volumes")
assert.Contains(t, err.Error(), "cannot attach more than 23")
}

func TestValidateVolumeAttachments_SystemDirectory(t *testing.T) {
Expand Down Expand Up @@ -83,6 +83,70 @@ func TestValidateVolumeAttachments_Empty(t *testing.T) {
assert.NoError(t, err)
}

func TestValidateVolumeAttachments_OverlayRequiresReadonly(t *testing.T) {
	// Overlay mode layers a writable disk over a read-only base, so the
	// base attachment must be marked Readonly; Readonly=false is rejected.
	att := VolumeAttachment{
		VolumeID:    "vol-1",
		MountPath:   "/mnt/data",
		Readonly:    false, // invalid combination with Overlay=true
		Overlay:     true,
		OverlaySize: 100 * 1024 * 1024,
	}

	err := validateVolumeAttachments([]VolumeAttachment{att})
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "overlay mode requires readonly=true")
}

func TestValidateVolumeAttachments_OverlayRequiresSize(t *testing.T) {
	// An overlay attachment must declare a positive OverlaySize; a zero
	// size cannot back a writable overlay disk and must be rejected.
	att := VolumeAttachment{
		VolumeID:    "vol-1",
		MountPath:   "/mnt/data",
		Readonly:    true,
		Overlay:     true,
		OverlaySize: 0, // invalid: no size given for the overlay disk
	}

	err := validateVolumeAttachments([]VolumeAttachment{att})
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "overlay_size is required")
}

func TestValidateVolumeAttachments_OverlayValid(t *testing.T) {
	// A read-only attachment with overlay enabled and a positive overlay
	// size satisfies every overlay constraint and passes validation.
	att := VolumeAttachment{
		VolumeID:    "vol-1",
		MountPath:   "/mnt/data",
		Readonly:    true,
		Overlay:     true,
		OverlaySize: 100 * 1024 * 1024, // 100MB
	}

	err := validateVolumeAttachments([]VolumeAttachment{att})
	assert.NoError(t, err)
}

func TestValidateVolumeAttachments_OverlayCountsAsTwoDevices(t *testing.T) {
	// Each overlay volume consumes two guest devices (read-only base +
	// writable overlay disk). 12 overlay volumes therefore need 24 devices,
	// which exceeds the 23-device limit even though the raw volume count
	// (12) is well under it. 11 overlay volumes (22 devices) would still fit.
	volumes := make([]VolumeAttachment, 12)
	for i := range volumes {
		volumes[i] = VolumeAttachment{
			VolumeID:    "vol-" + string(rune('a'+i)),
			MountPath:   "/mnt/vol" + string(rune('a'+i)),
			Readonly:    true,
			Overlay:     true,
			OverlaySize: 100 * 1024 * 1024,
		}
	}

	err := validateVolumeAttachments(volumes)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "cannot attach more than 23")
}

// createTestManager creates a manager with specified limits for testing
func createTestManager(t *testing.T, limits ResourceLimits) *manager {
t.Helper()
Expand Down
15 changes: 15 additions & 0 deletions lib/instances/storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,21 @@ func (m *manager) createOverlayDisk(id string, sizeBytes int64) error {
return images.CreateEmptyExt4Disk(overlayPath, sizeBytes)
}

// createVolumeOverlayDisk provisions a sparse ext4 overlay disk that backs
// one volume attachment of an instance.
//
// Cleanup note: if instance creation fails after this point, the overlay disk
// is removed automatically by deleteInstanceData(), which deletes the whole
// instance directory (including vol-overlays/) via the cleanup stack in
// createInstance().
func (m *manager) createVolumeOverlayDisk(instanceID, volumeID string, sizeBytes int64) error {
	// The per-instance vol-overlays directory may not exist yet.
	dir := m.paths.InstanceVolumeOverlaysDir(instanceID)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("create vol-overlays directory: %w", err)
	}

	path := m.paths.InstanceVolumeOverlay(instanceID, volumeID)
	return images.CreateEmptyExt4Disk(path, sizeBytes)
}

// deleteInstanceData removes all instance data from disk
func (m *manager) deleteInstanceData(id string) error {
instDir := m.paths.InstanceDir(id)
Expand Down
8 changes: 5 additions & 3 deletions lib/instances/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,11 @@ const (

// VolumeAttachment represents a volume attached to an instance
type VolumeAttachment struct {
VolumeID string // Volume ID
MountPath string // Mount path in guest
Readonly bool // Whether mounted read-only
VolumeID string // Volume ID
MountPath string // Mount path in guest
Readonly bool // Whether mounted read-only
Overlay bool // If true, create per-instance overlay for writes (requires Readonly=true)
OverlaySize int64 // Size of overlay disk in bytes (max diff from base)
}

// StoredMetadata represents instance metadata that is persisted to disk
Expand Down
Loading
Loading