diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go
index 0c26c8a5..26160c4b 100644
--- a/api/v1alpha1/proxmoxmachine_types.go
+++ b/api/v1alpha1/proxmoxmachine_types.go
@@ -136,41 +136,65 @@ type ProxmoxMachineSpec struct {
 // Storage is the physical storage on the node.
 type Storage struct {
 	// BootVolume defines the storage size for the boot volume.
+	// Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (one item
+	// with boot:true). Use BootVolume for v1alpha1/v1alpha2, but plan to migrate to `volumes[]` when
+	// available.
 	// This field is optional, and should only be set if you want
 	// to change the size of the boot volume.
 	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable"
 	// +optional
-	BootVolume *DiskSize `json:"bootVolume,omitempty"`
+	BootVolume *DiskSpec `json:"bootVolume,omitempty"`

-	// TODO Intended to add handling for additional volumes,
-	// which will be added to the node.
-	// e.g. AdditionalVolumes []DiskSize.
+	// AdditionalVolumes defines additional volumes to be added to the virtual machine.
+	// Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (additional
+	// items with boot:false). Use for v1alpha1/v1alpha2; plan to migrate to `volumes[]` when available.
+	// +optional
+	AdditionalVolumes []DiskSpec `json:"additionalVolumes,omitempty"`
 }

-// DiskSize is contains values for the disk device and size.
-type DiskSize struct {
+// DiskSpec contains values for the disk device and size.
+type DiskSpec struct {
 	// Disk is the name of the disk device, that should be resized.
 	// Example values are: ide[0-3], scsi[0-30], sata[0-5].
 	Disk string `json:"disk"`

-	// Size defines the size in gigabyte.
-	//
 	// As Proxmox does not support shrinking, the size
 	// must be bigger than the already configured size in the
 	// template.
-	//
 	// +kubebuilder:validation:Minimum=5
 	SizeGB int32 `json:"sizeGb"`

+	// Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data").
+	// If omitted, it falls back to the machine's .spec.storage if set, otherwise the Proxmox node's default storage.
+	// +optional
+	Storage *string `json:"storage,omitempty"`

+	// Format is the optional target format of the volume (raw, qcow2, or vmdk).
+	// Only meaningful for file-based storage; if omitted, the storage's default format is used.
+	// +optional
+	Format *TargetFileStorageFormat `json:"format,omitempty"`

+	// Discard enables TRIM/UNMAP support for this virtual disk.
+	// Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on".
+	// If omitted or false, the flag is not set.
+	// +optional
+	Discard *bool `json:"discard,omitempty"`

+	// IOThread enables the IO Thread option.
+	// With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads.
+	// The IO Thread option can only be used with disks attached to the VirtIO or SCSI controllers.
+	// +optional
+	IOThread *bool `json:"ioThread,omitempty"`

+	// SSD enables the SSD emulation feature.
+	// SSD emulation presents a drive to the guest as a solid-state drive rather than a rotational hard disk.
+	// There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type.
+	// SSD emulation is not supported on VirtIO Block drives.
+	// +optional
+	SSD *bool `json:"ssd,omitempty"`
 }

 // TargetFileStorageFormat the target format of the cloned disk.
+// +kubebuilder:validation:Enum=raw;qcow2;vmdk
 type TargetFileStorageFormat string

 // Supported disk formats.
const ( - TargetStorageFormatRaw TargetFileStorageFormat = "raw" - TargetStorageFormatQcow2 TargetFileStorageFormat = "qcow2" - TargetStorageFormatVmdk TargetFileStorageFormat = "vmdk" + TargetFileStorageFormatRaw TargetFileStorageFormat = "raw" + TargetFileStorageFormatQCOW2 TargetFileStorageFormat = "qcow2" + TargetFileStorageFormatVMDK TargetFileStorageFormat = "vmdk" ) // TemplateSource defines the source of the template VM. @@ -632,7 +656,7 @@ func (r *ProxmoxMachine) GetNode() string { } // FormatSize returns the format required for the Proxmox API. -func (d *DiskSize) FormatSize() string { +func (d *DiskSpec) FormatSize() string { return fmt.Sprintf("%dG", d.SizeGB) } diff --git a/api/v1alpha1/proxmoxmachine_types_test.go b/api/v1alpha1/proxmoxmachine_types_test.go index e6f14fb5..6173a748 100644 --- a/api/v1alpha1/proxmoxmachine_types_test.go +++ b/api/v1alpha1/proxmoxmachine_types_test.go @@ -18,6 +18,7 @@ package v1alpha1 import ( "context" + "encoding/json" "strconv" . "github.com/onsi/ginkgo/v2" @@ -44,7 +45,7 @@ func defaultMachine() *ProxmoxMachine { }, }, Disks: &Storage{ - BootVolume: &DiskSize{ + BootVolume: &DiskSpec{ Disk: "scsi0", SizeGB: 100, }, @@ -62,7 +63,7 @@ var _ = Describe("ProxmoxMachine Test", func() { Context("VirtualMachineCloneSpec", func() { It("Should not allow specifying format if full clone is disabled", func() { dm := defaultMachine() - dm.Spec.Format = ptr.To(TargetStorageFormatRaw) + dm.Spec.Format = ptr.To(TargetFileStorageFormatRaw) dm.Spec.Full = ptr.To(false) Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("Must set full=true when specifying format"))) @@ -177,6 +178,272 @@ var _ = Describe("ProxmoxMachine Test", func() { }) }) + Context("AdditionalVolumes format/storage - JSON marshalling", func() { + It("includes format and storage when set", func() { + f := TargetFileStorageFormat("qcow2") + s := "nfs-templates" + ds := DiskSpec{ + Disk: "scsi1", + SizeGB: 80, + Format: &f, + Storage: &s, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi1"`)) + Expect(js).To(ContainSubstring(`"sizeGb":80`)) + Expect(js).To(ContainSubstring(`"format":"qcow2"`)) + Expect(js).To(ContainSubstring(`"storage":"nfs-templates"`)) + }) + It("omits format and storage when nil", func() { + ds := DiskSpec{ + Disk: "scsi2", + SizeGB: 120, + Format: nil, + Storage: nil, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi2"`)) + Expect(js).To(ContainSubstring(`"sizeGb":120`)) + Expect(js).NotTo(ContainSubstring(`"format"`)) + Expect(js).NotTo(ContainSubstring(`"storage"`)) + }) + }) + + Context("AdditionalVolumes format/storage - DeepCopy", func() { + It("preserves per-volume format and storage and performs a deep copy", func() { + qcow2 := TargetFileStorageFormat("qcow2") + store := "filestore-a" + src := &Storage{ + AdditionalVolumes: []DiskSpec{ + {Disk: "scsi1", SizeGB: 80, Format: &qcow2, Storage: &store}, + }, + } + dst := src.DeepCopy() + Expect(dst).NotTo(BeNil()) + Expect(dst.AdditionalVolumes).To(HaveLen(1)) + got := dst.AdditionalVolumes[0] + Expect(got.Disk).To(Equal("scsi1")) + Expect(got.SizeGB).To(Equal(int32(80))) + Expect(got.Format).NotTo(BeNil()) + Expect(*got.Format).To(Equal(TargetFileStorageFormat("qcow2"))) + Expect(got.Storage).NotTo(BeNil()) + Expect(*got.Storage).To(Equal("filestore-a")) + newFmt := TargetFileStorageFormat("raw") + newStore := 
"filestore-b" + *src.AdditionalVolumes[0].Format = newFmt + *src.AdditionalVolumes[0].Storage = newStore + Expect(dst.AdditionalVolumes[0].Format).NotTo(BeNil()) + Expect(*dst.AdditionalVolumes[0].Format).To(Equal(TargetFileStorageFormat("qcow2"))) + Expect(dst.AdditionalVolumes[0].Storage).NotTo(BeNil()) + Expect(*dst.AdditionalVolumes[0].Storage).To(Equal("filestore-a")) + }) + }) + + Context("AdditionalVolumes discard - JSON marshalling", func() { + It("includes discard when explicitly true", func() { + dTrue := true + ds := DiskSpec{ + Disk: "scsi3", + SizeGB: 60, + Discard: &dTrue, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi3"`)) + Expect(js).To(ContainSubstring(`"sizeGb":60`)) + Expect(js).To(ContainSubstring(`"discard":true`)) + }) + It("includes discard when explicitly false (non-nil pointer)", func() { + dFalse := false + ds := DiskSpec{ + Disk: "scsi4", + SizeGB: 70, + Discard: &dFalse, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi4"`)) + Expect(js).To(ContainSubstring(`"sizeGb":70`)) + // Because Discard is a bool, omitempty does NOT drop a false: + Expect(js).To(ContainSubstring(`"discard":false`)) + }) + It("omits discard when nil", func() { + ds := DiskSpec{ + Disk: "scsi5", + SizeGB: 80, + Discard: nil, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi5"`)) + Expect(js).To(ContainSubstring(`"sizeGb":80`)) + Expect(js).NotTo(ContainSubstring(`"discard"`)) + }) + }) + + Context("AdditionalVolumes discard - DeepCopy", func() { + It("preserves per-volume discard and performs a deep copy", func() { + dTrue := true + src := &Storage{ + AdditionalVolumes: []DiskSpec{ + {Disk: "scsi6", SizeGB: 90, Discard: &dTrue}, + }, + } + dst := src.DeepCopy() + Expect(dst).NotTo(BeNil()) + Expect(dst.AdditionalVolumes).To(HaveLen(1)) + got := dst.AdditionalVolumes[0] + Expect(got.Disk).To(Equal("scsi6")) + Expect(got.SizeGB).To(Equal(int32(90))) + Expect(got.Discard).NotTo(BeNil()) + Expect(*got.Discard).To(BeTrue()) + *src.AdditionalVolumes[0].Discard = false + Expect(dst.AdditionalVolumes[0].Discard).NotTo(BeNil()) + Expect(*dst.AdditionalVolumes[0].Discard).To(BeTrue()) + }) + }) + Context("AdditionalVolumes iothread - JSON marshalling", func() { + It("includes iothread when explicitly true", func() { + tTrue := true + ds := DiskSpec{ + Disk: "scsi7", + SizeGB: 60, + IOThread: &tTrue, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi7"`)) + Expect(js).To(ContainSubstring(`"sizeGb":60`)) + Expect(js).To(ContainSubstring(`"ioThread":true`)) + }) + It("includes iothread when explicitly false", func() { + tFalse := false + ds := DiskSpec{ + Disk: "scsi8", + SizeGB: 70, + IOThread: &tFalse, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi8"`)) + Expect(js).To(ContainSubstring(`"sizeGb":70`)) + Expect(js).To(ContainSubstring(`"ioThread":false`)) // non-nil -> present + }) + + It("omits iothread when nil", func() { + ds := DiskSpec{ + Disk: "scsi9", + SizeGB: 80, + IOThread: nil, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi9"`)) + Expect(js).To(ContainSubstring(`"sizeGb":80`)) + 
Expect(js).NotTo(ContainSubstring(`"ioThread"`)) + }) + }) + + Context("AdditionalVolumes iothread - DeepCopy", func() { + It("preserves per-volume iothread and performs a deep copy", func() { + tTrue := true + src := &Storage{ + AdditionalVolumes: []DiskSpec{ + {Disk: "scsi10", SizeGB: 90, IOThread: &tTrue}, + }, + } + dst := src.DeepCopy() + Expect(dst).NotTo(BeNil()) + Expect(dst.AdditionalVolumes).To(HaveLen(1)) + got := dst.AdditionalVolumes[0] + Expect(got.Disk).To(Equal("scsi10")) + Expect(got.SizeGB).To(Equal(int32(90))) + Expect(got.IOThread).NotTo(BeNil()) + Expect(*got.IOThread).To(BeTrue()) + *src.AdditionalVolumes[0].IOThread = false + Expect(dst.AdditionalVolumes[0].IOThread).NotTo(BeNil()) + Expect(*dst.AdditionalVolumes[0].IOThread).To(BeTrue()) + }) + }) + Context("AdditionalVolumes ssd - JSON marshalling", func() { + It("includes ssd when explicitly true", func() { + sTrue := true + ds := DiskSpec{ + Disk: "scsi11", + SizeGB: 60, + SSD: &sTrue, + } + + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + + Expect(js).To(ContainSubstring(`"disk":"scsi11"`)) + Expect(js).To(ContainSubstring(`"sizeGb":60`)) + Expect(js).To(ContainSubstring(`"ssd":true`)) + }) + It("includes ssd when explicitly false", func() { + sFalse := false + ds := DiskSpec{ + Disk: "scsi12", + SizeGB: 70, + SSD: &sFalse, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi12"`)) + Expect(js).To(ContainSubstring(`"sizeGb":70`)) + Expect(js).To(ContainSubstring(`"ssd":false`)) // non-nil -> present + }) + It("omits ssd when nil", func() { + ds := DiskSpec{ + Disk: "scsi13", + SizeGB: 80, + SSD: nil, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi13"`)) + Expect(js).To(ContainSubstring(`"sizeGb":80`)) + Expect(js).NotTo(ContainSubstring(`"ssd"`)) + }) + }) + + Context("AdditionalVolumes ssd - DeepCopy", func() { + It("preserves per-volume ssd and performs a deep copy", func() { + sTrue := true + src := &Storage{ + AdditionalVolumes: []DiskSpec{ + {Disk: "scsi14", SizeGB: 90, SSD: &sTrue}, + }, + } + dst := src.DeepCopy() + Expect(dst).NotTo(BeNil()) + Expect(dst.AdditionalVolumes).To(HaveLen(1)) + got := dst.AdditionalVolumes[0] + Expect(got.Disk).To(Equal("scsi14")) + Expect(got.SizeGB).To(Equal(int32(90))) + Expect(got.SSD).NotTo(BeNil()) + Expect(*got.SSD).To(BeTrue()) + // Mutate source; destination should remain unchanged + *src.AdditionalVolumes[0].SSD = false + Expect(dst.AdditionalVolumes[0].SSD).NotTo(BeNil()) + Expect(*dst.AdditionalVolumes[0].SSD).To(BeTrue()) + }) + }) + Context("Network", func() { It("Should set default bridge", func() { dm := defaultMachine() diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index c1951100..ca8a7fbc 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -29,16 +29,41 @@ func (in *AdditionalNetworkDevice) DeepCopy() *AdditionalNetworkDevice { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DiskSize) DeepCopyInto(out *DiskSize) { +func (in *DiskSpec) DeepCopyInto(out *DiskSpec) { *out = *in + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(TargetFileStorageFormat) + **out = **in + } + if in.Discard != nil { + in, out := &in.Discard, &out.Discard + *out = new(bool) + **out = **in + } + if in.IOThread != nil { + in, out := &in.IOThread, &out.IOThread + *out = new(bool) + **out = **in + } + if in.SSD != nil { + in, out := &in.SSD, &out.SSD + *out = new(bool) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSize. -func (in *DiskSize) DeepCopy() *DiskSize { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSpec. +func (in *DiskSpec) DeepCopy() *DiskSpec { if in == nil { return nil } - out := new(DiskSize) + out := new(DiskSpec) in.DeepCopyInto(out) return out } @@ -933,8 +958,15 @@ func (in *Storage) DeepCopyInto(out *Storage) { *out = *in if in.BootVolume != nil { in, out := &in.BootVolume, &out.BootVolume - *out = new(DiskSize) - **out = **in + *out = new(DiskSpec) + (*in).DeepCopyInto(*out) + } + if in.AdditionalVolumes != nil { + in, out := &in.AdditionalVolumes, &out.AdditionalVolumes + *out = make([]DiskSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index 5c6ffdda..096e0dd0 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -105,27 +105,119 @@ spec: Disks contains a set of disk configuration options, which will be applied before the first startup. properties: + additionalVolumes: + description: |- + AdditionalVolumes defines additional volumes to be added to the virtual machine. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (additional + items with boot:false). Use for v1alpha1/v1alpha2; plan to migrate to `volumes[]` when available. + items: + description: DiskSpec is contains values for the disk + device and size. + properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean + disk: + description: |- + Disk is the name of the disk device, that should be resized. + Example values are: ide[0-3], scsi[0-30], sata[0-5]. + type: string + format: + description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk + type: string + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller + type: boolean + sizeGb: + description: |- + Size defines the size in gigabyte. + As Proxmox does not support shrinking, the size + must be bigger than the already configured size in the + template. 
+ format: int32 + minimum: 5 + type: integer + ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. + type: string + required: + - disk + - sizeGb + type: object + type: array bootVolume: description: |- BootVolume defines the storage size for the boot volume. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (one item + with boot:true). Use BootVolume for v1alpha1/v1alpha2, but plan to migrate to `volumes[]` when + available. This field is optional, and should only be set if you want to change the size of the boot volume. properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean disk: description: |- Disk is the name of the disk device, that should be resized. Example values are: ide[0-3], scsi[0-30], sata[0-5]. type: string + format: + description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk + type: string + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller + type: boolean sizeGb: description: |- Size defines the size in gigabyte. - As Proxmox does not support shrinking, the size must be bigger than the already configured size in the template. format: int32 minimum: 5 type: integer + ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. + type: string required: - disk - sizeGb @@ -135,12 +227,17 @@ spec: rule: self == oldSelf type: object format: + allOf: + - enum: + - raw + - qcow2 + - vmdk + - enum: + - raw + - qcow2 + - vmdk description: Format for file storage. Only valid for full clone. 
- enum: - - raw - - qcow2 - - vmdk type: string full: default: true diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 26e26e1e..b5e5daa3 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -129,27 +129,119 @@ spec: Disks contains a set of disk configuration options, which will be applied before the first startup. properties: + additionalVolumes: + description: |- + AdditionalVolumes defines additional volumes to be added to the virtual machine. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (additional + items with boot:false). Use for v1alpha1/v1alpha2; plan to migrate to `volumes[]` when available. + items: + description: DiskSpec is contains values for + the disk device and size. + properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean + disk: + description: |- + Disk is the name of the disk device, that should be resized. + Example values are: ide[0-3], scsi[0-30], sata[0-5]. + type: string + format: + description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk + type: string + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller + type: boolean + sizeGb: + description: |- + Size defines the size in gigabyte. + As Proxmox does not support shrinking, the size + must be bigger than the already configured size in the + template. + format: int32 + minimum: 5 + type: integer + ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. + type: string + required: + - disk + - sizeGb + type: object + type: array bootVolume: description: |- BootVolume defines the storage size for the boot volume. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (one item + with boot:true). Use BootVolume for v1alpha1/v1alpha2, but plan to migrate to `volumes[]` when + available. This field is optional, and should only be set if you want to change the size of the boot volume. properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean disk: description: |- Disk is the name of the disk device, that should be resized. Example values are: ide[0-3], scsi[0-30], sata[0-5]. 
type: string + format: + description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk + type: string + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller + type: boolean sizeGb: description: |- Size defines the size in gigabyte. - As Proxmox does not support shrinking, the size must be bigger than the already configured size in the template. format: int32 minimum: 5 type: integer + ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. + type: string required: - disk - sizeGb @@ -159,12 +251,17 @@ spec: rule: self == oldSelf type: object format: + allOf: + - enum: + - raw + - qcow2 + - vmdk + - enum: + - raw + - qcow2 + - vmdk description: Format for file storage. Only valid for full clone. - enum: - - raw - - qcow2 - - vmdk type: string full: default: true diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index 45cff41b..aa25c7d8 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -97,27 +97,119 @@ spec: Disks contains a set of disk configuration options, which will be applied before the first startup. properties: + additionalVolumes: + description: |- + AdditionalVolumes defines additional volumes to be added to the virtual machine. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (additional + items with boot:false). Use for v1alpha1/v1alpha2; plan to migrate to `volumes[]` when available. + items: + description: DiskSpec is contains values for the disk device + and size. + properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean + disk: + description: |- + Disk is the name of the disk device, that should be resized. + Example values are: ide[0-3], scsi[0-30], sata[0-5]. + type: string + format: + description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk + type: string + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller + type: boolean + sizeGb: + description: |- + Size defines the size in gigabyte. + As Proxmox does not support shrinking, the size + must be bigger than the already configured size in the + template. 
+ format: int32 + minimum: 5 + type: integer + ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. + type: string + required: + - disk + - sizeGb + type: object + type: array bootVolume: description: |- BootVolume defines the storage size for the boot volume. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (one item + with boot:true). Use BootVolume for v1alpha1/v1alpha2, but plan to migrate to `volumes[]` when + available. This field is optional, and should only be set if you want to change the size of the boot volume. properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean disk: description: |- Disk is the name of the disk device, that should be resized. Example values are: ide[0-3], scsi[0-30], sata[0-5]. type: string + format: + description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk + type: string + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller + type: boolean sizeGb: description: |- Size defines the size in gigabyte. - As Proxmox does not support shrinking, the size must be bigger than the already configured size in the template. format: int32 minimum: 5 type: integer + ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. + type: string required: - disk - sizeGb @@ -127,11 +219,16 @@ spec: rule: self == oldSelf type: object format: + allOf: + - enum: + - raw + - qcow2 + - vmdk + - enum: + - raw + - qcow2 + - vmdk description: Format for file storage. Only valid for full clone. 
- enum: - - raw - - qcow2 - - vmdk type: string full: default: true diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index c6e1a2ec..8925b67a 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -109,27 +109,119 @@ spec: Disks contains a set of disk configuration options, which will be applied before the first startup. properties: + additionalVolumes: + description: |- + AdditionalVolumes defines additional volumes to be added to the virtual machine. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (additional + items with boot:false). Use for v1alpha1/v1alpha2; plan to migrate to `volumes[]` when available. + items: + description: DiskSpec is contains values for the disk + device and size. + properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean + disk: + description: |- + Disk is the name of the disk device, that should be resized. + Example values are: ide[0-3], scsi[0-30], sata[0-5]. + type: string + format: + description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk + type: string + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller + type: boolean + sizeGb: + description: |- + Size defines the size in gigabyte. + As Proxmox does not support shrinking, the size + must be bigger than the already configured size in the + template. + format: int32 + minimum: 5 + type: integer + ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. + type: string + required: + - disk + - sizeGb + type: object + type: array bootVolume: description: |- BootVolume defines the storage size for the boot volume. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (one item + with boot:true). Use BootVolume for v1alpha1/v1alpha2, but plan to migrate to `volumes[]` when + available. This field is optional, and should only be set if you want to change the size of the boot volume. properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean disk: description: |- Disk is the name of the disk device, that should be resized. Example values are: ide[0-3], scsi[0-30], sata[0-5]. 
type: string
+              format:
+                description: 'Format is optional:'
+                enum:
+                - raw
+                - qcow2
+                - vmdk
+                type: string
+              ioThread:
+                description: |-
+                  IOThread enables the option IO Thread,
+                  With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads.
+                  The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller
+                type: boolean
              sizeGb:
                description: |-
                  Size defines the size in gigabyte.
-
                  As Proxmox does not support shrinking, the size
                  must be bigger than the already configured size in the
                  template.
                format: int32
                minimum: 5
                type: integer
+              ssd:
+                description: |-
+                  SSD enables SSD emulation feature
+                  SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk
+                  There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type
+                  SSD emulation is not supported on VirtIO Block drives.
+                type: boolean
+              storage:
+                description: |-
+                  Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data").
+                  If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage.
+                type: string
              required:
              - disk
              - sizeGb
@@ -139,12 +231,17 @@ spec:
              rule: self == oldSelf
            type: object
          format:
+            allOf:
+            - enum:
+              - raw
+              - qcow2
+              - vmdk
+            - enum:
+              - raw
+              - qcow2
+              - vmdk
            description: Format for file storage. Only valid for full clone.
-            enum:
-            - raw
-            - qcow2
-            - vmdk
            type: string
          full:
            default: true
diff --git a/docs/advanced-setups.md b/docs/advanced-setups.md
index 38c8f24f..218bcf45 100644
--- a/docs/advanced-setups.md
+++ b/docs/advanced-setups.md
@@ -308,6 +308,71 @@ spec:
 You can set either `ipv4PoolRef` or `ipv6PoolRef` or you can also set them both for dual-stack.
 It's up for you also to manage the IP Pool, you can choose a `GlobalInClusterIPPool` or an `InClusterIPPool`.

+## Additional Volumes
+By default, machines are created with only a boot volume. If additional disks are required for data storage, they can be
+specified in a ProxmoxMachineTemplate.
+
+```yaml
+kind: ProxmoxMachineTemplate
+spec:
+  template:
+    spec:
+      storage: local-lvm # Optional: a default storage to use when a volume doesn't set .storage
+      disks:
+        additionalVolumes:
+          - disk: scsi1
+            sizeGb: 200
+          - disk: scsi2 # target slot (e.g. scsi1, sata1, virtio1, ide2)
+            sizeGb: 80 # capacity in gigabytes
+            # Optional settings:
+            storage: my-nfs # Optional per-volume storage override. Uses .spec.template.spec.storage if omitted
+            format: qcow2 # Only specify if using file-backed storage. If omitted, the storage's default format is used.
+            discard: true
+            ioThread: true
+            ssd: true
+```
+In the same way, additionalVolumes can also be specified in ProxmoxClusters, ProxmoxClusterTemplates,
+and ProxmoxMachines. The flags format, discard, ioThread, and ssd are supported by this provider.
+See Proxmox [docs](https://pve.proxmox.com/pve-docs/qm.1.html#qm_hard_disk) for details about these flags.
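+
+For reference, given the example above, the reconciler would render roughly the following Proxmox disk
+options (a sketch based on this provider's value syntax: block-backed volumes render as `<storage>:<sizeGb>`,
+file-backed volumes as `<storage>:0,size=<sizeGb>G,format=<format>`, with flags appended):
+```
+scsi1: local-lvm:200
+scsi2: my-nfs:0,size=80G,format=qcow2,discard=on,iothread=1,ssd=1
+```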
+
+Alternatively, if using ClusterClass, define additionalVolumes in your Cluster:
+```yaml
+kind: Cluster
+spec:
+  topology:
+    class: proxmox-clusterclass-cilium-v0.1.0
+    variables:
+      - name: workerAdditionalVolumes
+        value:
+          - { disk: scsi1, sizeGb: 80, storage: my-lvm }
+          - { disk: ide1, sizeGb: 80, storage: my-zfs }
+      - name: controlPlaneAdditionalVolumes
+        value:
+          - { disk: virtio1, sizeGb: 80, storage: my-zfs }
+      - name: loadBalancerAdditionalVolumes
+        value:
+          - { disk: sata1, sizeGb: 80, storage: my-nfs, format: qcow2 }
+```
+To use the same storage for all machines of a given type, you can specify a storage variable (e.g. `workerStorage`)
+and omit `storage` from the `workerAdditionalVolumes`. For example, for workers:
+```yaml
+kind: Cluster
+metadata:
+  labels:
+    cluster.x-k8s.io/proxmox-cluster-cni: cilium
+  name: capmox-cluster
+spec:
+  topology:
+    class: proxmox-clusterclass-cilium-v0.1.0
+    variables:
+      - name: workerStorage
+        value: my-lvm
+      - name: workerAdditionalVolumes
+        value:
+          - { disk: scsi1, sizeGb: 80 }
+          - { disk: scsi2, sizeGb: 80 }
+```
+
 ## Notes

 * Clusters with IPV6 only is supported.
diff --git a/go.sum b/go.sum
index b87e8d5d..de00b833 100644
--- a/go.sum
+++ b/go.sum
@@ -157,6 +157,7 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4er
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg=
 github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
diff --git a/internal/service/vmservice/pending_guard.go b/internal/service/vmservice/pending_guard.go
new file mode 100644
index 00000000..0d01e3cf
--- /dev/null
+++ b/internal/service/vmservice/pending_guard.go
@@ -0,0 +1,84 @@
+// Unlike VMs, which have a unique ID for tracking, PVE provides no server-side ID for configuration items like disks.
+// When configuration items are specified, duplicate requests for the same slot (e.g. "scsi1") can occur, and PVE
+// creates spurious duplicates (e.g. "Unused Disks"). This "pending guard" mechanism avoids that: we queue the item
+// (e.g. a disk) to be added, mark it as pending, and skip further adds for that slot until the TTL expires. While
+// implemented for additionalVolumes, this is generalised to other VM configuration items. "slot" is applicable to
+// NICs (net#), USB passthrough (usb#), serial ports (serial#), PCI devices (hostpci#), and ISO/CDROMs.
+
+package vmservice
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope"
+)
+
+var (
+	pendingAdds         sync.Map
+	pendingTTL          = 15 * time.Second
+	pendingGuardEnabled = false // defaults to off
+)
+
+// EnablePendingGuard toggles the guard and clears any leftover keys.
+func EnablePendingGuard(enable bool) {
+	pendingGuardEnabled = enable
+	pendingAdds.Range(func(key, _ any) bool {
+		pendingAdds.Delete(key)
+		return true
+	})
+}
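+
+// Illustrative reconcile flow for one slot (a sketch; queueAdd stands in for
+// appending the proxmox.VirtualMachineOption in reconcileAdditionalVolumes):
+//
+//	if !diskSlotOccupied(cfg, "scsi1") && !isPending(scope, "scsi1") {
+//		queueAdd("scsi1")           // stage the ConfigureVM option
+//		markPending(scope, "scsi1") // suppress duplicate adds until the TTL expires
+//	}
+//	// once the slot appears in the VM config on a later reconcile:
+//	clearPending(scope, "scsi1")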
+// buildPendingKey builds a key for the pending map; it falls back to the
+// machineScope address and slot if identifying fields are missing.
+func buildPendingKey(machineScope *scope.MachineScope, slotName string) string {
+	namespace, machineName, machineUID := "", "", ""
+	if machineScope != nil && machineScope.ProxmoxMachine != nil {
+		namespace = machineScope.ProxmoxMachine.Namespace
+		machineName = machineScope.ProxmoxMachine.Name
+		machineUID = string(machineScope.ProxmoxMachine.UID)
+	}
+	slot := strings.ToLower(strings.TrimSpace(slotName))
+	if namespace == "" && machineName == "" && machineUID == "" {
+		return fmt.Sprintf("addr=%p|slot=%s", machineScope, slot)
+	}
+	return fmt.Sprintf(
+		"ns=%s|name=%s|uid=%s|slot=%s",
+		namespace, machineName, machineUID, slot,
+	)
+}
+
+func isPending(machineScope *scope.MachineScope, slot string) bool {
+	if !pendingGuardEnabled {
+		return false
+	}
+	key := buildPendingKey(machineScope, slot)
+	if raw, found := pendingAdds.Load(key); found {
+		if deadline, ok := raw.(time.Time); ok {
+			if time.Now().Before(deadline) {
+				return true // don't queue another
+			}
+			pendingAdds.Delete(key) // delete expired
+		} else {
+			pendingAdds.Delete(key)
+		}
+	}
+	return false
+}
+
+func markPending(machineScope *scope.MachineScope, slot string) {
+	if !pendingGuardEnabled {
+		return
+	}
+	key := buildPendingKey(machineScope, slot)
+	pendingAdds.Store(key, time.Now().Add(pendingTTL))
+}
+
+func clearPending(machineScope *scope.MachineScope, slot string) {
+	if !pendingGuardEnabled {
+		return
+	}
+	key := buildPendingKey(machineScope, slot)
+	pendingAdds.Delete(key)
+}
diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go
index 5b2ef743..df66e27b 100644
--- a/internal/service/vmservice/vm.go
+++ b/internal/service/vmservice/vm.go
@@ -19,6 +19,8 @@ package vmservice

 import (
 	"context"
+	"fmt"
+	"reflect"
 	"slices"
 	"strings"

@@ -53,6 +55,10 @@ const (
 // ErrNoVMIDInRangeFree is returned if no free VMID is found in the specified vmIDRange.
 var ErrNoVMIDInRangeFree = errors.New("No free vmid found in vmIDRange")

+func init() {
+	EnablePendingGuard(true) // prevents race-condition duplicates, e.g. for additionalVolumes
+}
+
 // ReconcileVM makes sure that the VM is in the desired state by:
 // 1. Creating the VM if it does not exist, then...
 // 2. Updating the VM with the bootstrap data, such as the cloud-init meta and user data, before...
@@ -115,6 +121,40 @@ func ReconcileVM(ctx context.Context, scope *scope.MachineScope) (infrav1alpha1.
return vm, nil } +// Report whether a VM disk slot (eg "scsi1") is already set in the VM's config: +func diskSlotOccupied(cfg any, slot string) bool { + if cfg == nil || slot == "" { + return false + } + slot = strings.TrimSpace(strings.ToLower(slot)) + var fieldName string + switch { + case strings.HasPrefix(slot, "scsi"): + fieldName = "SCSI" + slot[4:] + case strings.HasPrefix(slot, "sata"): + fieldName = "SATA" + slot[4:] + case strings.HasPrefix(slot, "ide"): + fieldName = "IDE" + slot[3:] + case strings.HasPrefix(slot, "virtio"): + fieldName = "VirtIO" + slot[6:] + default: + // Unknown bus: assume occupied to avoid creating junk: + return true + } + cfgValue := reflect.ValueOf(cfg) + if cfgValue.Kind() == reflect.Pointer { + cfgValue = cfgValue.Elem() + } + if !cfgValue.IsValid() { + return false + } + fieldValue := cfgValue.FieldByName(fieldName) + if !fieldValue.IsValid() || fieldValue.Kind() != reflect.String { + return false + } + return strings.TrimSpace(fieldValue.String()) != "" +} + func checkCloudInitStatus(ctx context.Context, machineScope *scope.MachineScope) (requeue bool, err error) { if !machineScope.VirtualMachine.IsRunning() { // skip if the vm is not running. @@ -287,6 +327,10 @@ func reconcileVirtualMachineConfig(ctx context.Context, machineScope *scope.Mach } } + if err := reconcileAdditionalVolumes(machineScope, vmConfig, &vmOptions); err != nil { + return false, err + } + if len(vmOptions) == 0 { return false, nil } @@ -497,3 +541,115 @@ var selectNextNode = scheduler.ScheduleVM func unmountCloudInitISO(ctx context.Context, machineScope *scope.MachineScope) error { return machineScope.InfraCluster.ProxmoxClient.UnmountCloudInitISO(ctx, machineScope.VirtualMachine, inject.CloudInitISODevice) } + +func reconcileAdditionalVolumes(machineScope *scope.MachineScope, vmConfig any, vmOptions *[]proxmox.VirtualMachineOption) error { + disksSpec := machineScope.ProxmoxMachine.Spec.Disks + if disksSpec == nil || len(disksSpec.AdditionalVolumes) == 0 { + return nil + } + findMatchingUnusedVolume := func(cfg any, storageName string) string { + if cfg == nil || storageName == "" { + return "" + } + cfgVal := reflect.ValueOf(cfg) + if cfgVal.Kind() == reflect.Pointer { + cfgVal = cfgVal.Elem() + } + if !cfgVal.IsValid() { + return "" + } + cfgType := cfgVal.Type() + for i := 0; i < cfgType.NumField(); i++ { + fieldMetadata := cfgType.Field(i) + if !strings.HasPrefix(fieldMetadata.Name, "Unused") { + continue + } + fieldValue := cfgVal.Field(i) + if !fieldValue.IsValid() || fieldValue.Kind() != reflect.String { + continue + } + volID := strings.TrimSpace(fieldValue.String()) // e.g. 
"vg_xxx:vm-103-disk-2" + if volID != "" && strings.HasPrefix(volID, storageName+":") { + return volID + } + } + return "" + } + pendingInReconcile := map[string]struct{}{} + for _, vol := range disksSpec.AdditionalVolumes { + slotName := strings.ToLower(strings.TrimSpace(vol.Disk)) + alreadySet := diskSlotOccupied(vmConfig, slotName) + machineScope.V(4).Info("additionalVolume: slot state", + "machine", machineScope.Name(), "slot", slotName, "occupied", alreadySet) + if alreadySet { + if pendingGuardEnabled { + clearPending(machineScope, slotName) + } + continue + } + if pendingGuardEnabled && isPending(machineScope, slotName) { + machineScope.V(4).Info("additionalVolume: skip, pending add in effect", + "machine", machineScope.Name(), "slot", slotName) + continue + } + if _, seen := pendingInReconcile[slotName]; seen { + machineScope.V(4).Info("additionalVolume: skip, add already queued in this reconcile", + "machine", machineScope.Name(), "slot", slotName) + continue + } + // Resolve storage (per-volume overrides machine-level) + var storageName string + if vol.Storage != nil && *vol.Storage != "" { + storageName = *vol.Storage + } else if machineScope.ProxmoxMachine.Spec.Storage != nil && *machineScope.ProxmoxMachine.Spec.Storage != "" { + storageName = *machineScope.ProxmoxMachine.Spec.Storage + } else { + return errors.New("additionalVolumes requires a storage to be set (either per-volume .storage or spec.storage)") + } + machineScope.V(4).Info("additionalVolume: resolved storage", + "machine", machineScope.Name(), "slot", slotName, "storage", storageName) + volumeValue := findMatchingUnusedVolume(vmConfig, storageName) + if volumeValue != "" { + machineScope.V(4).Info("additionalVolume: reattaching existing unused volume to avoid spurious extra disks due to reconcile race-condition (dedup)", + "machine", machineScope.Name(), "slot", slotName, "volumeID", volumeValue) + } else { + if vol.Format != nil && *vol.Format != "" { + volumeValue = fmt.Sprintf("%s:0,size=%dG,format=%s", storageName, vol.SizeGB, string(*vol.Format)) + machineScope.Info("additionalVolume: creating file-backed volume (with format notation)", + "machine", machineScope.Name(), "slot", slotName, "value", volumeValue) + } else { + volumeValue = fmt.Sprintf("%s:%d", storageName, vol.SizeGB) + machineScope.Info("additionalVolume: creating block-backed volume (without format notation)", + "machine", machineScope.Name(), "slot", slotName, "value", volumeValue) + } + } + // Add flags + if vol.Discard != nil && *vol.Discard { + volumeValue = fmt.Sprintf("%s,discard=on", volumeValue) + machineScope.V(4).Info("additionalVolume: appended flag", + "machine", machineScope.Name(), "slot", slotName, "flag", "discard=on") + } + if vol.IOThread != nil && *vol.IOThread { + volumeValue = fmt.Sprintf("%s,iothread=1", volumeValue) + machineScope.V(4).Info("additionalVolume: appended flag", + "machine", machineScope.Name(), "slot", slotName, "flag", "iothread=1") + } + if vol.SSD != nil && *vol.SSD { + volumeValue = fmt.Sprintf("%s,ssd=1", volumeValue) + machineScope.V(4).Info("additionalVolume: appended flag", + "machine", machineScope.Name(), "slot", slotName, "flag", "ssd=1") + } + *vmOptions = append(*vmOptions, proxmox.VirtualMachineOption{ + Name: vol.Disk, + Value: volumeValue, + }) + machineScope.V(4).Info("additionalVolume: queued vm option", + "machine", machineScope.Name(), "slot", slotName, "name", vol.Disk, "value", volumeValue) + pendingInReconcile[slotName] = struct{}{} + if pendingGuardEnabled { + markPending(machineScope, 
slotName) + } + } + + return nil +} diff --git a/internal/service/vmservice/vm_test.go b/internal/service/vmservice/vm_test.go index cbec7958..3eaa900a 100644 --- a/internal/service/vmservice/vm_test.go +++ b/internal/service/vmservice/vm_test.go @@ -35,6 +35,10 @@ import ( "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope" ) +func init() { + EnablePendingGuard(false) +} + func TestReconcileVM_EverythingReady(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) vm := newRunningVM() @@ -116,7 +120,7 @@ func TestReconcileVM_InitCheckDisabled(t *testing.T) { func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") - machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) + machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetFileStorageFormatRaw) machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") @@ -157,7 +161,7 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector(t *testing.T }, } machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") - machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) + machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetFileStorageFormatRaw) machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") @@ -202,7 +206,7 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_VMTemplateNo }, } machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") - machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) + machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetFileStorageFormatRaw) machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") @@ -496,7 +500,7 @@ func TestReconcileVirtualMachineConfigTags(t *testing.T) { func TestReconcileDisks_RunningVM(t *testing.T) { machineScope, _, _ := setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ - BootVolume: &infrav1alpha1.DiskSize{Disk: "ide0", SizeGB: 100}, + BootVolume: &infrav1alpha1.DiskSpec{Disk: "ide0", SizeGB: 100}, } machineScope.SetVirtualMachine(newRunningVM()) @@ -506,7 +510,7 @@ func TestReconcileDisks_RunningVM(t *testing.T) { func TestReconcileDisks_ResizeDisk(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ - BootVolume: &infrav1alpha1.DiskSize{Disk: "ide0", SizeGB: 100}, + BootVolume: &infrav1alpha1.DiskSpec{Disk: "ide0", SizeGB: 100}, } vm := newStoppedVM() machineScope.SetVirtualMachine(vm) @@ -517,6 +521,575 @@ func TestReconcileDisks_ResizeDisk(t *testing.T) { require.NoError(t, reconcileDisks(context.Background(), machineScope)) } +func TestReconcileVirtualMachineConfig_AdditionalVolumes(t *testing.T) { + ctx := context.Background() + + // 1) Block-backed syntax when no formats are specified + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + vm.VirtualMachineConfig.Sockets = 2 + vm.VirtualMachineConfig.Cores = 1 + machineScope.SetVirtualMachine(vm) + + // 
Machine-level format present, but NO per-volume format -> should still use BLOCK syntax
		storage := "nfs-templates"
		machineScope.ProxmoxMachine.Spec.Storage = &storage

		rawFmt := infrav1alpha1.TargetFileStorageFormat("raw")
		machineScope.ProxmoxMachine.Spec.Format = &rawFmt // ignored for additional volumes
		machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{
			AdditionalVolumes: []infrav1alpha1.DiskSpec{
				{Disk: "scsi1", SizeGB: 50}, // no per-volume format
			},
		}

		// Expect "<storage>:<sizeGb>" (block-backed syntax: no 'G' suffix, no format)
		expectedOptions := []interface{}{
			proxmox.VirtualMachineOption{
				Name:  "scsi1",
				Value: "nfs-templates:50",
			},
		}
		proxmoxClient.
			EXPECT().
			ConfigureVM(ctx, vm, expectedOptions...).
			Return(newTask(), nil).
			Once()

		requeue, err := reconcileVirtualMachineConfig(ctx, machineScope)
		require.NoError(t, err)
		require.True(t, requeue, "ConfigureVM should queue follow-up while task completes")
	}

	// 2) File-backed syntax with per-volume format (per-volume overrides machine-level)
	{
		machineScope, proxmoxClient, _ := setupReconcilerTest(t)

		vm := newStoppedVM()
		vm.VirtualMachineConfig.Sockets = 2
		vm.VirtualMachineConfig.Cores = 1
		machineScope.SetVirtualMachine(vm)

		storage := "nfs-store" // name only used in value rendering; presence of format selects file-backed syntax
		machineScope.ProxmoxMachine.Spec.Storage = &storage

		perVolFmt := infrav1alpha1.TargetFileStorageFormat("qcow2")
		machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{
			AdditionalVolumes: []infrav1alpha1.DiskSpec{
				{Disk: "scsi2", SizeGB: 80, Format: &perVolFmt},
			},
		}

		// Expect "<storage>:0,size=<sizeGb>G,format=<format>" (file-backed)
		expectedOptions := []interface{}{
			proxmox.VirtualMachineOption{
				Name:  "scsi2",
				Value: "nfs-store:0,size=80G,format=qcow2",
			},
		}
		proxmoxClient.
			EXPECT().
			ConfigureVM(ctx, vm, expectedOptions...).
			Return(newTask(), nil).
			Once()

		requeue, err := reconcileVirtualMachineConfig(ctx, machineScope)
		require.NoError(t, err)
		require.True(t, requeue)
	}

	// 3) Machine-level format is NOT applied to additional volumes: block syntax despite spec.format (no per-volume format)
	{
		machineScope, proxmoxClient, _ := setupReconcilerTest(t)

		vm := newStoppedVM()
		vm.VirtualMachineConfig.Sockets = 2
		vm.VirtualMachineConfig.Cores = 1
		machineScope.SetVirtualMachine(vm)

		storage := "nfs-store"
		machineScope.ProxmoxMachine.Spec.Storage = &storage

		machineFmt := infrav1alpha1.TargetFileStorageFormat("raw")
		machineScope.ProxmoxMachine.Spec.Format = &machineFmt
		machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{
			AdditionalVolumes: []infrav1alpha1.DiskSpec{
				{Disk: "scsi3", SizeGB: 200}, // no per-volume format
			},
		}

		expectedOptions := []interface{}{
			proxmox.VirtualMachineOption{
				Name:  "scsi3",
				Value: "nfs-store:200",
			},
		}
		proxmoxClient.
			EXPECT().
			ConfigureVM(ctx, vm, expectedOptions...).
			Return(newTask(), nil).
			Once()

		requeue, err := reconcileVirtualMachineConfig(ctx, machineScope)
		require.NoError(t, err)
		require.True(t, requeue)
	}
}

func TestReconcileVirtualMachineConfig_AdditionalVolumes_Block_NoFormat(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	machineScope, proxmoxClient, _ := setupReconcilerTest(t)

	vm := newStoppedVM()
	machineScope.SetVirtualMachine(vm)

	// Machine-level storage is block-backed (e.g., LVM-thin); no format anywhere.
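	// With no format set anywhere, reconcileAdditionalVolumes renders the block-backed
	// allocation syntax "<storage>:<sizeGb>" and leaves the volume format to the storage's default.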
	storage := "local-lvm"
	machineScope.ProxmoxMachine.Spec.Storage = &storage
	machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{
		AdditionalVolumes: []infrav1alpha1.DiskSpec{
			{Disk: "scsi1", SizeGB: 90}, // no per-volume format/storage
		},
	}

	// Expect block syntax "<storage>:<sizeGb>"
	expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi1", Value: "local-lvm:90"}}
	proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once()

	requeue, err := reconcileVirtualMachineConfig(ctx, machineScope)
	require.NoError(t, err)
	require.True(t, requeue)
}

func TestReconcileVirtualMachineConfig_AdditionalVolumes_File_PerVolumeFormatAndStorage(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	machineScope, proxmoxClient, _ := setupReconcilerTest(t)

	vm := newStoppedVM()
	machineScope.SetVirtualMachine(vm)

	// Per-volume specifies file-backed storage and format.
	nfs := "nfs-store"
	qcow2 := infrav1alpha1.TargetFileStorageFormat("qcow2")
	machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{
		AdditionalVolumes: []infrav1alpha1.DiskSpec{
			{Disk: "scsi1", SizeGB: 80, Storage: &nfs, Format: &qcow2},
		},
	}

	// Expect file syntax "<storage>:0,size=<N>G,format=<fmt>"
	expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi1", Value: "nfs-store:0,size=80G,format=qcow2"}}
	proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once()

	requeue, err := reconcileVirtualMachineConfig(ctx, machineScope)
	require.NoError(t, err)
	require.True(t, requeue)
}

func TestReconcileVirtualMachineConfig_AdditionalVolumes_Block_MachineFormatNotUsedWhenPerVolumeMissing(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	machineScope, proxmoxClient, _ := setupReconcilerTest(t)

	vm := newStoppedVM()
	machineScope.SetVirtualMachine(vm)

	// Machine-level format present, but it is not applied to additional volumes -> block syntax.
	nfs := "nfs-templates"
	format := infrav1alpha1.TargetFileStorageFormat("raw")
	machineScope.ProxmoxMachine.Spec.Format = &format
	machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{
		AdditionalVolumes: []infrav1alpha1.DiskSpec{
			{Disk: "scsi2", SizeGB: 50, Storage: &nfs}, // no per-volume format
		},
	}
	expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi2", Value: "nfs-templates:50"}}
	proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once()
	requeue, err := reconcileVirtualMachineConfig(ctx, machineScope)
	require.NoError(t, err)
	require.True(t, requeue)
}

func TestReconcileVirtualMachineConfig_AdditionalVolumes_PerVolumeStorageOverridesMachine(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	machineScope, proxmoxClient, _ := setupReconcilerTest(t)
	vm := newStoppedVM()
	machineScope.SetVirtualMachine(vm)

	// Machine-level storage is block; per-volume chooses file store + format.
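	// Per-volume .storage takes precedence over spec.storage, and the presence of
	// .format switches the rendered value to the file-backed syntax (see reconcileAdditionalVolumes).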
+ machineStorage := "local-lvm" + perVolStore := "nfs-a" + perVolFmt := infrav1alpha1.TargetFileStorageFormat("qcow2") + machineScope.ProxmoxMachine.Spec.Storage = &machineStorage + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 30, Storage: &perVolStore, Format: &perVolFmt}, + }, + } + + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi1", Value: "nfs-a:0,size=30G,format=qcow2"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_ErrorWhenNoStorageAnywhere(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, _, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + // No machine storage, no per-volume storage -> error + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 10}, // no Storage, no Format + }, + } + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.Error(t, err) + require.False(t, requeue) + require.Contains(t, err.Error(), "requires a storage to be set") +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_IdempotentWhenSlotOccupied(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, _, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + // Pretend scsi1 is already populated in VM config — reconcile should NOT call ConfigureVM. + vm.VirtualMachineConfig.SCSI1 = "local-lvm:20" + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 40}, + }, + } + + // No EXPECT() on proxmoxClient.ConfigureVM — any call would be an unexpected invocation and fail the test. + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.False(t, requeue, "reconcile should be a no-op when slot already occupied") +} +func TestReconcileVirtualMachineConfig_AdditionalVolumes_Block_DiscardTrue(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + dTrue := true + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 90, Discard: &dTrue}, + }, + } + + // Expect block syntax with ",discard=on" appended. 
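+ // The discard flag (like the ioThread and ssd flags tested further down) is only
+ // rendered when the pointer is non-nil and true; nil and false both leave the
+ // value unchanged. A sketch of the rule (illustrative, not the controller code):
+ //
+ //	func appendFlags(value string, vol infrav1alpha1.DiskSpec) string {
+ //		if vol.Discard != nil && *vol.Discard {
+ //			value += ",discard=on"
+ //		}
+ //		if vol.IOThread != nil && *vol.IOThread {
+ //			value += ",iothread=1"
+ //		}
+ //		if vol.SSD != nil && *vol.SSD {
+ //			value += ",ssd=1"
+ //		}
+ //		return value
+ //	}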
+ expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi1", Value: "local-lvm:90,discard=on"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_File_PerVolumeFormat_DiscardTrue(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + nfs := "nfs-store" + qcow2 := infrav1alpha1.TargetFileStorageFormat("qcow2") + dTrue := true + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi2", SizeGB: 80, Storage: &nfs, Format: &qcow2, Discard: &dTrue}, + }, + } + + // Expect file syntax with ",discard=on" appended. + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi2", Value: "nfs-store:0,size=80G,format=qcow2,discard=on"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_DiscardOmittedWhenNilOrFalse(t *testing.T) { + t.Parallel() + ctx := context.Background() + + // Case A: discard=nil -> omitted + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + // discard not set (nil) + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi3", SizeGB: 20}, + }, + } + + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi3", Value: "local-lvm:20"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + } + + // Case B: discard=false -> omitted (we only emit when explicitly true) + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + dFalse := false + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi4", SizeGB: 25, Discard: &dFalse}, + }, + } + + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi4", Value: "local-lvm:25"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + } +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_Block_IothreadTrue(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + iTrue := true + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 90, IOThread: &iTrue}, + 
},
+ }
+
+ // Expect block syntax with ",iothread=1" appended.
+ expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi1", Value: "local-lvm:90,iothread=1"}}
+ proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once()
+
+ requeue, err := reconcileVirtualMachineConfig(ctx, machineScope)
+ require.NoError(t, err)
+ require.True(t, requeue)
+}
+
+func TestReconcileVirtualMachineConfig_AdditionalVolumes_File_PerVolumeFormat_IothreadTrue(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+ machineScope, proxmoxClient, _ := setupReconcilerTest(t)
+
+ vm := newStoppedVM()
+ machineScope.SetVirtualMachine(vm)
+
+ nfs := "nfs-store"
+ qcow2 := infrav1alpha1.TargetFileStorageFormat("qcow2")
+ iTrue := true
+ machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{
+ AdditionalVolumes: []infrav1alpha1.DiskSpec{
+ {Disk: "scsi2", SizeGB: 80, Storage: &nfs, Format: &qcow2, IOThread: &iTrue},
+ },
+ }
+
+ // Expect file syntax with ",iothread=1" appended.
+ expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi2", Value: "nfs-store:0,size=80G,format=qcow2,iothread=1"}}
+ proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once()
+
+ requeue, err := reconcileVirtualMachineConfig(ctx, machineScope)
+ require.NoError(t, err)
+ require.True(t, requeue)
+}
+
+func TestReconcileVirtualMachineConfig_AdditionalVolumes_Iothread_OmittedWhenNilOrFalse(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+
+ // Case A: ioThread=nil -> omitted
+ {
+ machineScope, proxmoxClient, _ := setupReconcilerTest(t)
+ vm := newStoppedVM()
+ machineScope.SetVirtualMachine(vm)
+
+ storage := "local-lvm"
+ machineScope.ProxmoxMachine.Spec.Storage = &storage
+ machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{
+ AdditionalVolumes: []infrav1alpha1.DiskSpec{
+ {Disk: "scsi3", SizeGB: 20},
+ },
+ }
+
+ expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi3", Value: "local-lvm:20"}}
+ proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once()
+
+ requeue, err := reconcileVirtualMachineConfig(ctx, machineScope)
+ require.NoError(t, err)
+ require.True(t, requeue)
+ }
+
+ // Case B: ioThread=false -> omitted (only emit when explicitly true)
+ {
+ machineScope, proxmoxClient, _ := setupReconcilerTest(t)
+ vm := newStoppedVM()
+ machineScope.SetVirtualMachine(vm)
+
+ storage := "local-lvm"
+ machineScope.ProxmoxMachine.Spec.Storage = &storage
+ iFalse := false
+ machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{
+ AdditionalVolumes: []infrav1alpha1.DiskSpec{
+ {Disk: "scsi4", SizeGB: 25, IOThread: &iFalse},
+ },
+ }
+
+ expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi4", Value: "local-lvm:25"}}
+ proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once()
+
+ requeue, err := reconcileVirtualMachineConfig(ctx, machineScope)
+ require.NoError(t, err)
+ require.True(t, requeue)
+ }
+}
+
+func TestReconcileVirtualMachineConfig_AdditionalVolumes_Block_SSDTrue(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+ machineScope, proxmoxClient, _ := setupReconcilerTest(t)
+
+ vm := newStoppedVM()
+ machineScope.SetVirtualMachine(vm)
+
+ storage := "local-lvm"
+ machineScope.ProxmoxMachine.Spec.Storage = &storage
+ sTrue := true
+ machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{
+ AdditionalVolumes: []infrav1alpha1.DiskSpec{
+ {Disk: "scsi1",
SizeGB: 90, SSD: &sTrue}, + }, + } + + // Expect block syntax with ",ssd=1" appended. + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi1", Value: "local-lvm:90,ssd=1"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_File_PerVolumeFormat_SSDTrue(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + nfs := "nfs-store" + qcow2 := infrav1alpha1.TargetFileStorageFormat("qcow2") + sTrue := true + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi2", SizeGB: 80, Storage: &nfs, Format: &qcow2, SSD: &sTrue}, + }, + } + + // Expect file syntax with ",ssd=1" appended. + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi2", Value: "nfs-store:0,size=80G,format=qcow2,ssd=1"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_SSD_OmittedWhenNilOrFalse(t *testing.T) { + t.Parallel() + ctx := context.Background() + + // Case A: ssd=nil -> omitted + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi3", SizeGB: 20}, + }, + } + + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi3", Value: "local-lvm:20"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + } + + // Case B: ssd=false -> omitted (only emit when explicitly true) + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + sFalse := false + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi4", SizeGB: 25, SSD: &sFalse}, + }, + } + + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi4", Value: "local-lvm:25"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + } +} + func TestReconcileMachineAddresses_IPV4(t *testing.T) { machineScope, _, _ := setupReconcilerTest(t) vm := newRunningVM() diff --git a/internal/webhook/proxmoxmachine_webhook_test.go b/internal/webhook/proxmoxmachine_webhook_test.go index c39bf0fa..b1ec8f6c 100644 --- a/internal/webhook/proxmoxmachine_webhook_test.go +++ b/internal/webhook/proxmoxmachine_webhook_test.go @@ -151,7 +151,7 @@ func validProxmoxMachine(name string) infrav1.ProxmoxMachine { NumCores: 1, MemoryMiB: 1024, Disks: 
&infrav1.Storage{
- BootVolume: &infrav1.DiskSize{
+ BootVolume: &infrav1.DiskSpec{
Disk: "scsi[0]",
SizeGB: 10,
},
diff --git a/templates/cluster-class-calico.yaml b/templates/cluster-class-calico.yaml
index 83e80a70..bcc4c0a7 100644
--- a/templates/cluster-class-calico.yaml
+++ b/templates/cluster-class-calico.yaml
@@ -89,6 +89,75 @@ spec:
namingStrategy:
template: "{{ .cluster.name }}-loadbalancer-{{ .random }}"
variables:
+ - name: workerAdditionalVolumes
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: array
+ description: "Extra disks for worker nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)"
+ items:
+ type: object
+ required: ["disk","sizeGb"]
+ properties:
+ disk: { type: string, description: "Target slot (e.g. scsi1)" }
+ sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" }
+ storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" }
+ format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" }
+ discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." }
+ ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." }
+ ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." }
+ - name: workerStorage
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ description: "Default Proxmox storage for worker nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)."
+ - name: controlPlaneAdditionalVolumes
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: array
+ description: "Extra disks for control-plane nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)"
+ items:
+ type: object
+ required: ["disk","sizeGb"]
+ properties:
+ disk: { type: string, description: "Target slot (e.g. scsi1)" }
+ sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" }
+ storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" }
+ format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" }
+ discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." }
+ ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." }
+ ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." }
+ - name: controlPlaneStorage
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ description: "Default Proxmox storage for control plane nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)"
+ - name: loadBalancerAdditionalVolumes
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: array
+ description: "Extra disks for load-balancer nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)."
+ items:
+ type: object
+ required: ["disk","sizeGb"]
+ properties:
+ disk: { type: string, description: "Target slot (e.g.
scsi1)" } + sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" } + storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } + format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } + discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } + ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } + - name: loadBalancerStorage + required: false + schema: + openAPIV3Schema: + type: string + description: "Default Proxmox storage for load-balancer nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." - name: controlPlaneEndpoint required: true schema: @@ -378,6 +447,125 @@ spec: workerNode: *machineSpec loadBalancer: *machineSpec patches: + - name: worker-additional-volumes + description: "Add additional disks to workers via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." + enabledIf: "{{ if .workerAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-worker"] + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .workerAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: worker-storage + description: "Default Proxmox storage for worker nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." + enabledIf: "{{ if .workerStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-worker"] + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: workerStorage + - name: controlplane-additional-volumes + description: "Add additional disks to control-plane via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." 
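+ # Example of the variable value these additional-volume patches consume and the
+ # YAML their template renders (hypothetical values, for illustration only):
+ #
+ #   # On the Cluster:
+ #   variables:
+ #     - name: controlPlaneAdditionalVolumes
+ #       value:
+ #         - disk: scsi1
+ #           sizeGb: 100
+ #           storage: nfs-data
+ #           format: qcow2
+ #           discard: true
+ #
+ #   # Rendered by the template below:
+ #   additionalVolumes:
+ #     - { disk: scsi1, sizeGb: 100, storage: nfs-data, format: qcow2, discard: true }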
+ enabledIf: "{{ if .controlPlaneAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .controlPlaneAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: control-plane-storage + description: "Default Proxmox storage for control-plane nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." + enabledIf: "{{ if .controlPlaneStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: controlPlaneStorage + - name: lb-additional-volumes + description: "Add additional disks to load-balancers via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." + enabledIf: "{{ if .loadBalancerAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: [ "proxmox-loadbalancer" ] + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .loadBalancerAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: load-balancer-storage + description: "Default Proxmox storage for loadbalancer nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." 
+ enabledIf: "{{ if .loadBalancerStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-loadbalancer"] + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: workerStorage - name: ProxmoxClusterTemplateGeneral description: "Configure Cluster" definitions: diff --git a/templates/cluster-class-cilium.yaml b/templates/cluster-class-cilium.yaml index 7b696ae7..6c53bcd4 100644 --- a/templates/cluster-class-cilium.yaml +++ b/templates/cluster-class-cilium.yaml @@ -59,7 +59,7 @@ spec: status: "False" timeout: 300s namingStrategy: - template: "{{ .cluster.name }}-worker-{{ .random }}" + template: "{{ .cluster.name }}-worker-{{ .random }}" - class: proxmox-loadbalancer template: bootstrap: @@ -87,8 +87,77 @@ spec: status: "False" timeout: 300s namingStrategy: - template: "{{ .cluster.name }}-loadbalancer-{{ .random }}" + template: "{{ .cluster.name }}-loadbalancer-{{ .random }}" variables: + - name: workerAdditionalVolumes + required: false + schema: + openAPIV3Schema: + type: array + description: "Extra disks for worker nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)" + items: + type: object + required: ["disk","sizeGb"] + properties: + disk: { type: string, description: "Target slot (e.g. scsi1)" } + sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" } + storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } + format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } + discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } + ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } + - name: workerStorage + required: false + schema: + openAPIV3Schema: + type: string + description: "Default Proxmox storage for worker nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." + - name: controlPlaneAdditionalVolumes + required: false + schema: + openAPIV3Schema: + type: array + description: "Extra disks for control-plane nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)" + items: + type: object + required: ["disk","sizeGb"] + properties: + disk: { type: string, description: "Target slot (e.g. scsi1)" } + sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" } + storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } + format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } + discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } + ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." 
}
+ - name: controlPlaneStorage
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ description: "Default Proxmox storage for control plane nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)"
+ - name: loadBalancerAdditionalVolumes
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: array
+ description: "Extra disks for load-balancer nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)."
+ items:
+ type: object
+ required: ["disk","sizeGb"]
+ properties:
+ disk: { type: string, description: "Target slot (e.g. scsi1)" }
+ sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" }
+ storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" }
+ format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" }
+ discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." }
+ ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." }
+ ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." }
+ - name: loadBalancerStorage
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ description: "Default Proxmox storage for load-balancer nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)."
- name: controlPlaneEndpoint
required: true
schema:
@@ -378,6 +447,125 @@ spec:
workerNode: *machineSpec
loadBalancer: *machineSpec
patches:
+ - name: worker-additional-volumes
+ description: "Add additional disks to workers via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)."
+ enabledIf: "{{ if .workerAdditionalVolumes }}true{{ end }}"
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+ kind: ProxmoxMachineTemplate
+ matchResources:
+ controlPlane: false
+ machineDeploymentClass:
+ names: ["proxmox-worker"]
+ jsonPatches:
+ - op: add
+ path: /spec/template/spec/disks
+ valueFrom:
+ template: |
+ additionalVolumes:
+ {{- range $i, $v := .workerAdditionalVolumes }}
+ - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }}
+ {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }}
+ {{- if $v.format }}, format: {{ $v.format }}{{- end }}
+ {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }}
+ {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }}
+ {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} }
+ {{- end }}
+ - name: worker-storage
+ description: "Default Proxmox storage for worker nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)."
+ enabledIf: "{{ if .workerStorage }}true{{ end }}"
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+ kind: ProxmoxMachineTemplate
+ matchResources:
+ controlPlane: false
+ machineDeploymentClass:
+ names: ["proxmox-worker"]
+ jsonPatches:
+ - op: add
+ path: /spec/template/spec/storage
+ valueFrom:
+ variable: workerStorage
+ - name: controlplane-additional-volumes
+ description: "Add additional disks to control-plane via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)."
+ enabledIf: "{{ if .controlPlaneAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .controlPlaneAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: control-plane-storage + description: "Default Proxmox storage for control-plane nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." + enabledIf: "{{ if .controlPlaneStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: controlPlaneStorage + - name: lb-additional-volumes + description: "Add additional disks to load-balancers via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." + enabledIf: "{{ if .loadBalancerAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: [ "proxmox-loadbalancer" ] + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .loadBalancerAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: load-balancer-storage + description: "Default Proxmox storage for loadbalancer nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." + enabledIf: "{{ if .loadBalancerStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-loadbalancer"] + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: workerStorage - name: ProxmoxClusterTemplateGeneral description: "Configure Cluster" definitions: diff --git a/templates/cluster-class.yaml b/templates/cluster-class.yaml index cecdd33d..98468b95 100644 --- a/templates/cluster-class.yaml +++ b/templates/cluster-class.yaml @@ -59,6 +59,75 @@ spec: namingStrategy: template: "{{ .cluster.name }}-loadbalancer-{{ .random }}" variables: + - name: workerAdditionalVolumes + required: false + schema: + openAPIV3Schema: + type: array + description: "Extra disks for worker nodes (per-volume storage + optional format). 
Deprecated in favor of .spec.disks.volumes (coming in a future API)"
+ items:
+ type: object
+ required: ["disk","sizeGb"]
+ properties:
+ disk: { type: string, description: "Target slot (e.g. scsi1)" }
+ sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" }
+ storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" }
+ format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" }
+ discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." }
+ ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." }
+ ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." }
+ - name: workerStorage
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ description: "Default Proxmox storage for worker nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)."
+ - name: controlPlaneAdditionalVolumes
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: array
+ description: "Extra disks for control-plane nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)"
+ items:
+ type: object
+ required: ["disk","sizeGb"]
+ properties:
+ disk: { type: string, description: "Target slot (e.g. scsi1)" }
+ sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" }
+ storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" }
+ format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" }
+ discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." }
+ ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." }
+ ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." }
+ - name: controlPlaneStorage
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: string
+ description: "Default Proxmox storage for control plane nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)"
+ - name: loadBalancerAdditionalVolumes
+ required: false
+ schema:
+ openAPIV3Schema:
+ type: array
+ description: "Extra disks for load-balancer nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)."
+ items:
+ type: object
+ required: ["disk","sizeGb"]
+ properties:
+ disk: { type: string, description: "Target slot (e.g. scsi1)" }
+ sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" }
+ storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" }
+ format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" }
+ discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." }
+ ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." }
+ ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only."
} + - name: loadBalancerStorage + required: false + schema: + openAPIV3Schema: + type: string + description: "Default Proxmox storage for load-balancer nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." - name: controlPlaneEndpoint required: true schema: @@ -348,6 +417,125 @@ spec: workerNode: *machineSpec loadBalancer: *machineSpec patches: + - name: worker-additional-volumes + description: "Add additional disks to workers via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." + enabledIf: "{{ if .workerAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-worker"] + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .workerAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: worker-storage + description: "Default Proxmox storage for worker nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." + enabledIf: "{{ if .workerStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-worker"] + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: workerStorage + - name: controlplane-additional-volumes + description: "Add additional disks to control-plane via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." + enabledIf: "{{ if .controlPlaneAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .controlPlaneAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: control-plane-storage + description: "Default Proxmox storage for control-plane nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." + enabledIf: "{{ if .controlPlaneStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: controlPlaneStorage + - name: lb-additional-volumes + description: "Add additional disks to load-balancers via variable. 
Deprecated in favor of .spec.disks.volumes (coming in a future API)."
+ enabledIf: "{{ if .loadBalancerAdditionalVolumes }}true{{ end }}"
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+ kind: ProxmoxMachineTemplate
+ matchResources:
+ controlPlane: false
+ machineDeploymentClass:
+ names: [ "proxmox-loadbalancer" ]
+ jsonPatches:
+ - op: add
+ path: /spec/template/spec/disks
+ valueFrom:
+ template: |
+ additionalVolumes:
+ {{- range $i, $v := .loadBalancerAdditionalVolumes }}
+ - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }}
+ {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }}
+ {{- if $v.format }}, format: {{ $v.format }}{{- end }}
+ {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }}
+ {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }}
+ {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} }
+ {{- end }}
+ - name: load-balancer-storage
+ description: "Default Proxmox storage for loadbalancer nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)."
+ enabledIf: "{{ if .loadBalancerStorage }}true{{ end }}"
+ definitions:
+ - selector:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+ kind: ProxmoxMachineTemplate
+ matchResources:
+ controlPlane: false
+ machineDeploymentClass:
+ names: ["proxmox-loadbalancer"]
+ jsonPatches:
+ - op: add
+ path: /spec/template/spec/storage
+ valueFrom:
+ variable: loadBalancerStorage
- name: ProxmoxClusterTemplateGeneral
description: "Configure Cluster"
definitions: