From 313e33a5f2401e4939cfee9e6c2518f18f68587c Mon Sep 17 00:00:00 2001 From: Mohamed Chiheb Ben Jemaa Date: Thu, 24 Jul 2025 18:13:53 +0200 Subject: [PATCH 1/5] Introduce AdditionalVolumes in the Machine Spec --- api/v1alpha1/proxmoxmachine_types.go | 14 ++++++---- api/v1alpha1/proxmoxmachine_types_test.go | 2 +- api/v1alpha1/zz_generated.deepcopy.go | 15 ++++++---- ...ture.cluster.x-k8s.io_proxmoxclusters.yaml | 28 +++++++++++++++++++ ...ster.x-k8s.io_proxmoxclustertemplates.yaml | 28 +++++++++++++++++++ ...ture.cluster.x-k8s.io_proxmoxmachines.yaml | 28 +++++++++++++++++++ ...ster.x-k8s.io_proxmoxmachinetemplates.yaml | 28 +++++++++++++++++++ internal/service/vmservice/vm_test.go | 4 +-- .../webhook/proxmoxmachine_webhook_test.go | 2 +- 9 files changed, 134 insertions(+), 15 deletions(-) diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index 0c26c8a5..74e8426f 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -140,15 +140,17 @@ type Storage struct { // to change the size of the boot volume. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" // +optional - BootVolume *DiskSize `json:"bootVolume,omitempty"` + BootVolume *DiskSpec `json:"bootVolume,omitempty"` // TODO Intended to add handling for additional volumes, // which will be added to the node. - // e.g. AdditionalVolumes []DiskSize. + // AdditionalVolumes defines additional volumes to be added to the virtual machine. + // +optional + AdditionalVolumes []DiskSpec `json:"additionalVolumes,omitempty"` } -// DiskSize is contains values for the disk device and size. -type DiskSize struct { +// DiskSpec is contains values for the disk device and size. +type DiskSpec struct { // Disk is the name of the disk device, that should be resized. // Example values are: ide[0-3], scsi[0-30], sata[0-5]. 
Disk string `json:"disk"` @@ -583,7 +585,7 @@ type ProxmoxMachine struct { Status ProxmoxMachineStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // ProxmoxMachineList contains a list of ProxmoxMachine. type ProxmoxMachineList struct { @@ -632,7 +634,7 @@ func (r *ProxmoxMachine) GetNode() string { } // FormatSize returns the format required for the Proxmox API. -func (d *DiskSize) FormatSize() string { +func (d *DiskSpec) FormatSize() string { return fmt.Sprintf("%dG", d.SizeGB) } diff --git a/api/v1alpha1/proxmoxmachine_types_test.go b/api/v1alpha1/proxmoxmachine_types_test.go index e6f14fb5..a741ad74 100644 --- a/api/v1alpha1/proxmoxmachine_types_test.go +++ b/api/v1alpha1/proxmoxmachine_types_test.go @@ -44,7 +44,7 @@ func defaultMachine() *ProxmoxMachine { }, }, Disks: &Storage{ - BootVolume: &DiskSize{ + BootVolume: &DiskSpec{ Disk: "scsi0", SizeGB: 100, }, diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index c1951100..d67de886 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -29,16 +29,16 @@ func (in *AdditionalNetworkDevice) DeepCopy() *AdditionalNetworkDevice { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DiskSize) DeepCopyInto(out *DiskSize) { +func (in *DiskSpec) DeepCopyInto(out *DiskSpec) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSize. -func (in *DiskSize) DeepCopy() *DiskSize { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSpec. 
+func (in *DiskSpec) DeepCopy() *DiskSpec { if in == nil { return nil } - out := new(DiskSize) + out := new(DiskSpec) in.DeepCopyInto(out) return out } @@ -933,9 +933,14 @@ func (in *Storage) DeepCopyInto(out *Storage) { *out = *in if in.BootVolume != nil { in, out := &in.BootVolume, &out.BootVolume - *out = new(DiskSize) + *out = new(DiskSpec) **out = **in } + if in.AdditionalVolumes != nil { + in, out := &in.AdditionalVolumes, &out.AdditionalVolumes + *out = make([]DiskSpec, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index 5c6ffdda..d43ac6ce 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -105,6 +105,34 @@ spec: Disks contains a set of disk configuration options, which will be applied before the first startup. properties: + additionalVolumes: + description: |- + which will be added to the node. + AdditionalVolumes defines additional volumes to be added to the virtual machine. + items: + description: DiskSpec is contains values for the disk + device and size. + properties: + disk: + description: |- + Disk is the name of the disk device, that should be resized. + Example values are: ide[0-3], scsi[0-30], sata[0-5]. + type: string + sizeGb: + description: |- + Size defines the size in gigabyte. + + As Proxmox does not support shrinking, the size + must be bigger than the already configured size in the + template. + format: int32 + minimum: 5 + type: integer + required: + - disk + - sizeGb + type: object + type: array bootVolume: description: |- BootVolume defines the storage size for the boot volume. 
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 26e26e1e..9f328f63 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -129,6 +129,34 @@ spec: Disks contains a set of disk configuration options, which will be applied before the first startup. properties: + additionalVolumes: + description: |- + which will be added to the node. + AdditionalVolumes defines additional volumes to be added to the virtual machine. + items: + description: DiskSpec is contains values for + the disk device and size. + properties: + disk: + description: |- + Disk is the name of the disk device, that should be resized. + Example values are: ide[0-3], scsi[0-30], sata[0-5]. + type: string + sizeGb: + description: |- + Size defines the size in gigabyte. + + As Proxmox does not support shrinking, the size + must be bigger than the already configured size in the + template. + format: int32 + minimum: 5 + type: integer + required: + - disk + - sizeGb + type: object + type: array bootVolume: description: |- BootVolume defines the storage size for the boot volume. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index 45cff41b..ad11b855 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -97,6 +97,34 @@ spec: Disks contains a set of disk configuration options, which will be applied before the first startup. properties: + additionalVolumes: + description: |- + which will be added to the node. + AdditionalVolumes defines additional volumes to be added to the virtual machine. 
+ items: + description: DiskSpec is contains values for the disk device + and size. + properties: + disk: + description: |- + Disk is the name of the disk device, that should be resized. + Example values are: ide[0-3], scsi[0-30], sata[0-5]. + type: string + sizeGb: + description: |- + Size defines the size in gigabyte. + + As Proxmox does not support shrinking, the size + must be bigger than the already configured size in the + template. + format: int32 + minimum: 5 + type: integer + required: + - disk + - sizeGb + type: object + type: array bootVolume: description: |- BootVolume defines the storage size for the boot volume. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index c6e1a2ec..78251456 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -109,6 +109,34 @@ spec: Disks contains a set of disk configuration options, which will be applied before the first startup. properties: + additionalVolumes: + description: |- + which will be added to the node. + AdditionalVolumes defines additional volumes to be added to the virtual machine. + items: + description: DiskSpec is contains values for the disk + device and size. + properties: + disk: + description: |- + Disk is the name of the disk device, that should be resized. + Example values are: ide[0-3], scsi[0-30], sata[0-5]. + type: string + sizeGb: + description: |- + Size defines the size in gigabyte. + + As Proxmox does not support shrinking, the size + must be bigger than the already configured size in the + template. + format: int32 + minimum: 5 + type: integer + required: + - disk + - sizeGb + type: object + type: array bootVolume: description: |- BootVolume defines the storage size for the boot volume. 
diff --git a/internal/service/vmservice/vm_test.go b/internal/service/vmservice/vm_test.go index cbec7958..b1356a9d 100644 --- a/internal/service/vmservice/vm_test.go +++ b/internal/service/vmservice/vm_test.go @@ -496,7 +496,7 @@ func TestReconcileVirtualMachineConfigTags(t *testing.T) { func TestReconcileDisks_RunningVM(t *testing.T) { machineScope, _, _ := setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ - BootVolume: &infrav1alpha1.DiskSize{Disk: "ide0", SizeGB: 100}, + BootVolume: &infrav1alpha1.DiskSpec{Disk: "ide0", SizeGB: 100}, } machineScope.SetVirtualMachine(newRunningVM()) @@ -506,7 +506,7 @@ func TestReconcileDisks_RunningVM(t *testing.T) { func TestReconcileDisks_ResizeDisk(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ - BootVolume: &infrav1alpha1.DiskSize{Disk: "ide0", SizeGB: 100}, + BootVolume: &infrav1alpha1.DiskSpec{Disk: "ide0", SizeGB: 100}, } vm := newStoppedVM() machineScope.SetVirtualMachine(vm) diff --git a/internal/webhook/proxmoxmachine_webhook_test.go b/internal/webhook/proxmoxmachine_webhook_test.go index c39bf0fa..b1ec8f6c 100644 --- a/internal/webhook/proxmoxmachine_webhook_test.go +++ b/internal/webhook/proxmoxmachine_webhook_test.go @@ -151,7 +151,7 @@ func validProxmoxMachine(name string) infrav1.ProxmoxMachine { NumCores: 1, MemoryMiB: 1024, Disks: &infrav1.Storage{ - BootVolume: &infrav1.DiskSize{ + BootVolume: &infrav1.DiskSpec{ Disk: "scsi[0]", SizeGB: 10, }, From c9dc3da276c21b065b8da78507db06690b013c97 Mon Sep 17 00:00:00 2001 From: holmesb <5072156+holmesb@users.noreply.github.com> Date: Sat, 4 Oct 2025 18:44:35 +0100 Subject: [PATCH 2/5] Implement AdditionalVolumes. Include avoidance of race duplicates. 
Fixes #504 --- api/v1alpha1/proxmoxmachine_types.go | 20 +- api/v1alpha1/proxmoxmachine_types_test.go | 267 ++++++++ api/v1alpha1/zz_generated.deepcopy.go | 31 +- ...ture.cluster.x-k8s.io_proxmoxclusters.yaml | 43 +- ...ster.x-k8s.io_proxmoxclustertemplates.yaml | 43 +- ...ture.cluster.x-k8s.io_proxmoxmachines.yaml | 43 +- ...ster.x-k8s.io_proxmoxmachinetemplates.yaml | 43 +- docs/advanced-setups.md | 65 ++ go.sum | 1 + internal/service/vmservice/pending_guard.go | 84 +++ internal/service/vmservice/vm.go | 154 +++++ internal/service/vmservice/vm_test.go | 573 ++++++++++++++++++ templates/cluster-class-calico.yaml | 185 ++++++ templates/cluster-class-cilium.yaml | 189 +++++- templates/cluster-class.yaml | 185 ++++++ 15 files changed, 1896 insertions(+), 30 deletions(-) create mode 100644 internal/service/vmservice/pending_guard.go diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index 74e8426f..0085856d 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -142,8 +142,6 @@ type Storage struct { // +optional BootVolume *DiskSpec `json:"bootVolume,omitempty"` - // TODO Intended to add handling for additional volumes, - // which will be added to the node. // AdditionalVolumes defines additional volumes to be added to the virtual machine. // +optional AdditionalVolumes []DiskSpec `json:"additionalVolumes,omitempty"` @@ -154,15 +152,25 @@ type DiskSpec struct { // Disk is the name of the disk device, that should be resized. // Example values are: ide[0-3], scsi[0-30], sata[0-5]. Disk string `json:"disk"` - // Size defines the size in gigabyte. - // // As Proxmox does not support shrinking, the size // must be bigger than the already configured size in the // template. - // // +kubebuilder:validation:Minimum=5 SizeGB int32 `json:"sizeGb"` + // Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + // If omitted, falls back to the machine's .spec.storage. 
+ // +optional + Storage *string `json:"storage,omitempty"` + // Format is optional: + Format *TargetFileStorageFormat `json:"format,omitempty"` + // Discard enables TRIM/UNMAP support for this virtual disk. + // Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + // If omitted or false, the flag is not set. + // +optional + Discard *bool `json:"discard,omitempty"` + Iothread *bool `json:"iothread,omitempty"` + SSD *bool `json:"ssd,omitempty"` } // TargetFileStorageFormat the target format of the cloned disk. @@ -585,7 +593,7 @@ type ProxmoxMachine struct { Status ProxmoxMachineStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +//+kubebuilder:object:root=true // ProxmoxMachineList contains a list of ProxmoxMachine. type ProxmoxMachineList struct { diff --git a/api/v1alpha1/proxmoxmachine_types_test.go b/api/v1alpha1/proxmoxmachine_types_test.go index a741ad74..7b2c25e6 100644 --- a/api/v1alpha1/proxmoxmachine_types_test.go +++ b/api/v1alpha1/proxmoxmachine_types_test.go @@ -18,6 +18,7 @@ package v1alpha1 import ( "context" + "encoding/json" "strconv" . 
"github.com/onsi/ginkgo/v2" @@ -177,6 +178,272 @@ var _ = Describe("ProxmoxMachine Test", func() { }) }) + Context("AdditionalVolumes format/storage - JSON marshalling", func() { + It("includes format and storage when set", func() { + f := TargetFileStorageFormat("qcow2") + s := "nfs-templates" + ds := DiskSpec{ + Disk: "scsi1", + SizeGB: 80, + Format: &f, + Storage: &s, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi1"`)) + Expect(js).To(ContainSubstring(`"sizeGb":80`)) + Expect(js).To(ContainSubstring(`"format":"qcow2"`)) + Expect(js).To(ContainSubstring(`"storage":"nfs-templates"`)) + }) + It("omits format and storage when nil", func() { + ds := DiskSpec{ + Disk: "scsi2", + SizeGB: 120, + Format: nil, + Storage: nil, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi2"`)) + Expect(js).To(ContainSubstring(`"sizeGb":120`)) + Expect(js).NotTo(ContainSubstring(`"format"`)) + Expect(js).NotTo(ContainSubstring(`"storage"`)) + }) + }) + + Context("AdditionalVolumes format/storage - DeepCopy", func() { + It("preserves per-volume format and storage and performs a deep copy", func() { + qcow2 := TargetFileStorageFormat("qcow2") + store := "filestore-a" + src := &Storage{ + AdditionalVolumes: []DiskSpec{ + {Disk: "scsi1", SizeGB: 80, Format: &qcow2, Storage: &store}, + }, + } + dst := src.DeepCopy() + Expect(dst).NotTo(BeNil()) + Expect(dst.AdditionalVolumes).To(HaveLen(1)) + got := dst.AdditionalVolumes[0] + Expect(got.Disk).To(Equal("scsi1")) + Expect(got.SizeGB).To(Equal(int32(80))) + Expect(got.Format).NotTo(BeNil()) + Expect(*got.Format).To(Equal(TargetFileStorageFormat("qcow2"))) + Expect(got.Storage).NotTo(BeNil()) + Expect(*got.Storage).To(Equal("filestore-a")) + newFmt := TargetFileStorageFormat("raw") + newStore := "filestore-b" + *src.AdditionalVolumes[0].Format = newFmt + 
*src.AdditionalVolumes[0].Storage = newStore + Expect(dst.AdditionalVolumes[0].Format).NotTo(BeNil()) + Expect(*dst.AdditionalVolumes[0].Format).To(Equal(TargetFileStorageFormat("qcow2"))) + Expect(dst.AdditionalVolumes[0].Storage).NotTo(BeNil()) + Expect(*dst.AdditionalVolumes[0].Storage).To(Equal("filestore-a")) + }) + }) + + Context("AdditionalVolumes discard - JSON marshalling", func() { + It("includes discard when explicitly true", func() { + dTrue := true + ds := DiskSpec{ + Disk: "scsi3", + SizeGB: 60, + Discard: &dTrue, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi3"`)) + Expect(js).To(ContainSubstring(`"sizeGb":60`)) + Expect(js).To(ContainSubstring(`"discard":true`)) + }) + It("includes discard when explicitly false (non-nil pointer)", func() { + dFalse := false + ds := DiskSpec{ + Disk: "scsi4", + SizeGB: 70, + Discard: &dFalse, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi4"`)) + Expect(js).To(ContainSubstring(`"sizeGb":70`)) + // Because Discard is a bool, omitempty does NOT drop a false: + Expect(js).To(ContainSubstring(`"discard":false`)) + }) + It("omits discard when nil", func() { + ds := DiskSpec{ + Disk: "scsi5", + SizeGB: 80, + Discard: nil, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi5"`)) + Expect(js).To(ContainSubstring(`"sizeGb":80`)) + Expect(js).NotTo(ContainSubstring(`"discard"`)) + }) + }) + + Context("AdditionalVolumes discard - DeepCopy", func() { + It("preserves per-volume discard and performs a deep copy", func() { + dTrue := true + src := &Storage{ + AdditionalVolumes: []DiskSpec{ + {Disk: "scsi6", SizeGB: 90, Discard: &dTrue}, + }, + } + dst := src.DeepCopy() + Expect(dst).NotTo(BeNil()) + Expect(dst.AdditionalVolumes).To(HaveLen(1)) + got := dst.AdditionalVolumes[0] + 
Expect(got.Disk).To(Equal("scsi6")) + Expect(got.SizeGB).To(Equal(int32(90))) + Expect(got.Discard).NotTo(BeNil()) + Expect(*got.Discard).To(BeTrue()) + *src.AdditionalVolumes[0].Discard = false + Expect(dst.AdditionalVolumes[0].Discard).NotTo(BeNil()) + Expect(*dst.AdditionalVolumes[0].Discard).To(BeTrue()) + }) + }) + Context("AdditionalVolumes iothread - JSON marshalling", func() { + It("includes iothread when explicitly true", func() { + tTrue := true + ds := DiskSpec{ + Disk: "scsi7", + SizeGB: 60, + Iothread: &tTrue, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi7"`)) + Expect(js).To(ContainSubstring(`"sizeGb":60`)) + Expect(js).To(ContainSubstring(`"iothread":true`)) + }) + It("includes iothread when explicitly false", func() { + tFalse := false + ds := DiskSpec{ + Disk: "scsi8", + SizeGB: 70, + Iothread: &tFalse, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi8"`)) + Expect(js).To(ContainSubstring(`"sizeGb":70`)) + Expect(js).To(ContainSubstring(`"iothread":false`)) // non-nil -> present + }) + + It("omits iothread when nil", func() { + ds := DiskSpec{ + Disk: "scsi9", + SizeGB: 80, + Iothread: nil, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi9"`)) + Expect(js).To(ContainSubstring(`"sizeGb":80`)) + Expect(js).NotTo(ContainSubstring(`"iothread"`)) + }) + }) + + Context("AdditionalVolumes iothread - DeepCopy", func() { + It("preserves per-volume iothread and performs a deep copy", func() { + tTrue := true + src := &Storage{ + AdditionalVolumes: []DiskSpec{ + {Disk: "scsi10", SizeGB: 90, Iothread: &tTrue}, + }, + } + dst := src.DeepCopy() + Expect(dst).NotTo(BeNil()) + Expect(dst.AdditionalVolumes).To(HaveLen(1)) + got := dst.AdditionalVolumes[0] + Expect(got.Disk).To(Equal("scsi10")) + 
Expect(got.SizeGB).To(Equal(int32(90))) + Expect(got.Iothread).NotTo(BeNil()) + Expect(*got.Iothread).To(BeTrue()) + *src.AdditionalVolumes[0].Iothread = false + Expect(dst.AdditionalVolumes[0].Iothread).NotTo(BeNil()) + Expect(*dst.AdditionalVolumes[0].Iothread).To(BeTrue()) + }) + }) + Context("AdditionalVolumes ssd - JSON marshalling", func() { + It("includes ssd when explicitly true", func() { + sTrue := true + ds := DiskSpec{ + Disk: "scsi11", + SizeGB: 60, + SSD: &sTrue, + } + + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + + Expect(js).To(ContainSubstring(`"disk":"scsi11"`)) + Expect(js).To(ContainSubstring(`"sizeGb":60`)) + Expect(js).To(ContainSubstring(`"ssd":true`)) + }) + It("includes ssd when explicitly false", func() { + sFalse := false + ds := DiskSpec{ + Disk: "scsi12", + SizeGB: 70, + SSD: &sFalse, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi12"`)) + Expect(js).To(ContainSubstring(`"sizeGb":70`)) + Expect(js).To(ContainSubstring(`"ssd":false`)) // non-nil -> present + }) + It("omits ssd when nil", func() { + ds := DiskSpec{ + Disk: "scsi13", + SizeGB: 80, + SSD: nil, + } + b, err := json.Marshal(ds) + Expect(err).NotTo(HaveOccurred()) + js := string(b) + Expect(js).To(ContainSubstring(`"disk":"scsi13"`)) + Expect(js).To(ContainSubstring(`"sizeGb":80`)) + Expect(js).NotTo(ContainSubstring(`"ssd"`)) + }) + }) + + Context("AdditionalVolumes ssd - DeepCopy", func() { + It("preserves per-volume ssd and performs a deep copy", func() { + sTrue := true + src := &Storage{ + AdditionalVolumes: []DiskSpec{ + {Disk: "scsi14", SizeGB: 90, SSD: &sTrue}, + }, + } + dst := src.DeepCopy() + Expect(dst).NotTo(BeNil()) + Expect(dst.AdditionalVolumes).To(HaveLen(1)) + got := dst.AdditionalVolumes[0] + Expect(got.Disk).To(Equal("scsi14")) + Expect(got.SizeGB).To(Equal(int32(90))) + Expect(got.SSD).NotTo(BeNil()) + Expect(*got.SSD).To(BeTrue()) + 
// Mutate source; destination should remain unchanged + *src.AdditionalVolumes[0].SSD = false + Expect(dst.AdditionalVolumes[0].SSD).NotTo(BeNil()) + Expect(*dst.AdditionalVolumes[0].SSD).To(BeTrue()) + }) + }) + Context("Network", func() { It("Should set default bridge", func() { dm := defaultMachine() diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index d67de886..cd67d635 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -31,6 +31,31 @@ func (in *AdditionalNetworkDevice) DeepCopy() *AdditionalNetworkDevice { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DiskSpec) DeepCopyInto(out *DiskSpec) { *out = *in + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(TargetFileStorageFormat) + **out = **in + } + if in.Discard != nil { + in, out := &in.Discard, &out.Discard + *out = new(bool) + **out = **in + } + if in.Iothread != nil { + in, out := &in.Iothread, &out.Iothread + *out = new(bool) + **out = **in + } + if in.SSD != nil { + in, out := &in.SSD, &out.SSD + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSpec. 
@@ -934,12 +959,14 @@ func (in *Storage) DeepCopyInto(out *Storage) { if in.BootVolume != nil { in, out := &in.BootVolume, &out.BootVolume *out = new(DiskSpec) - **out = **in + (*in).DeepCopyInto(*out) } if in.AdditionalVolumes != nil { in, out := &in.AdditionalVolumes, &out.AdditionalVolumes *out = make([]DiskSpec, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index d43ac6ce..d848493b 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -106,28 +106,44 @@ spec: which will be applied before the first startup. properties: additionalVolumes: - description: |- - which will be added to the node. - AdditionalVolumes defines additional volumes to be added to the virtual machine. + description: AdditionalVolumes defines additional volumes + to be added to the virtual machine. items: description: DiskSpec is contains values for the disk device and size. properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean disk: description: |- Disk is the name of the disk device, that should be resized. Example values are: ide[0-3], scsi[0-30], sata[0-5]. type: string + format: + description: 'Format is optional:' + type: string + iothread: + type: boolean sizeGb: description: |- Size defines the size in gigabyte. - As Proxmox does not support shrinking, the size must be bigger than the already configured size in the template. format: int32 minimum: 5 type: integer + ssd: + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). 
+ If omitted, falls back to the machine's .spec.storage. + type: string required: - disk - sizeGb @@ -139,21 +155,38 @@ spec: This field is optional, and should only be set if you want to change the size of the boot volume. properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean disk: description: |- Disk is the name of the disk device, that should be resized. Example values are: ide[0-3], scsi[0-30], sata[0-5]. type: string + format: + description: 'Format is optional:' + type: string + iothread: + type: boolean sizeGb: description: |- Size defines the size in gigabyte. - As Proxmox does not support shrinking, the size must be bigger than the already configured size in the template. format: int32 minimum: 5 type: integer + ssd: + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage. + type: string required: - disk - sizeGb diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 9f328f63..578ae40b 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -130,28 +130,44 @@ spec: which will be applied before the first startup. properties: additionalVolumes: - description: |- - which will be added to the node. - AdditionalVolumes defines additional volumes to be added to the virtual machine. + description: AdditionalVolumes defines additional + volumes to be added to the virtual machine. items: description: DiskSpec is contains values for the disk device and size. 
properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean disk: description: |- Disk is the name of the disk device, that should be resized. Example values are: ide[0-3], scsi[0-30], sata[0-5]. type: string + format: + description: 'Format is optional:' + type: string + iothread: + type: boolean sizeGb: description: |- Size defines the size in gigabyte. - As Proxmox does not support shrinking, the size must be bigger than the already configured size in the template. format: int32 minimum: 5 type: integer + ssd: + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage. + type: string required: - disk - sizeGb @@ -163,21 +179,38 @@ spec: This field is optional, and should only be set if you want to change the size of the boot volume. properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean disk: description: |- Disk is the name of the disk device, that should be resized. Example values are: ide[0-3], scsi[0-30], sata[0-5]. type: string + format: + description: 'Format is optional:' + type: string + iothread: + type: boolean sizeGb: description: |- Size defines the size in gigabyte. - As Proxmox does not support shrinking, the size must be bigger than the already configured size in the template. format: int32 minimum: 5 type: integer + ssd: + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage. 
+ type: string required: - disk - sizeGb diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index ad11b855..e2971d86 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -98,28 +98,44 @@ spec: which will be applied before the first startup. properties: additionalVolumes: - description: |- - which will be added to the node. - AdditionalVolumes defines additional volumes to be added to the virtual machine. + description: AdditionalVolumes defines additional volumes to be + added to the virtual machine. items: description: DiskSpec is contains values for the disk device and size. properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean disk: description: |- Disk is the name of the disk device, that should be resized. Example values are: ide[0-3], scsi[0-30], sata[0-5]. type: string + format: + description: 'Format is optional:' + type: string + iothread: + type: boolean sizeGb: description: |- Size defines the size in gigabyte. - As Proxmox does not support shrinking, the size must be bigger than the already configured size in the template. format: int32 minimum: 5 type: integer + ssd: + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage. + type: string required: - disk - sizeGb @@ -131,21 +147,38 @@ spec: This field is optional, and should only be set if you want to change the size of the boot volume. properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. 
+ Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean disk: description: |- Disk is the name of the disk device, that should be resized. Example values are: ide[0-3], scsi[0-30], sata[0-5]. type: string + format: + description: 'Format is optional:' + type: string + iothread: + type: boolean sizeGb: description: |- Size defines the size in gigabyte. - As Proxmox does not support shrinking, the size must be bigger than the already configured size in the template. format: int32 minimum: 5 type: integer + ssd: + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage. + type: string required: - disk - sizeGb diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index 78251456..09fa1101 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -110,28 +110,44 @@ spec: which will be applied before the first startup. properties: additionalVolumes: - description: |- - which will be added to the node. - AdditionalVolumes defines additional volumes to be added to the virtual machine. + description: AdditionalVolumes defines additional volumes + to be added to the virtual machine. items: description: DiskSpec is contains values for the disk device and size. properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean disk: description: |- Disk is the name of the disk device, that should be resized. Example values are: ide[0-3], scsi[0-30], sata[0-5]. 
type: string + format: + description: 'Format is optional:' + type: string + iothread: + type: boolean sizeGb: description: |- Size defines the size in gigabyte. - As Proxmox does not support shrinking, the size must be bigger than the already configured size in the template. format: int32 minimum: 5 type: integer + ssd: + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage. + type: string required: - disk - sizeGb @@ -143,21 +159,38 @@ spec: This field is optional, and should only be set if you want to change the size of the boot volume. properties: + discard: + description: |- + Discard enables TRIM/UNMAP support for this virtual disk. + Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". + If omitted or false, the flag is not set. + type: boolean disk: description: |- Disk is the name of the disk device, that should be resized. Example values are: ide[0-3], scsi[0-30], sata[0-5]. type: string + format: + description: 'Format is optional:' + type: string + iothread: + type: boolean sizeGb: description: |- Size defines the size in gigabyte. - As Proxmox does not support shrinking, the size must be bigger than the already configured size in the template. format: int32 minimum: 5 type: integer + ssd: + type: boolean + storage: + description: |- + Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). + If omitted, falls back to the machine's .spec.storage. + type: string required: - disk - sizeGb diff --git a/docs/advanced-setups.md b/docs/advanced-setups.md index 38c8f24f..2ef13851 100644 --- a/docs/advanced-setups.md +++ b/docs/advanced-setups.md @@ -308,6 +308,71 @@ spec: You can set either `ipv4PoolRef` or `ipv6PoolRef` or you can also set them both for dual-stack. It's up for you also to manage the IP Pool, you can choose a `GlobalInClusterIPPool` or an `InClusterIPPool`. 
+## Additional Volumes
+By default, only a boot volume is created in machines. If additional disks are required for data storage, they can be
+specified in the ProxmoxMachineTemplates.
+
+```yaml
+kind: ProxmoxMachineTemplate
+spec:
+  template:
+    spec:
+      storage: local-lvm # Optional: a default storage to use when a volume doesn't set .storage
+      disks:
+        additionalVolumes:
+        - disk: scsi1
+          sizeGb: 200
+        - disk: scsi2 # target slot (e.g. scsi1, sata1, virtio1, ide2)
+          sizeGb: 80 # capacity in gigabytes
+          # Optional flags:
+          storage: my-nfs # Optional per-volume storage override. Uses .spec.template.spec.storage if omitted
+          format: qcow2 # Only specify if using file-backed storage. If omitted, default for disk is used.
+          discard: true
+          iothread: true
+          ssd: true
+```
+In the same way, additionalVolumes can also be specified in ProxmoxClusters, ProxmoxClusterTemplates,
+and ProxmoxMachines. Flags: format, discard, iothread, and ssd are supported by this provider.
+See Proxmox [docs](https://pve.proxmox.com/pve-docs/qm.1.html#qm_hard_disk) for details about these flags.
+
+Alternatively, if using ClusterClass, define additionalVolumes in your cluster:
+```yaml
+kind: Cluster
+spec:
+  topology:
+    class: proxmox-clusterclass-cilium-v0.1.0
+    variables:
+      - name: workerAdditionalVolumes
+        value:
+          - { disk: scsi1, sizeGb: 80, storage: my-lvm }
+          - { disk: ide1, sizeGb: 80, storage: my-zfs }
+      - name: controlPlaneAdditionalVolumes
+        value:
+          - { disk: virtio1, sizeGb: 80, storage: my-zfs }
+      - name: loadBalancerAdditionalVolumes
+        value:
+          - { disk: sata1, sizeGb: 80, storage: my-nfs, format: qcow2 }
+```
+To use the same storage for all machines of a given type, you can specify a `Storage` variable and then omit `storage`
+from the `workerAdditionalVolumes`.
E.g. for workers:
+```yaml
+kind: Cluster
+metadata:
+  labels:
+    cluster.x-k8s.io/proxmox-cluster-cni: cilium
+  name: capmox-cluster
+spec:
+  topology:
+    class: proxmox-clusterclass-cilium-v0.1.0
+    variables:
+      - name: workerStorage
+        value: my-lvm
+      - name: workerAdditionalVolumes
+        value:
+          - { disk: scsi1, sizeGb: 80 }
+          - { disk: scsi2, sizeGb: 80 }
+```
+
 ## Notes
 
 * Clusters with IPV6 only is supported.
diff --git a/go.sum b/go.sum
index b87e8d5d..de00b833 100644
--- a/go.sum
+++ b/go.sum
@@ -157,6 +157,7 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4er
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg=
 github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
diff --git a/internal/service/vmservice/pending_guard.go b/internal/service/vmservice/pending_guard.go
new file mode 100644
index 00000000..0d01e3cf
--- /dev/null
+++ b/internal/service/vmservice/pending_guard.go
@@ -0,0 +1,84 @@
+// Unlike VMs, which have a unique ID for tracking, PVE provides no server-side ID for configuration items like disks.
+// When configuration items are specified, duplicate requests for the same slot (eg "scsi1") occur, and PVE creates
+// spurious duplicates (eg "Unused Disks"). This "pending guard" mechanism avoids this. We queue the item (eg disk) to be
+// added, mark it as pending, and skip further adds for that slot until the TTL expires. While implemented for
+// additionalVolumes, this is generalised to other VM configuration items.
"slot" is applicable to NICs (net#), +// USB passthrough (usb#), serial ports (serial#), PCI devices (hostpci#), and ISO/CDROMs. + +package vmservice + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope" +) + +var ( + pendingAdds sync.Map + pendingTTL = 15 * time.Second + pendingGuardEnabled = false // defaults to off +) + +// Toggle the guard and clear any leftover keys: +func EnablePendingGuard(enable bool) { + pendingGuardEnabled = enable + pendingAdds.Range(func(key, _ any) bool { + pendingAdds.Delete(key) + return true + }) +} + +// Build a key. Fall-back to machineScope & slot if identifying fields are missing: +func buildPendingKey(machineScope *scope.MachineScope, slotName string) string { + namespace, machineName, machineUID := "", "", "" + if machineScope != nil && machineScope.ProxmoxMachine != nil { + namespace = machineScope.ProxmoxMachine.Namespace + machineName = machineScope.ProxmoxMachine.Name + machineUID = string(machineScope.ProxmoxMachine.UID) + } + slot := strings.ToLower(strings.TrimSpace(slotName)) + if namespace == "" && machineName == "" && machineUID == "" { + return fmt.Sprintf("addr=%p|slot=%s", machineScope, slot) + } + return fmt.Sprintf( + "ns=%s|name=%s|uid=%s|slot=%s", + namespace, machineName, machineUID, slot, + ) +} + +func isPending(machineScope *scope.MachineScope, slot string) bool { + if !pendingGuardEnabled { + return false + } + key := buildPendingKey(machineScope, slot) + if raw, found := pendingAdds.Load(key); found { + if deadline, ok := raw.(time.Time); ok { + if time.Now().Before(deadline) { + return true // don't queue another + } + pendingAdds.Delete(key) // delete expired + } else { + pendingAdds.Delete(key) + } + } + return false +} + +func markPending(machineScope *scope.MachineScope, slot string) { + if !pendingGuardEnabled { + return + } + key := buildPendingKey(machineScope, slot) + pendingAdds.Store(key, time.Now().Add(pendingTTL)) +} + +func 
clearPending(machineScope *scope.MachineScope, slot string) { + if !pendingGuardEnabled { + return + } + key := buildPendingKey(machineScope, slot) + pendingAdds.Delete(key) +} diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index 5b2ef743..4d5576c2 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -19,6 +19,8 @@ package vmservice import ( "context" + "fmt" + "reflect" "slices" "strings" @@ -53,6 +55,10 @@ const ( // ErrNoVMIDInRangeFree is returned if no free VMID is found in the specified vmIDRange. var ErrNoVMIDInRangeFree = errors.New("No free vmid found in vmIDRange") +func init() { + EnablePendingGuard(true) // prevents race-condition duplicates, eg additionalVolumes +} + // ReconcileVM makes sure that the VM is in the desired state by: // 1. Creating the VM if it does not exist, then... // 2. Updating the VM with the bootstrap data, such as the cloud-init meta and user data, before... @@ -115,6 +121,40 @@ func ReconcileVM(ctx context.Context, scope *scope.MachineScope) (infrav1alpha1. 
return vm, nil } +// Report whether a VM disk slot (eg "scsi1") is already set in the VM's config: +func diskSlotOccupied(cfg any, slot string) bool { + if cfg == nil || slot == "" { + return false + } + slot = strings.TrimSpace(strings.ToLower(slot)) + var fieldName string + switch { + case strings.HasPrefix(slot, "scsi"): + fieldName = "SCSI" + slot[4:] + case strings.HasPrefix(slot, "sata"): + fieldName = "SATA" + slot[4:] + case strings.HasPrefix(slot, "ide"): + fieldName = "IDE" + slot[3:] + case strings.HasPrefix(slot, "virtio"): + fieldName = "VirtIO" + slot[6:] + default: + // Unknown bus: assume occupied to avoid creating junk: + return true + } + cfgValue := reflect.ValueOf(cfg) + if cfgValue.Kind() == reflect.Pointer { + cfgValue = cfgValue.Elem() + } + if !cfgValue.IsValid() { + return false + } + fieldValue := cfgValue.FieldByName(fieldName) + if !fieldValue.IsValid() || fieldValue.Kind() != reflect.String { + return false + } + return strings.TrimSpace(fieldValue.String()) != "" +} + func checkCloudInitStatus(ctx context.Context, machineScope *scope.MachineScope) (requeue bool, err error) { if !machineScope.VirtualMachine.IsRunning() { // skip if the vm is not running. 
@@ -287,6 +327,120 @@ func reconcileVirtualMachineConfig(ctx context.Context, machineScope *scope.Mach } } + // Additional data disks + disksSpec := machineScope.ProxmoxMachine.Spec.Disks + if disksSpec != nil && len(disksSpec.AdditionalVolumes) > 0 { + // Find an "Unused Disk" on the same storage to reattach (dedup): + findMatchingUnusedVolume := func(cfg any, storageName string) string { + if cfg == nil || storageName == "" { + return "" + } + cfgVal := reflect.ValueOf(cfg) + if cfgVal.Kind() == reflect.Pointer { + cfgVal = cfgVal.Elem() + } + if !cfgVal.IsValid() { + return "" + } + cfgType := cfgVal.Type() + + // Unused# fields: + for i := 0; i < cfgType.NumField(); i++ { + fieldMetadata := cfgType.Field(i) + if !strings.HasPrefix(fieldMetadata.Name, "Unused") { + continue + } + fieldValue := cfgVal.Field(i) + if !fieldValue.IsValid() || fieldValue.Kind() != reflect.String { + continue + } + volID := strings.TrimSpace(fieldValue.String()) // e.g. "vg_xxx:vm-103-disk-2" + if volID != "" && strings.HasPrefix(volID, storageName+":") { + return volID + } + } + return "" + } + + // Avoid queueing duplicate adds for the same slot within this reconcile: + pendingInReconcile := map[string]struct{}{} + for _, vol := range disksSpec.AdditionalVolumes { + slotName := strings.ToLower(strings.TrimSpace(vol.Disk)) + alreadySet := diskSlotOccupied(vmConfig, slotName) + machineScope.V(4).Info("additionalVolume: slot state", + "machine", machineScope.Name(), "slot", slotName, "occupied", alreadySet) + if alreadySet { + if pendingGuardEnabled { + clearPending(machineScope, slotName) + } + continue + } + if pendingGuardEnabled && isPending(machineScope, slotName) { + machineScope.V(4).Info("additionalVolume: skip, pending add in effect", + "machine", machineScope.Name(), "slot", slotName) + continue + } + if _, seen := pendingInReconcile[slotName]; seen { + machineScope.V(4).Info("additionalVolume: skip, add already queued in this reconcile", + "machine", machineScope.Name(), 
"slot", slotName) + continue + } + // Resolve storage (per-volume overrides machine-level): + var storageName string + if vol.Storage != nil && *vol.Storage != "" { + storageName = *vol.Storage + } else if machineScope.ProxmoxMachine.Spec.Storage != nil && *machineScope.ProxmoxMachine.Spec.Storage != "" { + storageName = *machineScope.ProxmoxMachine.Spec.Storage + } else { + return false, errors.New("additionalVolumes requires a storage to be set (either per-volume .storage or spec.storage)") + } + machineScope.V(4).Info("additionalVolume: resolved storage", + "machine", machineScope.Name(), "slot", slotName, "storage", storageName) + + volumeValue := findMatchingUnusedVolume(vmConfig, storageName) + if volumeValue != "" { + machineScope.V(4).Info("additionalVolume: reattaching existing unused volume to avoid spurious extra disks due to reconcile race-condition (dedup)", + "machine", machineScope.Name(), "slot", slotName, "volumeID", volumeValue) + } else { + if vol.Format != nil && *vol.Format != "" { + volumeValue = fmt.Sprintf("%s:0,size=%dG,format=%s", storageName, vol.SizeGB, string(*vol.Format)) + machineScope.Info("additionalVolume: creating file-backed volume (with format notation)", + "machine", machineScope.Name(), "slot", slotName, "value", volumeValue) + } else { + volumeValue = fmt.Sprintf("%s:%d", storageName, vol.SizeGB) + machineScope.Info("additionalVolume: creating block-backed volume (without format notation)", + "machine", machineScope.Name(), "slot", slotName, "value", volumeValue) + } + } + // Add flags: + if vol.Discard != nil && *vol.Discard { + volumeValue = fmt.Sprintf("%s,discard=on", volumeValue) + machineScope.V(4).Info("additionalVolume: appended flag", + "machine", machineScope.Name(), "slot", slotName, "flag", "discard=on") + } + if vol.Iothread != nil && *vol.Iothread { + volumeValue = fmt.Sprintf("%s,iothread=1", volumeValue) + machineScope.V(4).Info("additionalVolume: appended flag", + "machine", machineScope.Name(), "slot", 
slotName, "flag", "iothread=1") + } + if vol.SSD != nil && *vol.SSD { + volumeValue = fmt.Sprintf("%s,ssd=1", volumeValue) + machineScope.V(4).Info("additionalVolume: appended flag", + "machine", machineScope.Name(), "slot", slotName, "flag", "ssd=1") + } + vmOptions = append(vmOptions, proxmox.VirtualMachineOption{ + Name: vol.Disk, + Value: volumeValue, + }) + machineScope.V(4).Info("additionalVolume: queued vm option", + "machine", machineScope.Name(), "slot", slotName, "name", vol.Disk, "value", volumeValue) + pendingInReconcile[slotName] = struct{}{} + if pendingGuardEnabled { + markPending(machineScope, slotName) + } + } + } + if len(vmOptions) == 0 { return false, nil } diff --git a/internal/service/vmservice/vm_test.go b/internal/service/vmservice/vm_test.go index b1356a9d..4e73487c 100644 --- a/internal/service/vmservice/vm_test.go +++ b/internal/service/vmservice/vm_test.go @@ -35,6 +35,10 @@ import ( "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope" ) +func init() { + EnablePendingGuard(false) +} + func TestReconcileVM_EverythingReady(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) vm := newRunningVM() @@ -517,6 +521,575 @@ func TestReconcileDisks_ResizeDisk(t *testing.T) { require.NoError(t, reconcileDisks(context.Background(), machineScope)) } +func TestReconcileVirtualMachineConfig_AdditionalVolumes(t *testing.T) { + ctx := context.Background() + + // 1) Block-backed syntax when no formats are specified + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + vm.VirtualMachineConfig.Sockets = 2 + vm.VirtualMachineConfig.Cores = 1 + machineScope.SetVirtualMachine(vm) + + // Machine-level format present, but NO per-volume format -> should still use BLOCK syntax + storage := "nfs-templates" + machineScope.ProxmoxMachine.Spec.Storage = &storage + + rawFmt := infrav1alpha1.TargetFileStorageFormat("raw") + machineScope.ProxmoxMachine.Spec.Format = &rawFmt // ignored for additional 
volumes + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 50}, // no per-volume format + }, + } + + // Expect ":" (block-backed syntax - no 'G', no format) + expectedOptions := []interface{}{ + proxmox.VirtualMachineOption{ + Name: "scsi1", + Value: "nfs-templates:50", + }, + } + proxmoxClient. + EXPECT(). + ConfigureVM(ctx, vm, expectedOptions...). + Return(newTask(), nil). + Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue, "ConfigureVM should queue follow-up while task completes") + } + + // 2) File-backed syntax with per-volume format (per-volume overrides machine-level) + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + vm.VirtualMachineConfig.Sockets = 2 + vm.VirtualMachineConfig.Cores = 1 + machineScope.SetVirtualMachine(vm) + + storage := "nfs-store" // name only used in value rendering; presence of format selects file-backed syntax + machineScope.ProxmoxMachine.Spec.Storage = &storage + + perVolFmt := infrav1alpha1.TargetFileStorageFormat("qcow2") + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi2", SizeGB: 80, Format: &perVolFmt}, + }, + } + + // Expect ":0,size=G,format=" (file-backed) + expectedOptions := []interface{}{ + proxmox.VirtualMachineOption{ + Name: "scsi2", + Value: "nfs-store:0,size=80G,format=qcow2", + }, + } + proxmoxClient. + EXPECT(). + ConfigureVM(ctx, vm, expectedOptions...). + Return(newTask(), nil). 
+ Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + } + + // 3) File-backed syntax with machine-level format fallback (no per-volume format) + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + vm.VirtualMachineConfig.Sockets = 2 + vm.VirtualMachineConfig.Cores = 1 + machineScope.SetVirtualMachine(vm) + + storage := "nfs-store" + machineScope.ProxmoxMachine.Spec.Storage = &storage + + machineFmt := infrav1alpha1.TargetFileStorageFormat("raw") + machineScope.ProxmoxMachine.Spec.Format = &machineFmt + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi3", SizeGB: 200}, // no per-volume format + }, + } + + expectedOptions := []interface{}{ + proxmox.VirtualMachineOption{ + Name: "scsi3", + Value: "nfs-store:200", + }, + } + proxmoxClient. + EXPECT(). + ConfigureVM(ctx, vm, expectedOptions...). + Return(newTask(), nil). + Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + } +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_Block_NoFormat(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + // Machine-level storage is block-backed (e.g., LVM-thin); no format anywhere. 
+ storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 90}, // no per-volume format/storage + }, + } + + // Expect block syntax ":" + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi1", Value: "local-lvm:90"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_File_PerVolumeFormatAndStorage(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + // Per-volume specifies file-backed storage and format. + nfs := "nfs-store" + qcow2 := infrav1alpha1.TargetFileStorageFormat("qcow2") + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 80, Storage: &nfs, Format: &qcow2}, + }, + } + + // Expect file syntax ":0,size=NG,format=fmt" + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi1", Value: "nfs-store:0,size=80G,format=qcow2"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_File_MachineFormatUsedWhenPerVolumeMissing(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + // Machine-level format present -> file syntax. 
+ nfs := "nfs-templates" + format := infrav1alpha1.TargetFileStorageFormat("raw") + machineScope.ProxmoxMachine.Spec.Format = &format + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi2", SizeGB: 50, Storage: &nfs}, // no per-volume format + }, + } + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi2", Value: "nfs-templates:50"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_PerVolumeStorageOverridesMachine(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + // Machine-level storage is block; per-volume chooses file store + format. 
+ machineStorage := "local-lvm" + perVolStore := "nfs-a" + perVolFmt := infrav1alpha1.TargetFileStorageFormat("qcow2") + machineScope.ProxmoxMachine.Spec.Storage = &machineStorage + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 30, Storage: &perVolStore, Format: &perVolFmt}, + }, + } + + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi1", Value: "nfs-a:0,size=30G,format=qcow2"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_ErrorWhenNoStorageAnywhere(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, _, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + // No machine storage, no per-volume storage -> error + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 10}, // no Storage, no Format + }, + } + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.Error(t, err) + require.False(t, requeue) + require.Contains(t, err.Error(), "requires a storage to be set") +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_IdempotentWhenSlotOccupied(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, _, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + // Pretend scsi1 is already populated in VM config — reconcile should NOT call ConfigureVM. 
+ vm.VirtualMachineConfig.SCSI1 = "local-lvm:20" + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 40}, + }, + } + + // No EXPECT() on proxmoxClient.ConfigureVM — any call would be an unexpected invocation and fail the test. + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.False(t, requeue, "reconcile should be a no-op when slot already occupied") +} +func TestReconcileVirtualMachineConfig_AdditionalVolumes_Block_DiscardTrue(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + dTrue := true + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 90, Discard: &dTrue}, + }, + } + + // Expect block syntax with ",discard=on" appended. 
+ expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi1", Value: "local-lvm:90,discard=on"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_File_PerVolumeFormat_DiscardTrue(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + nfs := "nfs-store" + qcow2 := infrav1alpha1.TargetFileStorageFormat("qcow2") + dTrue := true + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi2", SizeGB: 80, Storage: &nfs, Format: &qcow2, Discard: &dTrue}, + }, + } + + // Expect file syntax with ",discard=on" appended. + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi2", Value: "nfs-store:0,size=80G,format=qcow2,discard=on"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_DiscardOmittedWhenNilOrFalse(t *testing.T) { + t.Parallel() + ctx := context.Background() + + // Case A: discard=nil -> omitted + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + // discard not set (nil) + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi3", SizeGB: 20}, + }, + } + + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi3", Value: "local-lvm:20"}} + 
proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + } + + // Case B: discard=false -> omitted (we only emit when explicitly true) + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + dFalse := false + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi4", SizeGB: 25, Discard: &dFalse}, + }, + } + + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi4", Value: "local-lvm:25"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + } +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_Block_IothreadTrue(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + iTrue := true + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 90, Iothread: &iTrue}, + }, + } + + // Expect block syntax with ",iothread=1" appended. 
+ expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi1", Value: "local-lvm:90,iothread=1"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_File_PerVolumeFormat_IothreadTrue(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + nfs := "nfs-store" + qcow2 := infrav1alpha1.TargetFileStorageFormat("qcow2") + iTrue := true + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi2", SizeGB: 80, Storage: &nfs, Format: &qcow2, Iothread: &iTrue}, + }, + } + + // Expect file syntax with ",iothread=1" appended. + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi2", Value: "nfs-store:0,size=80G,format=qcow2,iothread=1"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_Iothread_OmittedWhenNilOrFalse(t *testing.T) { + t.Parallel() + ctx := context.Background() + + // Case A: iothread=nil -> omitted + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi3", SizeGB: 20}, + }, + } + + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi3", Value: "local-lvm:20"}} + 
proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + } + + // Case B: iothread=false -> omitted (only emit when explicitly true) + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + iFalse := false + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi4", SizeGB: 25, Iothread: &iFalse}, + }, + } + + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi4", Value: "local-lvm:25"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + } +} +func TestReconcileVirtualMachineConfig_AdditionalVolumes_Block_SSDTrue(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + sTrue := true + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi1", SizeGB: 90, SSD: &sTrue}, + }, + } + + // Expect block syntax with ",ssd=1" appended. 
+ expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi1", Value: "local-lvm:90,ssd=1"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_File_PerVolumeFormat_SSDTrue(t *testing.T) { + t.Parallel() + ctx := context.Background() + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + nfs := "nfs-store" + qcow2 := infrav1alpha1.TargetFileStorageFormat("qcow2") + sTrue := true + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi2", SizeGB: 80, Storage: &nfs, Format: &qcow2, SSD: &sTrue}, + }, + } + + // Expect file syntax with ",ssd=1" appended. + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi2", Value: "nfs-store:0,size=80G,format=qcow2,ssd=1"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) +} + +func TestReconcileVirtualMachineConfig_AdditionalVolumes_SSD_OmittedWhenNilOrFalse(t *testing.T) { + t.Parallel() + ctx := context.Background() + + // Case A: ssd=nil -> omitted + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi3", SizeGB: 20}, + }, + } + + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi3", Value: "local-lvm:20"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), 
vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + } + + // Case B: ssd=false -> omitted (only emit when explicitly true) + { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + vm := newStoppedVM() + machineScope.SetVirtualMachine(vm) + + storage := "local-lvm" + machineScope.ProxmoxMachine.Spec.Storage = &storage + sFalse := false + machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ + AdditionalVolumes: []infrav1alpha1.DiskSpec{ + {Disk: "scsi4", SizeGB: 25, SSD: &sFalse}, + }, + } + + expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi4", Value: "local-lvm:25"}} + proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() + + requeue, err := reconcileVirtualMachineConfig(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + } +} + func TestReconcileMachineAddresses_IPV4(t *testing.T) { machineScope, _, _ := setupReconcilerTest(t) vm := newRunningVM() diff --git a/templates/cluster-class-calico.yaml b/templates/cluster-class-calico.yaml index 83e80a70..8fbd8bef 100644 --- a/templates/cluster-class-calico.yaml +++ b/templates/cluster-class-calico.yaml @@ -89,6 +89,75 @@ spec: namingStrategy: template: "{{ .cluster.name }}-loadbalancer-{{ .random }}" variables: + - name: workerAdditionalVolumes + required: false + schema: + openAPIV3Schema: + type: array + description: "Extra disks for worker nodes (per-volume storage + optional format)." + items: + type: object + required: ["disk","sizeGb"] + properties: + disk: { type: string, description: "Target slot (e.g. 
scsi1)" } + sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" } + storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } + format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } + discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } + iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } + - name: workerStorage + required: false + schema: + openAPIV3Schema: + type: string + description: "Default Proxmox storage for worker nodes (used if per-volume storage not set)." + - name: controlPlaneAdditionalVolumes + required: false + schema: + openAPIV3Schema: + type: array + description: "Extra disks for control plane nodes (per-volume storage + optional format)." + items: + type: object + required: ["disk","sizeGb"] + properties: + disk: { type: string, description: "Target slot (e.g. scsi1)" } + sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" } + storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } + format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } + discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } + iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } + - name: controlPlaneStorage + required: false + schema: + openAPIV3Schema: + type: string + description: "Default Proxmox storage for control plane nodes (used if per-volume storage not set)." 
+ - name: loadBalancerAdditionalVolumes + required: false + schema: + openAPIV3Schema: + type: array + description: "Extra disks for load-balancer nodes (per-volume storage + optional format)." + items: + type: object + required: ["disk","sizeGb"] + properties: + disk: { type: string, description: "Target slot (e.g. scsi1)" } + sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" } + storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } + format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } + discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } + iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } + - name: loadBalancerStorage + required: false + schema: + openAPIV3Schema: + type: string + description: "Default Proxmox storage for load-balancer nodes (used if per-volume storage not set)." 
- name: controlPlaneEndpoint required: true schema: @@ -378,6 +447,122 @@ spec: workerNode: *machineSpec loadBalancer: *machineSpec patches: + - name: worker-additional-volumes + description: "Add additional disks to workers via variable" + enabledIf: "{{ if .workerAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-worker"] + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .workerAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: worker-storage + enabledIf: "{{ if .workerStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-worker"] + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: workerStorage + - name: controlplane-additional-volumes + description: "Add additional disks to control-plane via variable" + enabledIf: "{{ if .controlPlaneAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .controlPlaneAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if 
$v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: control-plane-storage + enabledIf: "{{ if .controlPlaneStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: controlPlaneStorage + - name: lb-additional-volumes + description: "Add additional disks to load-balancers via variable" + enabledIf: "{{ if .loadBalancerAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: [ "proxmox-loadbalancer" ] + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .loadBalancerAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: load-balancer-storage + enabledIf: "{{ if .loadBalancerStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-loadbalancer"] + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: 
loadBalancerStorage - name: ProxmoxClusterTemplateGeneral description: "Configure Cluster" definitions: diff --git a/templates/cluster-class-cilium.yaml b/templates/cluster-class-cilium.yaml index 7b696ae7..15e05c86 100644 --- a/templates/cluster-class-cilium.yaml +++ b/templates/cluster-class-cilium.yaml @@ -59,7 +59,7 @@ spec: status: "False" timeout: 300s namingStrategy: - template: "{{ .cluster.name }}-worker-{{ .random }}" + template: "{{ .cluster.name }}-worker-{{ .random }}" - class: proxmox-loadbalancer template: bootstrap: @@ -87,8 +87,77 @@ spec: status: "False" timeout: 300s namingStrategy: - template: "{{ .cluster.name }}-loadbalancer-{{ .random }}" + template: "{{ .cluster.name }}-loadbalancer-{{ .random }}" variables: + - name: workerAdditionalVolumes + required: false + schema: + openAPIV3Schema: + type: array + description: "Extra disks for worker nodes (per-volume storage + optional format)." + items: + type: object + required: ["disk","sizeGb"] + properties: + disk: { type: string, description: "Target slot (e.g. scsi1)" } + sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" } + storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } + format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } + discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } + iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } + - name: workerStorage + required: false + schema: + openAPIV3Schema: + type: string + description: "Default Proxmox storage for worker nodes (used if per-volume storage not set)." 
+ - name: controlPlaneAdditionalVolumes + required: false + schema: + openAPIV3Schema: + type: array + description: "Extra disks for control-plane nodes (per-volume storage + optional format)." + items: + type: object + required: ["disk","sizeGb"] + properties: + disk: { type: string, description: "Target slot (e.g. scsi1)" } + sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" } + storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } + format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } + discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } + iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } + - name: controlPlaneStorage + required: false + schema: + openAPIV3Schema: + type: string + description: "Default Proxmox storage for control plane nodes (used if per-volume storage not set)." + - name: loadBalancerAdditionalVolumes + required: false + schema: + openAPIV3Schema: + type: array + description: "Extra disks for load-balancer nodes (per-volume storage + optional format)." + items: + type: object + required: ["disk","sizeGb"] + properties: + disk: { type: string, description: "Target slot (e.g. scsi1)" } + sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" } + storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } + format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } + discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } + iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." 
} + ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } + - name: loadBalancerStorage + required: false + schema: + openAPIV3Schema: + type: string + description: "Default Proxmox storage for load-balancer nodes (used if per-volume storage not set)." - name: controlPlaneEndpoint required: true schema: @@ -378,6 +447,122 @@ spec: workerNode: *machineSpec loadBalancer: *machineSpec patches: + - name: worker-additional-volumes + description: "Add additional disks to workers via variable" + enabledIf: "{{ if .workerAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-worker"] + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .workerAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: worker-storage + enabledIf: "{{ if .workerStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-worker"] + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: workerStorage + - name: controlplane-additional-volumes + description: "Add additional disks to control-plane via variable" + enabledIf: "{{ if .controlPlaneAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: 
ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .controlPlaneAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: control-plane-storage + enabledIf: "{{ if .controlPlaneStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: controlPlaneStorage + - name: lb-additional-volumes + description: "Add additional disks to load-balancers via variable" + enabledIf: "{{ if .loadBalancerAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: [ "proxmox-loadbalancer" ] + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .loadBalancerAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: load-balancer-storage + enabledIf: "{{ if .loadBalancerStorage }}true{{ end }}" + definitions: + 
- selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-loadbalancer"] + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: loadBalancerStorage - name: ProxmoxClusterTemplateGeneral description: "Configure Cluster" definitions: diff --git a/templates/cluster-class.yaml b/templates/cluster-class.yaml index cecdd33d..e59ee813 100644 --- a/templates/cluster-class.yaml +++ b/templates/cluster-class.yaml @@ -59,6 +59,75 @@ spec: namingStrategy: template: "{{ .cluster.name }}-loadbalancer-{{ .random }}" variables: + - name: workerAdditionalVolumes + required: false + schema: + openAPIV3Schema: + type: array + description: "Extra disks for worker nodes (per-volume storage + optional format)." + items: + type: object + required: ["disk","sizeGb"] + properties: + disk: { type: string, description: "Target slot (e.g. scsi1)" } + sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" } + storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } + format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } + discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } + iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } + - name: workerStorage + required: false + schema: + openAPIV3Schema: + type: string + description: "Default Proxmox storage for worker nodes (used if per-volume storage not set)." + - name: controlPlaneAdditionalVolumes + required: false + schema: + openAPIV3Schema: + type: array + description: "Extra disks for control plane nodes (per-volume storage + optional format)." 
+ items: + type: object + required: ["disk","sizeGb"] + properties: + disk: { type: string, description: "Target slot (e.g. scsi1)" } + sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" } + storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } + format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } + discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } + iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } + - name: controlPlaneStorage + required: false + schema: + openAPIV3Schema: + type: string + description: "Default Proxmox storage for control plane nodes (used if per-volume storage not set)." + - name: loadBalancerAdditionalVolumes + required: false + schema: + openAPIV3Schema: + type: array + description: "Extra disks for load-balancer nodes (per-volume storage + optional format)." + items: + type: object + required: ["disk","sizeGb"] + properties: + disk: { type: string, description: "Target slot (e.g. scsi1)" } + sizeGb: { type: integer, minimum: 5, description: "Size in GiB (min 5)" } + storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } + format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } + discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } + iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." 
} + - name: loadBalancerStorage + required: false + schema: + openAPIV3Schema: + type: string + description: "Default Proxmox storage for load-balancer nodes (used if per-volume storage not set)." - name: controlPlaneEndpoint required: true schema: @@ -348,6 +417,122 @@ spec: workerNode: *machineSpec loadBalancer: *machineSpec patches: + - name: worker-additional-volumes + description: "Add additional disks to workers via variable" + enabledIf: "{{ if .workerAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-worker"] + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .workerAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: worker-storage + enabledIf: "{{ if .workerStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-worker"] + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: workerStorage + - name: controlplane-additional-volumes + description: "Add additional disks to control-plane via variable" + enabledIf: "{{ if .controlPlaneAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + 
path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .controlPlaneAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: control-plane-storage + enabledIf: "{{ if .controlPlaneStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: controlPlaneStorage + - name: lb-additional-volumes + description: "Add additional disks to load-balancers via variable" + enabledIf: "{{ if .loadBalancerAdditionalVolumes }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: [ "proxmox-loadbalancer" ] + jsonPatches: + - op: add + path: /spec/template/spec/disks + valueFrom: + template: | + additionalVolumes: + {{- range $i, $v := .loadBalancerAdditionalVolumes }} + - { disk: {{ $v.disk }}, sizeGb: {{ $v.sizeGb }} + {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} + {{- if $v.format }}, format: {{ $v.format }}{{- end }} + {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} + {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } + {{- end }} + - name: load-balancer-storage + enabledIf: "{{ if .loadBalancerStorage }}true{{ end }}" + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: 
ProxmoxMachineTemplate + matchResources: + controlPlane: false + machineDeploymentClass: + names: ["proxmox-loadbalancer"] + jsonPatches: + - op: add + path: /spec/template/spec/storage + valueFrom: + variable: loadBalancerStorage - name: ProxmoxClusterTemplateGeneral description: "Configure Cluster" definitions: From 35eeec3ed0c3aebd33afa07bb4308f695f77791f Mon Sep 17 00:00:00 2001 From: holmesb <5072156+holmesb@users.noreply.github.com> Date: Wed, 29 Oct 2025 10:52:50 +0000 Subject: [PATCH 3/5] Update api/v1alpha1/proxmoxmachine_types.go Co-authored-by: Mohamed Chiheb Ben Jemaa --- api/v1alpha1/proxmoxmachine_types.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index 0085856d..f71c6c19 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -169,7 +169,10 @@ type DiskSpec struct { // If omitted or false, the flag is not set. // +optional Discard *bool `json:"discard,omitempty"` - Iothread *bool `json:"iothread,omitempty"` + // IOThread enables the option IO Thread, + // With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. 
+ // The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller + IOThread *bool `json:"ioThread,omitempty"` SSD *bool `json:"ssd,omitempty"` } From f31cebc45779014b0f1e10986b2328dd8a0d0194 Mon Sep 17 00:00:00 2001 From: holmesb <5072156+holmesb@users.noreply.github.com> Date: Wed, 29 Oct 2025 12:29:21 +0000 Subject: [PATCH 4/5] Address PR comments --- api/v1alpha1/proxmoxmachine_types.go | 24 +- api/v1alpha1/proxmoxmachine_types_test.go | 26 +- api/v1alpha1/zz_generated.deepcopy.go | 4 +- ...ture.cluster.x-k8s.io_proxmoxclusters.yaml | 47 +++- ...ster.x-k8s.io_proxmoxclustertemplates.yaml | 47 +++- ...ture.cluster.x-k8s.io_proxmoxmachines.yaml | 47 +++- ...ster.x-k8s.io_proxmoxmachinetemplates.yaml | 47 +++- docs/advanced-setups.md | 6 +- internal/service/vmservice/vm.go | 226 +++++++++--------- internal/service/vmservice/vm_test.go | 18 +- templates/cluster-class-calico.yaml | 12 +- templates/cluster-class-cilium.yaml | 12 +- templates/cluster-class.yaml | 12 +- 13 files changed, 330 insertions(+), 198 deletions(-) diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index f71c6c19..6b2bb8a7 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -159,31 +159,37 @@ type DiskSpec struct { // +kubebuilder:validation:Minimum=5 SizeGB int32 `json:"sizeGb"` // Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). - // If omitted, falls back to the machine's .spec.storage. + // If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. // +optional Storage *string `json:"storage,omitempty"` // Format is optional: + // +optional Format *TargetFileStorageFormat `json:"format,omitempty"` // Discard enables TRIM/UNMAP support for this virtual disk. // Safe on IDE/SATA/SCSI/VirtIO; maps to Proxmox "discard=on". // If omitted or false, the flag is not set. 
// +optional - Discard *bool `json:"discard,omitempty"` - // IOThread enables the option IO Thread, - // With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. - // The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller + Discard *bool `json:"discard,omitempty"` + // IOThread enables the option IO Thread, + // With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + // The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller IOThread *bool `json:"ioThread,omitempty"` - SSD *bool `json:"ssd,omitempty"` + // SSD enables SSD emulation feature + // SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + // There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + // SSD emulation is not supported on VirtIO Block drives. + SSD *bool `json:"ssd,omitempty"` } // TargetFileStorageFormat the target format of the cloned disk. +// +kubebuilder:validation:Enum=raw;qcow2;vmdk type TargetFileStorageFormat string // Supported disk formats. const ( - TargetStorageFormatRaw TargetFileStorageFormat = "raw" - TargetStorageFormatQcow2 TargetFileStorageFormat = "qcow2" - TargetStorageFormatVmdk TargetFileStorageFormat = "vmdk" + TargetFileStorageFormatRaw TargetFileStorageFormat = "raw" + TargetFileStorageFormatQCOW2 TargetFileStorageFormat = "qcow2" + TargetFileStorageFormatVMDK TargetFileStorageFormat = "vmdk" ) // TemplateSource defines the source of the template VM. 
diff --git a/api/v1alpha1/proxmoxmachine_types_test.go b/api/v1alpha1/proxmoxmachine_types_test.go index 7b2c25e6..6173a748 100644 --- a/api/v1alpha1/proxmoxmachine_types_test.go +++ b/api/v1alpha1/proxmoxmachine_types_test.go @@ -63,7 +63,7 @@ var _ = Describe("ProxmoxMachine Test", func() { Context("VirtualMachineCloneSpec", func() { It("Should not allow specifying format if full clone is disabled", func() { dm := defaultMachine() - dm.Spec.Format = ptr.To(TargetStorageFormatRaw) + dm.Spec.Format = ptr.To(TargetFileStorageFormatRaw) dm.Spec.Full = ptr.To(false) Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("Must set full=true when specifying format"))) @@ -315,42 +315,42 @@ var _ = Describe("ProxmoxMachine Test", func() { ds := DiskSpec{ Disk: "scsi7", SizeGB: 60, - Iothread: &tTrue, + IOThread: &tTrue, } b, err := json.Marshal(ds) Expect(err).NotTo(HaveOccurred()) js := string(b) Expect(js).To(ContainSubstring(`"disk":"scsi7"`)) Expect(js).To(ContainSubstring(`"sizeGb":60`)) - Expect(js).To(ContainSubstring(`"iothread":true`)) + Expect(js).To(ContainSubstring(`"ioThread":true`)) }) It("includes iothread when explicitly false", func() { tFalse := false ds := DiskSpec{ Disk: "scsi8", SizeGB: 70, - Iothread: &tFalse, + IOThread: &tFalse, } b, err := json.Marshal(ds) Expect(err).NotTo(HaveOccurred()) js := string(b) Expect(js).To(ContainSubstring(`"disk":"scsi8"`)) Expect(js).To(ContainSubstring(`"sizeGb":70`)) - Expect(js).To(ContainSubstring(`"iothread":false`)) // non-nil -> present + Expect(js).To(ContainSubstring(`"ioThread":false`)) // non-nil -> present }) It("omits iothread when nil", func() { ds := DiskSpec{ Disk: "scsi9", SizeGB: 80, - Iothread: nil, + IOThread: nil, } b, err := json.Marshal(ds) Expect(err).NotTo(HaveOccurred()) js := string(b) Expect(js).To(ContainSubstring(`"disk":"scsi9"`)) Expect(js).To(ContainSubstring(`"sizeGb":80`)) - Expect(js).NotTo(ContainSubstring(`"iothread"`)) + 
Expect(js).NotTo(ContainSubstring(`"ioThread"`)) }) }) @@ -359,7 +359,7 @@ var _ = Describe("ProxmoxMachine Test", func() { tTrue := true src := &Storage{ AdditionalVolumes: []DiskSpec{ - {Disk: "scsi10", SizeGB: 90, Iothread: &tTrue}, + {Disk: "scsi10", SizeGB: 90, IOThread: &tTrue}, }, } dst := src.DeepCopy() @@ -368,11 +368,11 @@ var _ = Describe("ProxmoxMachine Test", func() { got := dst.AdditionalVolumes[0] Expect(got.Disk).To(Equal("scsi10")) Expect(got.SizeGB).To(Equal(int32(90))) - Expect(got.Iothread).NotTo(BeNil()) - Expect(*got.Iothread).To(BeTrue()) - *src.AdditionalVolumes[0].Iothread = false - Expect(dst.AdditionalVolumes[0].Iothread).NotTo(BeNil()) - Expect(*dst.AdditionalVolumes[0].Iothread).To(BeTrue()) + Expect(got.IOThread).NotTo(BeNil()) + Expect(*got.IOThread).To(BeTrue()) + *src.AdditionalVolumes[0].IOThread = false + Expect(dst.AdditionalVolumes[0].IOThread).NotTo(BeNil()) + Expect(*dst.AdditionalVolumes[0].IOThread).To(BeTrue()) }) }) Context("AdditionalVolumes ssd - JSON marshalling", func() { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index cd67d635..ca8a7fbc 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -46,8 +46,8 @@ func (in *DiskSpec) DeepCopyInto(out *DiskSpec) { *out = new(bool) **out = **in } - if in.Iothread != nil { - in, out := &in.Iothread, &out.Iothread + if in.IOThread != nil { + in, out := &in.IOThread, &out.IOThread *out = new(bool) **out = **in } diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index d848493b..bce8c96c 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -125,8 +125,16 @@ spec: type: string format: description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk type: string - iothread: + 
ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller type: boolean sizeGb: description: |- @@ -138,11 +146,16 @@ spec: minimum: 5 type: integer ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. type: boolean storage: description: |- Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). - If omitted, falls back to the machine's .spec.storage. + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. type: string required: - disk @@ -168,8 +181,16 @@ spec: type: string format: description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk type: string - iothread: + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. 
+ The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller type: boolean sizeGb: description: |- @@ -181,11 +202,16 @@ spec: minimum: 5 type: integer ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. type: boolean storage: description: |- Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). - If omitted, falls back to the machine's .spec.storage. + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. type: string required: - disk @@ -196,12 +222,17 @@ spec: rule: self == oldSelf type: object format: + allOf: + - enum: + - raw + - qcow2 + - vmdk + - enum: + - raw + - qcow2 + - vmdk description: Format for file storage. Only valid for full clone. - enum: - - raw - - qcow2 - - vmdk type: string full: default: true diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 578ae40b..dd965bc3 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -149,8 +149,16 @@ spec: type: string format: description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk type: string - iothread: + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. 
+ The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller type: boolean sizeGb: description: |- @@ -162,11 +170,16 @@ spec: minimum: 5 type: integer ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. type: boolean storage: description: |- Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). - If omitted, falls back to the machine's .spec.storage. + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. type: string required: - disk @@ -192,8 +205,16 @@ spec: type: string format: description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk type: string - iothread: + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller type: boolean sizeGb: description: |- @@ -205,11 +226,16 @@ spec: minimum: 5 type: integer ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. type: boolean storage: description: |- Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). 
- If omitted, falls back to the machine's .spec.storage. + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. type: string required: - disk @@ -220,12 +246,17 @@ spec: rule: self == oldSelf type: object format: + allOf: + - enum: + - raw + - qcow2 + - vmdk + - enum: + - raw + - qcow2 + - vmdk description: Format for file storage. Only valid for full clone. - enum: - - raw - - qcow2 - - vmdk type: string full: default: true diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index e2971d86..7064b706 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -117,8 +117,16 @@ spec: type: string format: description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk type: string - iothread: + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller type: boolean sizeGb: description: |- @@ -130,11 +138,16 @@ spec: minimum: 5 type: integer ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. type: boolean storage: description: |- Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). - If omitted, falls back to the machine's .spec.storage. 
+ If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. type: string required: - disk @@ -160,8 +173,16 @@ spec: type: string format: description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk type: string - iothread: + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller type: boolean sizeGb: description: |- @@ -173,11 +194,16 @@ spec: minimum: 5 type: integer ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. type: boolean storage: description: |- Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). - If omitted, falls back to the machine's .spec.storage. + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. type: string required: - disk @@ -188,11 +214,16 @@ spec: rule: self == oldSelf type: object format: + allOf: + - enum: + - raw + - qcow2 + - vmdk + - enum: + - raw + - qcow2 + - vmdk description: Format for file storage. Only valid for full clone. 
- enum: - - raw - - qcow2 - - vmdk type: string full: default: true diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index 09fa1101..4896109f 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -129,8 +129,16 @@ spec: type: string format: description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk type: string - iothread: + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller type: boolean sizeGb: description: |- @@ -142,11 +150,16 @@ spec: minimum: 5 type: integer ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. type: boolean storage: description: |- Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). - If omitted, falls back to the machine's .spec.storage. + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. 
type: string required: - disk @@ -172,8 +185,16 @@ spec: type: string format: description: 'Format is optional:' + enum: + - raw + - qcow2 + - vmdk type: string - iothread: + ioThread: + description: |- + IOThread enables the option IO Thread, + With IO Thread enabled, QEMU creates one I/O thread per storage controller rather than handling all I/O in the main event loop or vCPU threads. + The option IO Thread can only be used when using a disk with the VirtIO controller, or with the SCSI controller type: boolean sizeGb: description: |- @@ -185,11 +206,16 @@ spec: minimum: 5 type: integer ssd: + description: |- + SSD enables SSD emulation feature + SSD emulation sets a drive to be presented to the guest as a solid-state drive rather than a rotational hard disk + There is no requirement that the underlying storage actually be backed by SSDs; this feature can be used with physical media of any type + SSD emulation is not supported on VirtIO Block drives. type: boolean storage: description: |- Storage is an optional per-volume Proxmox storage name (e.g., "local-lvm", "nfs-data"). - If omitted, falls back to the machine's .spec.storage. + If omitted, falls back to the machine's .spec.storage if exists, otherwise the Proxmox node’s default storage. type: string required: - disk @@ -200,12 +226,17 @@ spec: rule: self == oldSelf type: object format: + allOf: + - enum: + - raw + - qcow2 + - vmdk + - enum: + - raw + - qcow2 + - vmdk description: Format for file storage. Only valid for full clone. - enum: - - raw - - qcow2 - - vmdk type: string full: default: true diff --git a/docs/advanced-setups.md b/docs/advanced-setups.md index 2ef13851..218bcf45 100644 --- a/docs/advanced-setups.md +++ b/docs/advanced-setups.md @@ -328,14 +328,14 @@ spec: storage: my-nfs # Optional per-volume storage override. Uses .spec.template.spec.storage if omitted format: qcow2 # Only specify if using file-backed storage. If omitted, default for disk is used. 
discard: true - iothread: true + ioThread: true ssd: true ``` In the same way, additionalVolumes can also be specified in ProxmoxClusters, ProxmoxClusterTemplates, -and ProxmoxMachines. Flags: format, discard, iothread, and ssd are supported by this provider. +and ProxmoxMachines. Flags: format, discard, ioThread, and ssd are supported by this provider. See Proxmox [docs](https://pve.proxmox.com/pve-docs/qm.1.html#qm_hard_disk) for details about these flags. -Alternatively if using cluster-class, define additionalVolmes in your cluster: +Alternatively if using cluster-class, define additionalVolumes in your cluster: ```yaml kind: Cluster spec: diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index 4d5576c2..df66e27b 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -327,118 +327,8 @@ func reconcileVirtualMachineConfig(ctx context.Context, machineScope *scope.Mach } } - // Additional data disks - disksSpec := machineScope.ProxmoxMachine.Spec.Disks - if disksSpec != nil && len(disksSpec.AdditionalVolumes) > 0 { - // Find an "Unused Disk" on the same storage to reattach (dedup): - findMatchingUnusedVolume := func(cfg any, storageName string) string { - if cfg == nil || storageName == "" { - return "" - } - cfgVal := reflect.ValueOf(cfg) - if cfgVal.Kind() == reflect.Pointer { - cfgVal = cfgVal.Elem() - } - if !cfgVal.IsValid() { - return "" - } - cfgType := cfgVal.Type() - - // Unused# fields: - for i := 0; i < cfgType.NumField(); i++ { - fieldMetadata := cfgType.Field(i) - if !strings.HasPrefix(fieldMetadata.Name, "Unused") { - continue - } - fieldValue := cfgVal.Field(i) - if !fieldValue.IsValid() || fieldValue.Kind() != reflect.String { - continue - } - volID := strings.TrimSpace(fieldValue.String()) // e.g. 
"vg_xxx:vm-103-disk-2" - if volID != "" && strings.HasPrefix(volID, storageName+":") { - return volID - } - } - return "" - } - - // Avoid queueing duplicate adds for the same slot within this reconcile: - pendingInReconcile := map[string]struct{}{} - for _, vol := range disksSpec.AdditionalVolumes { - slotName := strings.ToLower(strings.TrimSpace(vol.Disk)) - alreadySet := diskSlotOccupied(vmConfig, slotName) - machineScope.V(4).Info("additionalVolume: slot state", - "machine", machineScope.Name(), "slot", slotName, "occupied", alreadySet) - if alreadySet { - if pendingGuardEnabled { - clearPending(machineScope, slotName) - } - continue - } - if pendingGuardEnabled && isPending(machineScope, slotName) { - machineScope.V(4).Info("additionalVolume: skip, pending add in effect", - "machine", machineScope.Name(), "slot", slotName) - continue - } - if _, seen := pendingInReconcile[slotName]; seen { - machineScope.V(4).Info("additionalVolume: skip, add already queued in this reconcile", - "machine", machineScope.Name(), "slot", slotName) - continue - } - // Resolve storage (per-volume overrides machine-level): - var storageName string - if vol.Storage != nil && *vol.Storage != "" { - storageName = *vol.Storage - } else if machineScope.ProxmoxMachine.Spec.Storage != nil && *machineScope.ProxmoxMachine.Spec.Storage != "" { - storageName = *machineScope.ProxmoxMachine.Spec.Storage - } else { - return false, errors.New("additionalVolumes requires a storage to be set (either per-volume .storage or spec.storage)") - } - machineScope.V(4).Info("additionalVolume: resolved storage", - "machine", machineScope.Name(), "slot", slotName, "storage", storageName) - - volumeValue := findMatchingUnusedVolume(vmConfig, storageName) - if volumeValue != "" { - machineScope.V(4).Info("additionalVolume: reattaching existing unused volume to avoid spurious extra disks due to reconcile race-condition (dedup)", - "machine", machineScope.Name(), "slot", slotName, "volumeID", volumeValue) - } 
else { - if vol.Format != nil && *vol.Format != "" { - volumeValue = fmt.Sprintf("%s:0,size=%dG,format=%s", storageName, vol.SizeGB, string(*vol.Format)) - machineScope.Info("additionalVolume: creating file-backed volume (with format notation)", - "machine", machineScope.Name(), "slot", slotName, "value", volumeValue) - } else { - volumeValue = fmt.Sprintf("%s:%d", storageName, vol.SizeGB) - machineScope.Info("additionalVolume: creating block-backed volume (without format notation)", - "machine", machineScope.Name(), "slot", slotName, "value", volumeValue) - } - } - // Add flags: - if vol.Discard != nil && *vol.Discard { - volumeValue = fmt.Sprintf("%s,discard=on", volumeValue) - machineScope.V(4).Info("additionalVolume: appended flag", - "machine", machineScope.Name(), "slot", slotName, "flag", "discard=on") - } - if vol.Iothread != nil && *vol.Iothread { - volumeValue = fmt.Sprintf("%s,iothread=1", volumeValue) - machineScope.V(4).Info("additionalVolume: appended flag", - "machine", machineScope.Name(), "slot", slotName, "flag", "iothread=1") - } - if vol.SSD != nil && *vol.SSD { - volumeValue = fmt.Sprintf("%s,ssd=1", volumeValue) - machineScope.V(4).Info("additionalVolume: appended flag", - "machine", machineScope.Name(), "slot", slotName, "flag", "ssd=1") - } - vmOptions = append(vmOptions, proxmox.VirtualMachineOption{ - Name: vol.Disk, - Value: volumeValue, - }) - machineScope.V(4).Info("additionalVolume: queued vm option", - "machine", machineScope.Name(), "slot", slotName, "name", vol.Disk, "value", volumeValue) - pendingInReconcile[slotName] = struct{}{} - if pendingGuardEnabled { - markPending(machineScope, slotName) - } - } + if err := reconcileAdditionalVolumes(machineScope, vmConfig, &vmOptions); err != nil { + return false, err } if len(vmOptions) == 0 { @@ -651,3 +541,115 @@ var selectNextNode = scheduler.ScheduleVM func unmountCloudInitISO(ctx context.Context, machineScope *scope.MachineScope) error { return 
machineScope.InfraCluster.ProxmoxClient.UnmountCloudInitISO(ctx, machineScope.VirtualMachine, inject.CloudInitISODevice) } + +func reconcileAdditionalVolumes(machineScope *scope.MachineScope, vmConfig any, vmOptions *[]proxmox.VirtualMachineOption) error { + disksSpec := machineScope.ProxmoxMachine.Spec.Disks + if disksSpec == nil || len(disksSpec.AdditionalVolumes) == 0 { + return nil + } + findMatchingUnusedVolume := func(cfg any, storageName string) string { + if cfg == nil || storageName == "" { + return "" + } + cfgVal := reflect.ValueOf(cfg) + if cfgVal.Kind() == reflect.Pointer { + cfgVal = cfgVal.Elem() + } + if !cfgVal.IsValid() { + return "" + } + cfgType := cfgVal.Type() + for i := 0; i < cfgType.NumField(); i++ { + fieldMetadata := cfgType.Field(i) + if !strings.HasPrefix(fieldMetadata.Name, "Unused") { + continue + } + fieldValue := cfgVal.Field(i) + if !fieldValue.IsValid() || fieldValue.Kind() != reflect.String { + continue + } + volID := strings.TrimSpace(fieldValue.String()) // e.g. 
"vg_xxx:vm-103-disk-2" + if volID != "" && strings.HasPrefix(volID, storageName+":") { + return volID + } + } + return "" + } + pendingInReconcile := map[string]struct{}{} + for _, vol := range disksSpec.AdditionalVolumes { + slotName := strings.ToLower(strings.TrimSpace(vol.Disk)) + alreadySet := diskSlotOccupied(vmConfig, slotName) + machineScope.V(4).Info("additionalVolume: slot state", + "machine", machineScope.Name(), "slot", slotName, "occupied", alreadySet) + if alreadySet { + if pendingGuardEnabled { + clearPending(machineScope, slotName) + } + continue + } + if pendingGuardEnabled && isPending(machineScope, slotName) { + machineScope.V(4).Info("additionalVolume: skip, pending add in effect", + "machine", machineScope.Name(), "slot", slotName) + continue + } + if _, seen := pendingInReconcile[slotName]; seen { + machineScope.V(4).Info("additionalVolume: skip, add already queued in this reconcile", + "machine", machineScope.Name(), "slot", slotName) + continue + } + // Resolve storage (per-volume overrides machine-level) + var storageName string + if vol.Storage != nil && *vol.Storage != "" { + storageName = *vol.Storage + } else if machineScope.ProxmoxMachine.Spec.Storage != nil && *machineScope.ProxmoxMachine.Spec.Storage != "" { + storageName = *machineScope.ProxmoxMachine.Spec.Storage + } else { + return errors.New("additionalVolumes requires a storage to be set (either per-volume .storage or spec.storage)") + } + machineScope.V(4).Info("additionalVolume: resolved storage", + "machine", machineScope.Name(), "slot", slotName, "storage", storageName) + volumeValue := findMatchingUnusedVolume(vmConfig, storageName) + if volumeValue != "" { + machineScope.V(4).Info("additionalVolume: reattaching existing unused volume to avoid spurious extra disks due to reconcile race-condition (dedup)", + "machine", machineScope.Name(), "slot", slotName, "volumeID", volumeValue) + } else { + if vol.Format != nil && *vol.Format != "" { + volumeValue = 
fmt.Sprintf("%s:0,size=%dG,format=%s", storageName, vol.SizeGB, string(*vol.Format)) + machineScope.Info("additionalVolume: creating file-backed volume (with format notation)", + "machine", machineScope.Name(), "slot", slotName, "value", volumeValue) + } else { + volumeValue = fmt.Sprintf("%s:%d", storageName, vol.SizeGB) + machineScope.Info("additionalVolume: creating block-backed volume (without format notation)", + "machine", machineScope.Name(), "slot", slotName, "value", volumeValue) + } + } + // Add flags + if vol.Discard != nil && *vol.Discard { + volumeValue = fmt.Sprintf("%s,discard=on", volumeValue) + machineScope.V(4).Info("additionalVolume: appended flag", + "machine", machineScope.Name(), "slot", slotName, "flag", "discard=on") + } + if vol.IOThread != nil && *vol.IOThread { + volumeValue = fmt.Sprintf("%s,iothread=1", volumeValue) + machineScope.V(4).Info("additionalVolume: appended flag", + "machine", machineScope.Name(), "slot", slotName, "flag", "iothread=1") + } + if vol.SSD != nil && *vol.SSD { + volumeValue = fmt.Sprintf("%s,ssd=1", volumeValue) + machineScope.V(4).Info("additionalVolume: appended flag", + "machine", machineScope.Name(), "slot", slotName, "flag", "ssd=1") + } + *vmOptions = append(*vmOptions, proxmox.VirtualMachineOption{ + Name: vol.Disk, + Value: volumeValue, + }) + machineScope.V(4).Info("additionalVolume: queued vm option", + "machine", machineScope.Name(), "slot", slotName, "name", vol.Disk, "value", volumeValue) + pendingInReconcile[slotName] = struct{}{} + if pendingGuardEnabled { + markPending(machineScope, slotName) + } + } + + return nil +} diff --git a/internal/service/vmservice/vm_test.go b/internal/service/vmservice/vm_test.go index 4e73487c..3eaa900a 100644 --- a/internal/service/vmservice/vm_test.go +++ b/internal/service/vmservice/vm_test.go @@ -120,7 +120,7 @@ func TestReconcileVM_InitCheckDisabled(t *testing.T) { func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { machineScope, proxmoxClient, _ 
:= setupReconcilerTest(t)
 	machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm")
-	machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw)
+	machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetFileStorageFormatRaw)
 	machineScope.ProxmoxMachine.Spec.Full = ptr.To(true)
 	machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool")
 	machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap")
@@ -161,7 +161,7 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector(t *testing.T
 		},
 	}
 	machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm")
-	machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw)
+	machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetFileStorageFormatRaw)
 	machineScope.ProxmoxMachine.Spec.Full = ptr.To(true)
 	machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool")
 	machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap")
@@ -206,7 +206,7 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_VMTemplateNo
 		},
 	}
 	machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm")
-	machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw)
+	machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetFileStorageFormatRaw)
 	machineScope.ProxmoxMachine.Spec.Full = ptr.To(true)
 	machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool")
 	machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap")
@@ -900,11 +900,11 @@ func TestReconcileVirtualMachineConfig_AdditionalVolumes_Block_IothreadTrue(t *t
 	iTrue := true
 	machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{
 		AdditionalVolumes: []infrav1alpha1.DiskSpec{
-			{Disk: "scsi1", SizeGB: 90, Iothread: &iTrue},
+			{Disk: "scsi1", SizeGB: 90, IOThread: &iTrue},
 		},
 	}
 
 	// Expect block syntax with ",iothread=1" appended.
expected := []interface{}{proxmox.VirtualMachineOption{Name: "scsi1", Value: "local-lvm:90,iothread=1"}} proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expected...).Return(newTask(), nil).Once() @@ -926,7 +926,7 @@ func TestReconcileVirtualMachineConfig_AdditionalVolumes_File_PerVolumeFormat_Io iTrue := true machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ AdditionalVolumes: []infrav1alpha1.DiskSpec{ - {Disk: "scsi2", SizeGB: 80, Storage: &nfs, Format: &qcow2, Iothread: &iTrue}, + {Disk: "scsi2", SizeGB: 80, Storage: &nfs, Format: &qcow2, IOThread: &iTrue}, }, } @@ -943,7 +943,7 @@ func TestReconcileVirtualMachineConfig_AdditionalVolumes_Iothread_OmittedWhenNil t.Parallel() ctx := context.Background() - // Case A: iothread=nil -> omitted + // Case A: ioThread=nil -> omitted { machineScope, proxmoxClient, _ := setupReconcilerTest(t) vm := newStoppedVM() @@ -965,7 +965,7 @@ func TestReconcileVirtualMachineConfig_AdditionalVolumes_Iothread_OmittedWhenNil require.True(t, requeue) } - // Case B: iothread=false -> omitted (only emit when explicitly true) + // Case B: ioThread=false -> omitted (only emit when explicitly true) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) vm := newStoppedVM() @@ -976,7 +976,7 @@ func TestReconcileVirtualMachineConfig_AdditionalVolumes_Iothread_OmittedWhenNil iFalse := false machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ AdditionalVolumes: []infrav1alpha1.DiskSpec{ - {Disk: "scsi4", SizeGB: 25, Iothread: &iFalse}, + {Disk: "scsi4", SizeGB: 25, IOThread: &iFalse}, }, } diff --git a/templates/cluster-class-calico.yaml b/templates/cluster-class-calico.yaml index 8fbd8bef..aae7b33c 100644 --- a/templates/cluster-class-calico.yaml +++ b/templates/cluster-class-calico.yaml @@ -104,7 +104,7 @@ spec: storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed 
only" } discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } - iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } - name: workerStorage required: false @@ -127,7 +127,7 @@ spec: storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } - iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } - name: controlPlaneStorage required: false @@ -150,7 +150,7 @@ spec: storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } - iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." 
} - name: loadBalancerStorage required: false @@ -469,7 +469,7 @@ spec: {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} {{- if $v.format }}, format: {{ $v.format }}{{- end }} {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} - {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: worker-storage @@ -507,7 +507,7 @@ spec: {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} {{- if $v.format }}, format: {{ $v.format }}{{- end }} {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} - {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: control-plane-storage @@ -545,7 +545,7 @@ spec: {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} {{- if $v.format }}, format: {{ $v.format }}{{- end }} {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} - {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: load-balancer-storage diff --git a/templates/cluster-class-cilium.yaml b/templates/cluster-class-cilium.yaml index 15e05c86..110a59be 100644 --- a/templates/cluster-class-cilium.yaml +++ b/templates/cluster-class-cilium.yaml @@ -104,7 +104,7 @@ spec: storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." 
} - iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } - name: workerStorage required: false @@ -127,7 +127,7 @@ spec: storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } - iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } - name: controlPlaneStorage required: false @@ -150,7 +150,7 @@ spec: storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } - iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." 
} - name: loadBalancerStorage required: false @@ -469,7 +469,7 @@ spec: {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} {{- if $v.format }}, format: {{ $v.format }}{{- end }} {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} - {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: worker-storage @@ -507,7 +507,7 @@ spec: {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} {{- if $v.format }}, format: {{ $v.format }}{{- end }} {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} - {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: control-plane-storage @@ -545,7 +545,7 @@ spec: {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} {{- if $v.format }}, format: {{ $v.format }}{{- end }} {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} - {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: load-balancer-storage diff --git a/templates/cluster-class.yaml b/templates/cluster-class.yaml index e59ee813..d2da351f 100644 --- a/templates/cluster-class.yaml +++ b/templates/cluster-class.yaml @@ -74,7 +74,7 @@ spec: storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." 
} - iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } - name: workerStorage required: false @@ -97,7 +97,7 @@ spec: storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } - iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." } - name: controlPlaneStorage required: false @@ -120,7 +120,7 @@ spec: storage: { type: string, description: "Optional per-volume storage; falls back to PMT spec.storage" } format: { type: string, enum: ["raw","qcow2","vmdk"], description: "Optional; file-backed only" } discard: { type: boolean, description: "Enable TRIM/UNMAP on this disk (Proxmox 'discard=on'). Safe on all slot types." } - iothread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } + ioThread: { type: boolean, description: "Enable a dedicated IO thread (Proxmox 'iothread=1'); SCSI & VirtIO-blk only." } ssd: { type: boolean, description: "Hint as SSD (Proxmox 'ssd=1'); SCSI/IDE/SATA only." 
} - name: loadBalancerStorage required: false @@ -439,7 +439,7 @@ spec: {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} {{- if $v.format }}, format: {{ $v.format }}{{- end }} {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} - {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: worker-storage @@ -477,7 +477,7 @@ spec: {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} {{- if $v.format }}, format: {{ $v.format }}{{- end }} {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} - {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: control-plane-storage @@ -515,7 +515,7 @@ spec: {{- if $v.storage }}, storage: {{ $v.storage }}{{- end }} {{- if $v.format }}, format: {{ $v.format }}{{- end }} {{- if hasKey $v "discard" }}, discard: {{ $v.discard }}{{- end }} - {{- if hasKey $v "iothread" }}, iothread: {{ $v.iothread }}{{- end }} + {{- if hasKey $v "ioThread" }}, ioThread: {{ $v.ioThread }}{{- end }} {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: load-balancer-storage From fcc05e13332f40752bcaf7ff04e244da18a36b00 Mon Sep 17 00:00:00 2001 From: holmesb <5072156+holmesb@users.noreply.github.com> Date: Thu, 30 Oct 2025 16:34:02 +0000 Subject: [PATCH 5/5] Deprecation notice, since we'll eventually move to a single volumes list instead of separate boot + additional volumes. 
--- api/v1alpha1/proxmoxmachine_types.go | 5 ++++ ...ture.cluster.x-k8s.io_proxmoxclusters.yaml | 9 ++++++-- ...ster.x-k8s.io_proxmoxclustertemplates.yaml | 9 ++++++-- ...ture.cluster.x-k8s.io_proxmoxmachines.yaml | 9 ++++++-- ...ster.x-k8s.io_proxmoxmachinetemplates.yaml | 9 ++++++-- templates/cluster-class-calico.yaml | 21 +++++++++-------- templates/cluster-class-cilium.yaml | 23 +++++++++++-------- templates/cluster-class.yaml | 21 +++++++++-------- 8 files changed, 70 insertions(+), 36 deletions(-) diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index 6b2bb8a7..26160c4b 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -136,6 +136,9 @@ type ProxmoxMachineSpec struct { // Storage is the physical storage on the node. type Storage struct { // BootVolume defines the storage size for the boot volume. + // Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (one item + // with boot:true). Use BootVolume for v1alpha1/v1alpha2, but plan to migrate to `volumes[]` when + // available. // This field is optional, and should only be set if you want // to change the size of the boot volume. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" @@ -143,6 +146,8 @@ type Storage struct { BootVolume *DiskSpec `json:"bootVolume,omitempty"` // AdditionalVolumes defines additional volumes to be added to the virtual machine. + // Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (additional + // items with boot:false). Use for v1alpha1/v1alpha2; plan to migrate to `volumes[]` when available. 
// +optional AdditionalVolumes []DiskSpec `json:"additionalVolumes,omitempty"` } diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index bce8c96c..096e0dd0 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -106,8 +106,10 @@ spec: which will be applied before the first startup. properties: additionalVolumes: - description: AdditionalVolumes defines additional volumes - to be added to the virtual machine. + description: |- + AdditionalVolumes defines additional volumes to be added to the virtual machine. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (additional + items with boot:false). Use for v1alpha1/v1alpha2; plan to migrate to `volumes[]` when available. items: description: DiskSpec is contains values for the disk device and size. @@ -165,6 +167,9 @@ spec: bootVolume: description: |- BootVolume defines the storage size for the boot volume. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (one item + with boot:true). Use BootVolume for v1alpha1/v1alpha2, but plan to migrate to `volumes[]` when + available. This field is optional, and should only be set if you want to change the size of the boot volume. properties: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index dd965bc3..b5e5daa3 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -130,8 +130,10 @@ spec: which will be applied before the first startup. properties: additionalVolumes: - description: AdditionalVolumes defines additional - volumes to be added to the virtual machine. 
+ description: |- + AdditionalVolumes defines additional volumes to be added to the virtual machine. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (additional + items with boot:false). Use for v1alpha1/v1alpha2; plan to migrate to `volumes[]` when available. items: description: DiskSpec is contains values for the disk device and size. @@ -189,6 +191,9 @@ spec: bootVolume: description: |- BootVolume defines the storage size for the boot volume. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (one item + with boot:true). Use BootVolume for v1alpha1/v1alpha2, but plan to migrate to `volumes[]` when + available. This field is optional, and should only be set if you want to change the size of the boot volume. properties: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index 7064b706..aa25c7d8 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -98,8 +98,10 @@ spec: which will be applied before the first startup. properties: additionalVolumes: - description: AdditionalVolumes defines additional volumes to be - added to the virtual machine. + description: |- + AdditionalVolumes defines additional volumes to be added to the virtual machine. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (additional + items with boot:false). Use for v1alpha1/v1alpha2; plan to migrate to `volumes[]` when available. items: description: DiskSpec is contains values for the disk device and size. @@ -157,6 +159,9 @@ spec: bootVolume: description: |- BootVolume defines the storage size for the boot volume. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (one item + with boot:true). 
Use BootVolume for v1alpha1/v1alpha2, but plan to migrate to `volumes[]` when + available. This field is optional, and should only be set if you want to change the size of the boot volume. properties: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index 4896109f..8925b67a 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -110,8 +110,10 @@ spec: which will be applied before the first startup. properties: additionalVolumes: - description: AdditionalVolumes defines additional volumes - to be added to the virtual machine. + description: |- + AdditionalVolumes defines additional volumes to be added to the virtual machine. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (additional + items with boot:false). Use for v1alpha1/v1alpha2; plan to migrate to `volumes[]` when available. items: description: DiskSpec is contains values for the disk device and size. @@ -169,6 +171,9 @@ spec: bootVolume: description: |- BootVolume defines the storage size for the boot volume. + Deprecated: This field will be replaced by a unified `spec.disks.volumes[]` list (one item + with boot:true). Use BootVolume for v1alpha1/v1alpha2, but plan to migrate to `volumes[]` when + available. This field is optional, and should only be set if you want to change the size of the boot volume. properties: diff --git a/templates/cluster-class-calico.yaml b/templates/cluster-class-calico.yaml index aae7b33c..bcc4c0a7 100644 --- a/templates/cluster-class-calico.yaml +++ b/templates/cluster-class-calico.yaml @@ -94,7 +94,7 @@ spec: schema: openAPIV3Schema: type: array - description: "Extra disks for worker nodes (per-volume storage + optional format)." 
+ description: "Extra disks for worker nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)" items: type: object required: ["disk","sizeGb"] @@ -111,13 +111,13 @@ spec: schema: openAPIV3Schema: type: string - description: "Default Proxmox storage for worker nodes (used if per-volume storage not set)." + description: "Default Proxmox storage for worker nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." - name: controlPlaneAdditionalVolumes required: false schema: openAPIV3Schema: type: array - description: "Extra disks for control plane nodes (per-volume storage + optional format)." + description: "Extra disks for control-plane nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)" items: type: object required: ["disk","sizeGb"] @@ -134,13 +134,13 @@ spec: schema: openAPIV3Schema: type: string - description: "Default Proxmox storage for control plane nodes (used if per-volume storage not set)." + description: "Default Proxmox storage for control plane nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)" - name: loadbalancerAdditionalVolumes required: false schema: openAPIV3Schema: type: array - description: "Extra disks for load-balancer nodes (per-volume storage + optional format)." + description: "Extra disks for load-balancer nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)." items: type: object required: ["disk","sizeGb"] @@ -157,7 +157,7 @@ spec: schema: openAPIV3Schema: type: string - description: "Default Proxmox storage for load-balancer nodes (used if per-volume storage not set)." + description: "Default Proxmox storage for load-balancer nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." 
- name: controlPlaneEndpoint required: true schema: @@ -448,7 +448,7 @@ spec: loadBalancer: *machineSpec patches: - name: worker-additional-volumes - description: "Add additional disks to workers via variable" + description: "Add additional disks to workers via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .workerAdditionalVolumes }}true{{ end }}" definitions: - selector: @@ -473,6 +473,7 @@ spec: {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: worker-storage + description: "Default Proxmox storage for worker nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .workerStorage }}true{{ end }}" definitions: - selector: @@ -488,7 +489,7 @@ spec: valueFrom: variable: workerStorage - name: controlplane-additional-volumes - description: "Add additional disks to control-plane via variable" + description: "Add additional disks to control-plane via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .controlPlaneAdditionalVolumes }}true{{ end }}" definitions: - selector: @@ -511,6 +512,7 @@ spec: {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: control-plane-storage + description: "Default Proxmox storage for control-plane nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .controlPlaneStorage }}true{{ end }}" definitions: - selector: @@ -524,7 +526,7 @@ spec: valueFrom: variable: controlPlaneStorage - name: lb-additional-volumes - description: "Add additional disks to load-balancers via variable" + description: "Add additional disks to load-balancers via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." 
enabledIf: "{{ if .loadBalancerAdditionalVolumes }}true{{ end }}" definitions: - selector: @@ -549,6 +551,7 @@ spec: {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: load-balancer-storage + description: "Default Proxmox storage for loadbalancer nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .loadBalancerStorage }}true{{ end }}" definitions: - selector: diff --git a/templates/cluster-class-cilium.yaml b/templates/cluster-class-cilium.yaml index 110a59be..6c53bcd4 100644 --- a/templates/cluster-class-cilium.yaml +++ b/templates/cluster-class-cilium.yaml @@ -94,7 +94,7 @@ spec: schema: openAPIV3Schema: type: array - description: "Extra disks for worker nodes (per-volume storage + optional format)." + description: "Extra disks for worker nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)" items: type: object required: ["disk","sizeGb"] @@ -111,13 +111,13 @@ spec: schema: openAPIV3Schema: type: string - description: "Default Proxmox storage for worker nodes (used if per-volume storage not set)." + description: "Default Proxmox storage for worker nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." - name: controlPlaneAdditionalVolumes required: false schema: openAPIV3Schema: type: array - description: "Extra disks for control-plane nodes (per-volume storage + optional format)." + description: "Extra disks for control-plane nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)" items: type: object required: ["disk","sizeGb"] @@ -134,13 +134,13 @@ spec: schema: openAPIV3Schema: type: string - description: "Default Proxmox storage for control plane nodes (used if per-volume storage not set)." 
+ description: "Default Proxmox storage for control plane nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)" - name: loadbalancerAdditionalVolumes required: false schema: openAPIV3Schema: type: array - description: "Extra disks for load-balancer nodes (per-volume storage + optional format)." + description: "Extra disks for load-balancer nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)." items: type: object required: ["disk","sizeGb"] @@ -157,7 +157,7 @@ spec: schema: openAPIV3Schema: type: string - description: "Default Proxmox storage for load-balancer nodes (used if per-volume storage not set)." + description: "Default Proxmox storage for load-balancer nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." - name: controlPlaneEndpoint required: true schema: @@ -447,8 +447,8 @@ spec: workerNode: *machineSpec loadBalancer: *machineSpec patches: - - name: worker-additional-volumes - description: "Add additional disks to workers via variable" + - name: worker-additional-volumes # Deprecated in favor of .spec.disks.volumes (coming in a future API) + description: "Add additional disks to workers via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .workerAdditionalVolumes }}true{{ end }}" definitions: - selector: @@ -473,6 +473,7 @@ spec: {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: worker-storage + description: "Default Proxmox storage for worker nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." 
enabledIf: "{{ if .workerStorage }}true{{ end }}" definitions: - selector: @@ -488,7 +489,7 @@ spec: valueFrom: variable: workerStorage - name: controlplane-additional-volumes - description: "Add additional disks to control-plane via variable" + description: "Add additional disks to control-plane via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .controlPlaneAdditionalVolumes }}true{{ end }}" definitions: - selector: @@ -511,6 +512,7 @@ spec: {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: control-plane-storage + description: "Default Proxmox storage for control-plane nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .controlPlaneStorage }}true{{ end }}" definitions: - selector: @@ -524,7 +526,7 @@ spec: valueFrom: variable: controlPlaneStorage - name: lb-additional-volumes - description: "Add additional disks to load-balancers via variable" + description: "Add additional disks to load-balancers via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .loadBalancerAdditionalVolumes }}true{{ end }}" definitions: - selector: @@ -549,6 +551,7 @@ spec: {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: load-balancer-storage + description: "Default Proxmox storage for loadbalancer nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .loadBalancerStorage }}true{{ end }}" definitions: - selector: diff --git a/templates/cluster-class.yaml b/templates/cluster-class.yaml index d2da351f..98468b95 100644 --- a/templates/cluster-class.yaml +++ b/templates/cluster-class.yaml @@ -64,7 +64,7 @@ spec: schema: openAPIV3Schema: type: array - description: "Extra disks for worker nodes (per-volume storage + optional format)." 
+ description: "Extra disks for worker nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)" items: type: object required: ["disk","sizeGb"] @@ -81,13 +81,13 @@ spec: schema: openAPIV3Schema: type: string - description: "Default Proxmox storage for worker nodes (used if per-volume storage not set)." + description: "Default Proxmox storage for worker nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." - name: controlPlaneAdditionalVolumes required: false schema: openAPIV3Schema: type: array - description: "Extra disks for control plane nodes (per-volume storage + optional format)." + description: "Extra disks for control-plane nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)" items: type: object required: ["disk","sizeGb"] @@ -104,13 +104,13 @@ spec: schema: openAPIV3Schema: type: string - description: "Default Proxmox storage for control plane nodes (used if per-volume storage not set)." + description: "Default Proxmox storage for control plane nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)" - name: loadbalancerAdditionalVolumes required: false schema: openAPIV3Schema: type: array - description: "Extra disks for load-balancer nodes (per-volume storage + optional format)." + description: "Extra disks for load-balancer nodes (per-volume storage + optional format). Deprecated in favor of .spec.disks.volumes (coming in a future API)." items: type: object required: ["disk","sizeGb"] @@ -127,7 +127,7 @@ spec: schema: openAPIV3Schema: type: string - description: "Default Proxmox storage for load-balancer nodes (used if per-volume storage not set)." + description: "Default Proxmox storage for load-balancer nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." 
- name: controlPlaneEndpoint required: true schema: @@ -418,7 +418,7 @@ spec: loadBalancer: *machineSpec patches: - name: worker-additional-volumes - description: "Add additional disks to workers via variable" + description: "Add additional disks to workers via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .workerAdditionalVolumes }}true{{ end }}" definitions: - selector: @@ -443,6 +443,7 @@ spec: {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: worker-storage + description: "Default Proxmox storage for worker nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .workerStorage }}true{{ end }}" definitions: - selector: @@ -458,7 +459,7 @@ spec: valueFrom: variable: workerStorage - name: controlplane-additional-volumes - description: "Add additional disks to control-plane via variable" + description: "Add additional disks to control-plane via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .controlPlaneAdditionalVolumes }}true{{ end }}" definitions: - selector: @@ -481,6 +482,7 @@ spec: {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: control-plane-storage + description: "Default Proxmox storage for control-plane nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .controlPlaneStorage }}true{{ end }}" definitions: - selector: @@ -494,7 +496,7 @@ spec: valueFrom: variable: controlPlaneStorage - name: lb-additional-volumes - description: "Add additional disks to load-balancers via variable" + description: "Add additional disks to load-balancers via variable. Deprecated in favor of .spec.disks.volumes (coming in a future API)." 
enabledIf: "{{ if .loadBalancerAdditionalVolumes }}true{{ end }}" definitions: - selector: @@ -519,6 +521,7 @@ spec: {{- if hasKey $v "ssd" }}, ssd: {{ $v.ssd }}{{- end }} } {{- end }} - name: load-balancer-storage + description: "Default Proxmox storage for loadbalancer nodes (used if per-volume storage not set). Deprecated in favor of .spec.disks.volumes (coming in a future API)." enabledIf: "{{ if .loadBalancerStorage }}true{{ end }}" definitions: - selector: