diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go
index 0c26c8a5..b167456b 100644
--- a/api/v1alpha1/proxmoxmachine_types.go
+++ b/api/v1alpha1/proxmoxmachine_types.go
@@ -115,12 +115,13 @@ type ProxmoxMachineSpec struct {
 	MetadataSettings *MetadataSettings `json:"metadataSettings,omitempty"`
 
 	// AllowedNodes specifies all Proxmox nodes which will be considered
-	// for operations. This implies that VMs can be cloned on different nodes from
-	// the node which holds the VM template.
+	// for operations. This implies that VMs can be cloned on different nodes.
 	//
 	// This field is optional and should only be set if you want to restrict
 	// the nodes where the VM can be cloned.
+	//
 	// If not set, the ProxmoxCluster will be used to determine the nodes.
+	//
 	// +optional
 	AllowedNodes []string `json:"allowedNodes,omitempty"`
 
@@ -176,8 +177,7 @@ const (
 // TemplateSource defines the source of the template VM.
 type TemplateSource struct {
 	// SourceNode is the initially selected proxmox node.
-	// This node will be used to locate the template VM, which will
-	// be used for cloning operations.
+	// SourceNode should be used together with TemplateID.
 	//
 	// Cloning will be performed according to the configuration.
 	// Setting the `Target` field will tell Proxmox to clone the
@@ -201,6 +201,12 @@ type TemplateSource struct {
 	// TemplateSelector defines MatchTags for looking up VM templates.
 	// +optional
 	TemplateSelector *TemplateSelector `json:"templateSelector,omitempty"`
+
+	// LocalStorage defines whether the VM template is stored on local storage.
+	// LocalStorage is mutually exclusive with the combination of (TemplateID, SourceNode).
+	// +kubebuilder:default=false
+	// +optional
+	LocalStorage *bool `json:"localStorage,omitempty"`
 }
 
 // VirtualMachineCloneSpec is information used to clone a virtual machine.
@@ -239,6 +245,7 @@ type VirtualMachineCloneSpec struct {
 
 	// Target node. Only allowed if the original VM is on shared storage.
 	// +optional
+	// Deprecated: Use `AllowedNodes` instead.
 	Target *string `json:"target,omitempty"`
 }
 
@@ -246,7 +253,8 @@ type VirtualMachineCloneSpec struct {
 type TemplateSelector struct {
 	// Specifies all tags to look for, when looking up the VM template.
 	// Passed tags must be an exact 1:1 match with the tags on the template you want to use.
-	// If multiple VM templates with the same set of tags are found, provisioning will fail.
+	// If multiple VM templates on the same Proxmox node with the same set of tags are found, provisioning will fail.
+	// If multiple VM templates on different Proxmox nodes with the same set of tags are found, the template on the node where the VM is scheduled will be used.
 	//
 	// +listType=set
 	// +kubebuilder:validation:items:Pattern=`^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$`
@@ -610,12 +618,20 @@ func (r *ProxmoxMachine) GetVirtualMachineID() int64 {
 	return -1
 }
 
-// GetTemplateID get the Proxmox template "vmid" used to provision this machine.
-func (r *ProxmoxMachine) GetTemplateID() int32 {
+// GetTemplateMap get the Proxmox template "sourceNode:vmid" used to provision this machine.
+func (r *ProxmoxMachine) GetTemplateMap() map[string]int32 {
 	if r.Spec.TemplateID != nil {
-		return *r.Spec.TemplateID
+		return map[string]int32{r.Spec.SourceNode: *r.Spec.TemplateID}
 	}
-	return -1
+	return nil
+}
+
+// GetLocalStorage reports whether the VM template used to provision this machine is stored on local storage.
+func (r *ProxmoxMachine) GetLocalStorage() bool { + if r.Spec.LocalStorage != nil { + return *r.Spec.LocalStorage + } + return false } // GetTemplateSelectorTags get the tags, the desired vm template should have. diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index c1951100..1537fd4c 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -981,6 +981,11 @@ func (in *TemplateSource) DeepCopyInto(out *TemplateSource) { *out = new(TemplateSelector) (*in).DeepCopyInto(*out) } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateSource. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index c9b8cd25..42b6b758 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -75,11 +75,11 @@ spec: allowedNodes: description: |- AllowedNodes specifies all Proxmox nodes which will be considered - for operations. This implies that VMs can be cloned on different nodes from - the node which holds the VM template. + for operations. This implies that VMs can be cloned on different nodes. This field is optional and should only be set if you want to restrict the nodes where the VM can be cloned. + If not set, the ProxmoxCluster will be used to determine the nodes. items: type: string @@ -149,6 +149,12 @@ spec: This is always done when you clone a normal VM. Create a Full clone by default. type: boolean + localStorage: + default: false + description: |- + LocalStorage defines whether the VM template stored on local storage. + Combination of (TemplateID, SourceNode) and LocalStorage is mutually exclusive. + type: boolean memoryMiB: description: |- MemoryMiB is the size of a virtual machine's memory, in MiB. @@ -613,8 +619,7 @@ spec: sourceNode: description: |- SourceNode is the initially selected proxmox node. - This node will be used to locate the template VM, which will - be used for cloning operations. + SourceNode should be used together with TemplateID. Cloning will be performed according to the configuration. Setting the `Target` field will tell Proxmox to clone the @@ -641,8 +646,9 @@ spec: type: array x-kubernetes-list-type: set target: - description: Target node. Only allowed if the original VM - is on shared storage. + description: |- + Target node. Only allowed if the original VM is on shared storage. + Deprecated: Use `AllowedNodes` instead. type: string templateID: description: TemplateID the vm_template vmid used for cloning @@ -657,7 +663,8 @@ spec: description: |- Specifies all tags to look for, when looking up the VM template. Passed tags must be an exact 1:1 match with the tags on the template you want to use. - If multiple VM templates with the same set of tags are found, provisioning will fail. + If multiple VM templates on same Proxmox node with the same set of tags are found, provisioning will fail. + If multiple VM templates on different Proxmox nodes with the same set of tags are found, template on node where vm scheduled will be used. 
items: pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 544e59d3..e8324705 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -97,11 +97,11 @@ spec: allowedNodes: description: |- AllowedNodes specifies all Proxmox nodes which will be considered - for operations. This implies that VMs can be cloned on different nodes from - the node which holds the VM template. + for operations. This implies that VMs can be cloned on different nodes. This field is optional and should only be set if you want to restrict the nodes where the VM can be cloned. + If not set, the ProxmoxCluster will be used to determine the nodes. items: type: string @@ -173,6 +173,12 @@ spec: This is always done when you clone a normal VM. Create a Full clone by default. type: boolean + localStorage: + default: false + description: |- + LocalStorage defines whether the VM template stored on local storage. + Combination of (TemplateID, SourceNode) and LocalStorage is mutually exclusive. + type: boolean memoryMiB: description: |- MemoryMiB is the size of a virtual machine's memory, in MiB. @@ -654,8 +660,7 @@ spec: sourceNode: description: |- SourceNode is the initially selected proxmox node. - This node will be used to locate the template VM, which will - be used for cloning operations. + SourceNode should be used together with TemplateID. Cloning will be performed according to the configuration. Setting the `Target` field will tell Proxmox to clone the @@ -682,8 +687,9 @@ spec: type: array x-kubernetes-list-type: set target: - description: Target node. Only allowed if the original - VM is on shared storage. + description: |- + Target node. Only allowed if the original VM is on shared storage. + Deprecated: Use `AllowedNodes` instead. type: string templateID: description: TemplateID the vm_template vmid used @@ -698,7 +704,8 @@ spec: description: |- Specifies all tags to look for, when looking up the VM template. Passed tags must be an exact 1:1 match with the tags on the template you want to use. - If multiple VM templates with the same set of tags are found, provisioning will fail. + If multiple VM templates on same Proxmox node with the same set of tags are found, provisioning will fail. + If multiple VM templates on different Proxmox nodes with the same set of tags are found, template on node where vm scheduled will be used. items: pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index ced008b4..8bb5af1b 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -68,11 +68,11 @@ spec: allowedNodes: description: |- AllowedNodes specifies all Proxmox nodes which will be considered - for operations. This implies that VMs can be cloned on different nodes from - the node which holds the VM template. + for operations. This implies that VMs can be cloned on different nodes. This field is optional and should only be set if you want to restrict the nodes where the VM can be cloned. + If not set, the ProxmoxCluster will be used to determine the nodes. 
items: type: string @@ -140,6 +140,12 @@ spec: This is always done when you clone a normal VM. Create a Full clone by default. type: boolean + localStorage: + default: false + description: |- + LocalStorage defines whether the VM template stored on local storage. + Combination of (TemplateID, SourceNode) and LocalStorage is mutually exclusive. + type: boolean memoryMiB: description: |- MemoryMiB is the size of a virtual machine's memory, in MiB. @@ -571,8 +577,7 @@ spec: sourceNode: description: |- SourceNode is the initially selected proxmox node. - This node will be used to locate the template VM, which will - be used for cloning operations. + SourceNode should be used together with TemplateID. Cloning will be performed according to the configuration. Setting the `Target` field will tell Proxmox to clone the @@ -598,8 +603,9 @@ spec: type: array x-kubernetes-list-type: set target: - description: Target node. Only allowed if the original VM is on shared - storage. + description: |- + Target node. Only allowed if the original VM is on shared storage. + Deprecated: Use `AllowedNodes` instead. type: string templateID: description: TemplateID the vm_template vmid used for cloning a new @@ -614,7 +620,8 @@ spec: description: |- Specifies all tags to look for, when looking up the VM template. Passed tags must be an exact 1:1 match with the tags on the template you want to use. - If multiple VM templates with the same set of tags are found, provisioning will fail. + If multiple VM templates on same Proxmox node with the same set of tags are found, provisioning will fail. + If multiple VM templates on different Proxmox nodes with the same set of tags are found, template on node where vm scheduled will be used. items: pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index baad08fa..b1dd86bd 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -79,11 +79,11 @@ spec: allowedNodes: description: |- AllowedNodes specifies all Proxmox nodes which will be considered - for operations. This implies that VMs can be cloned on different nodes from - the node which holds the VM template. + for operations. This implies that VMs can be cloned on different nodes. This field is optional and should only be set if you want to restrict the nodes where the VM can be cloned. + If not set, the ProxmoxCluster will be used to determine the nodes. items: type: string @@ -153,6 +153,12 @@ spec: This is always done when you clone a normal VM. Create a Full clone by default. type: boolean + localStorage: + default: false + description: |- + LocalStorage defines whether the VM template stored on local storage. + Combination of (TemplateID, SourceNode) and LocalStorage is mutually exclusive. + type: boolean memoryMiB: description: |- MemoryMiB is the size of a virtual machine's memory, in MiB. @@ -611,8 +617,7 @@ spec: sourceNode: description: |- SourceNode is the initially selected proxmox node. - This node will be used to locate the template VM, which will - be used for cloning operations. + SourceNode should be used together with TemplateID. Cloning will be performed according to the configuration. 
                        Setting the `Target` field will tell Proxmox to clone the
@@ -639,8 +644,9 @@ spec:
                      type: array
                      x-kubernetes-list-type: set
                    target:
-                      description: Target node. Only allowed if the original VM
-                        is on shared storage.
+                      description: |-
+                        Target node. Only allowed if the original VM is on shared storage.
+                        Deprecated: Use `AllowedNodes` instead.
                      type: string
                    templateID:
                      description: TemplateID the vm_template vmid used for cloning
@@ -655,7 +661,8 @@ spec:
                      description: |-
                        Specifies all tags to look for, when looking up the VM template.
                        Passed tags must be an exact 1:1 match with the tags on the template you want to use.
-                        If multiple VM templates with the same set of tags are found, provisioning will fail.
+                        If multiple VM templates on the same Proxmox node with the same set of tags are found, provisioning will fail.
+                        If multiple VM templates on different Proxmox nodes with the same set of tags are found, the template on the node where the VM is scheduled will be used.
                      items:
                        pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$
                        type: string
diff --git a/docs/advanced-setups.md b/docs/advanced-setups.md
index 38c8f24f..f190c456 100644
--- a/docs/advanced-setups.md
+++ b/docs/advanced-setups.md
@@ -182,7 +182,65 @@ Our provider is able to look up templates based on their attached tags, for `Pro
 For example, you can set the `TEMPLATE_TAGS="tag1,tag2"` environment variable. Your custom image will then be used when using the [auto-image](https://github.com/ionos-cloud/cluster-api-provider-ionoscloud/blob/main/templates/cluster-template-auto-image.yaml) template.
 
-Please note: Passed tags must be an exact 1:1 match with the tags on the template you want to use. The matched result must be unique. If multiple templates are found, provisioning will fail.
+**N.B.** Passed tags must be an exact 1:1 match with the tags on the template you want to use.
+
+### localStorage
+
+`false` (default)
+* Shared storage is assumed for the template.
+* The template must be accessible from any node.
+* Only one template with the matching tags should exist in the entire cluster.
+* If multiple templates are found, the operation fails.
+
+`true`
+* Local storage is assumed for templates.
+* Each node listed in `allowedNodes` is expected to have a copy of the template stored locally.
+* If any of the nodes in `allowedNodes` is missing the template, the operation fails.
+
+### Template lookup and cluster classes
+With the ClusterClasses provided in this repo, you can choose one of two ways to tell CAPMOX which
+Proxmox VM template to clone from in your cluster.
+
+1) Selector mode (tag-based), recommended for multi-node setups:
+```yaml
+spec:
+  topology:
+    variables:
+      - name: templateSelector
+        value:
+          matchTags: ["capmox","ubuntu-24.04","k8s-1.33"]
+```
+This enables `templateSelector.matchTags` and removes `sourceNode` and `templateID`.
+
+2) Explicit mode (sourceNode/templateID), the default if templateSelector is not set:
+```yaml
+spec:
+  topology:
+    variables:
+      - name: cloneSpec
+        value:
+          machineSpec:
+            controlPlane:
+              sourceNode: pve1
+              templateID: 100
+            workerNode:
+              sourceNode: pve1
+              templateID: 100
+            loadBalancer:
+              sourceNode: pve1
+              templateID: 100
+```
+
+If using local (not shared) storage on nodes, you can also set `localStorage: true` in the selector:
+```yaml
+spec:
+  topology:
+    variables:
+      - name: localStorage
+        value: true
+```
+See the earlier `localStorage` section for details. If using Explicit mode, `localStorage` is not applicable and should not
+be set; leave it at the default `false`.
+In explicit mode you're telling CAPMOX exactly which node and template ID to use.
+That implicitly pins the clone to the node where that template resides, regardless of whether the backing storage is
+local or shared.
 
 ## Proxmox RBAC with least privileges
 
@@ -308,6 +366,71 @@ spec:
 You can set either `ipv4PoolRef` or `ipv6PoolRef` or you can also set them both for dual-stack.
 It's up for you also to manage the IP Pool, you can choose a `GlobalInClusterIPPool` or an `InClusterIPPool`.
 
+## Additional Volumes
+By default, only a boot volume is created for each machine. If additional disks are required for data storage, they can be
+specified in ProxmoxMachineTemplates.
+
+```yaml
+kind: ProxmoxMachineTemplate
+spec:
+  template:
+    spec:
+      storage: local-lvm # Optional: a default storage to use when a volume doesn't set .storage
+      disks:
+        additionalVolumes:
+          - disk: scsi1
+            sizeGb: 200
+          - disk: scsi2 # target slot (e.g. scsi1, sata1, virtio1, ide2)
+            sizeGb: 80  # capacity in gigabytes
+            # Optional flags:
+            storage: my-nfs # Optional per-volume storage override. Uses .spec.template.spec.storage if omitted
+            format: qcow2   # Only specify if using file-backed storage. If omitted, default for disk is used.
+            discard: true
+            ioThread: true
+            ssd: true
+```
+In the same way, additionalVolumes can also be specified in ProxmoxClusters, ProxmoxClusterTemplates,
+and ProxmoxMachines. The flags format, discard, ioThread, and ssd are supported by this provider.
+See Proxmox [docs](https://pve.proxmox.com/pve-docs/qm.1.html#qm_hard_disk) for details about these flags.
+
+Alternatively, if using a cluster class, define additionalVolumes in your cluster:
+```yaml
+kind: Cluster
+spec:
+  topology:
+    class: proxmox-clusterclass-cilium-v0.1.0
+    variables:
+      - name: workerAdditionalVolumes
+        value:
+          - { disk: scsi1, sizeGb: 80, storage: my-lvm }
+          - { disk: ide1, sizeGb: 80, storage: my-zfs }
+      - name: controlPlaneAdditionalVolumes
+        value:
+          - { disk: virtio1, sizeGb: 80, storage: my-zfs }
+      - name: loadBalancerAdditionalVolumes
+        value:
+          - { disk: sata1, sizeGb: 80, storage: my-nfs, format: qcow2 }
+```
+To use the same storage for all machines of a given type, you can specify a `Storage` variable and then omit `storage`
+from the `workerAdditionalVolumes`. For example, for workers:
+```yaml
+kind: Cluster
+metadata:
+  labels:
+    cluster.x-k8s.io/proxmox-cluster-cni: cilium
+  name: capmox-cluster
+spec:
+  topology:
+    class: proxmox-clusterclass-cilium-v0.1.0
+    variables:
+      - name: workerStorage
+        value: my-lvm
+      - name: workerAdditionalVolumes
+        value:
+          - { disk: scsi1, sizeGb: 80 }
+          - { disk: scsi2, sizeGb: 80 }
+```
+
 ## Notes
 
 * Clusters with IPV6 only is supported.
diff --git a/internal/controller/proxmoxmachine_controller.go b/internal/controller/proxmoxmachine_controller.go
index 5f3d3850..24b2d2c4 100644
--- a/internal/controller/proxmoxmachine_controller.go
+++ b/internal/controller/proxmoxmachine_controller.go
@@ -91,6 +91,12 @@ func (r *ProxmoxMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 		return ctrl.Result{}, err
 	}
 
+	// Emit a deprecation warning if .spec.Target is set.
+	if proxmoxMachine.Spec.Target != nil {
+		logger.Info("DEPRECATION NOTICE: .spec.Target is deprecated and will be removed in a future release. Use .spec.AllowedNodes instead.",
+			"ProxmoxMachine", req.NamespacedName)
+	}
+
 	// Fetch the Machine.
machine, err := util.GetOwnerMachine(ctx, r.Client, proxmoxMachine.ObjectMeta) if err != nil { diff --git a/internal/service/scheduler/vmscheduler.go b/internal/service/scheduler/vmscheduler.go index 50ad6557..2d6161c1 100644 --- a/internal/service/scheduler/vmscheduler.go +++ b/internal/service/scheduler/vmscheduler.go @@ -45,22 +45,19 @@ func (err InsufficientMemoryError) Error() string { // ScheduleVM decides which node to a ProxmoxMachine should be scheduled on. // It requires the machine's ProxmoxCluster to have at least 1 allowed node. -func ScheduleVM(ctx context.Context, machineScope *scope.MachineScope) (string, error) { +func ScheduleVM(ctx context.Context, + machineScope *scope.MachineScope, + templateMap map[string]int32, + allowedNodes []string, +) (string, int32, error) { client := machineScope.InfraCluster.ProxmoxClient - // Use the default allowed nodes from the ProxmoxCluster. - allowedNodes := machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes schedulerHints := machineScope.InfraCluster.ProxmoxCluster.Spec.SchedulerHints locations := machineScope.InfraCluster.ProxmoxCluster.Status.NodeLocations.Workers if util.IsControlPlaneMachine(machineScope.Machine) { locations = machineScope.InfraCluster.ProxmoxCluster.Status.NodeLocations.ControlPlane } - // If ProxmoxMachine defines allowedNodes use them instead - if len(machineScope.ProxmoxMachine.Spec.AllowedNodes) > 0 { - allowedNodes = machineScope.ProxmoxMachine.Spec.AllowedNodes - } - - return selectNode(ctx, client, machineScope.ProxmoxMachine, locations, allowedNodes, schedulerHints) + return selectNode(ctx, client, machineScope.ProxmoxMachine, locations, allowedNodes, schedulerHints, templateMap) } func selectNode( @@ -70,22 +67,22 @@ func selectNode( locations []infrav1.NodeLocation, allowedNodes []string, schedulerHints *infrav1.SchedulerHints, -) (string, error) { + templateMap map[string]int32, +) (string, int32, error) { byMemory := make(sortByAvailableMemory, len(allowedNodes)) for i, nodeName := range allowedNodes { mem, err := client.GetReservableMemoryBytes(ctx, nodeName, schedulerHints.GetMemoryAdjustment()) if err != nil { - return "", err + return "", 0, err } byMemory[i] = nodeInfo{Name: nodeName, AvailableMemory: mem} } - sort.Sort(byMemory) requestedMemory := uint64(machine.Spec.MemoryMiB) * 1024 * 1024 // convert to bytes if requestedMemory > byMemory[0].AvailableMemory { // no more space on the node with the highest amount of available memory - return "", InsufficientMemoryError{ + return "", 0, InsufficientMemoryError{ node: byMemory[0].Name, available: byMemory[0].AvailableMemory, requested: requestedMemory, @@ -123,8 +120,8 @@ func selectNode( "resultNode", decision, ) } - - return decision, nil + templateID := templateMap[decision] + return decision, templateID, nil } type resourceClient interface { diff --git a/internal/service/scheduler/vmscheduler_test.go b/internal/service/scheduler/vmscheduler_test.go index 3251dea1..93a816d6 100644 --- a/internal/service/scheduler/vmscheduler_test.go +++ b/internal/service/scheduler/vmscheduler_test.go @@ -49,6 +49,7 @@ func miBytes(in uint64) uint64 { func TestSelectNode(t *testing.T) { allowedNodes := []string{"pve1", "pve2", "pve3"} + templateMap := map[string]int32{} var locations []infrav1.NodeLocation const requestMiB = 8 availableMem := map[string]uint64{ @@ -74,7 +75,7 @@ func TestSelectNode(t *testing.T) { client := fakeResourceClient(availableMem) - node, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, 
&infrav1.SchedulerHints{}) + node, _, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, &infrav1.SchedulerHints{}, templateMap) require.NoError(t, err) require.Equal(t, expectedNode, node) @@ -94,7 +95,7 @@ func TestSelectNode(t *testing.T) { client := fakeResourceClient(availableMem) - node, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, &infrav1.SchedulerHints{}) + node, _, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, &infrav1.SchedulerHints{}, templateMap) require.ErrorAs(t, err, &InsufficientMemoryError{}) require.Empty(t, node) @@ -112,6 +113,8 @@ func TestScheduleVM(t *testing.T) { require.NotNil(t, ctrlClient) ipamHelper := &ipam.Helper{} + templateMap := map[string]int32{} + allowedNodes := []string{"pve1", "pve2", "pve3"} proxmoxCluster := infrav1.ProxmoxCluster{ ObjectMeta: metav1.ObjectMeta{ @@ -181,7 +184,7 @@ func TestScheduleVM(t *testing.T) { fakeProxmoxClient.EXPECT().GetReservableMemoryBytes(context.Background(), "pve2", uint64(100)).Return(miBytes(20), nil) fakeProxmoxClient.EXPECT().GetReservableMemoryBytes(context.Background(), "pve3", uint64(100)).Return(miBytes(20), nil) - node, err := ScheduleVM(context.Background(), machineScope) + node, _, err := ScheduleVM(context.Background(), machineScope, templateMap, allowedNodes) require.NoError(t, err) require.Equal(t, "pve2", node) } diff --git a/internal/service/vmservice/find_test.go b/internal/service/vmservice/find_test.go index 503909ff..09800326 100644 --- a/internal/service/vmservice/find_test.go +++ b/internal/service/vmservice/find_test.go @@ -33,6 +33,8 @@ func TestFindVM_FindByNodeAndID(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) vm := newRunningVM() machineScope.ProxmoxMachine.Spec.VirtualMachineID = ptr.To(int64(vm.VMID)) + // this is used in LocateProxmoxNode function cannot be empty. 
+ machineScope.ProxmoxMachine.Spec.SourceNode = "node1" //nolint:goconst proxmoxClient.EXPECT().GetVM(ctx, "node1", int64(123)).Return(vm, nil).Once() diff --git a/internal/service/vmservice/helpers_test.go b/internal/service/vmservice/helpers_test.go index 034d596f..3d7a5271 100644 --- a/internal/service/vmservice/helpers_test.go +++ b/internal/service/vmservice/helpers_test.go @@ -107,14 +107,6 @@ func setupReconcilerTest(t *testing.T) (*scope.MachineScope, *proxmoxtest.MockCl infrav1alpha1.MachineFinalizer, }, }, - Spec: infrav1alpha1.ProxmoxMachineSpec{ - VirtualMachineCloneSpec: infrav1alpha1.VirtualMachineCloneSpec{ - TemplateSource: infrav1alpha1.TemplateSource{ - SourceNode: "node1", - TemplateID: ptr.To[int32](123), - }, - }, - }, } scheme := runtime.NewScheme() diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index 0f96645d..390fcd67 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -357,7 +357,6 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe } options := proxmox.VMCloneRequest{ - Node: scope.ProxmoxMachine.GetNode(), NewID: int(vmid), Name: scope.ProxmoxMachine.GetName(), } @@ -384,45 +383,77 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe if scope.ProxmoxMachine.Spec.Storage != nil { options.Storage = *scope.ProxmoxMachine.Spec.Storage } - if scope.ProxmoxMachine.Spec.Target != nil { - options.Target = *scope.ProxmoxMachine.Spec.Target - } if scope.InfraCluster.ProxmoxCluster.Status.NodeLocations == nil { scope.InfraCluster.ProxmoxCluster.Status.NodeLocations = new(infrav1alpha1.NodeLocations) } - // if no target was specified but we have a set of nodes defined in the spec, we want to evenly distribute - // the nodes across the cluster. 
- if scope.ProxmoxMachine.Spec.Target == nil && - (len(scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes) > 0 || len(scope.ProxmoxMachine.Spec.AllowedNodes) > 0) { - // select next node as a target - var err error - options.Target, err = selectNextNode(ctx, scope) - if err != nil { - if errors.As(err, &scheduler.InsufficientMemoryError{}) { - scope.SetFailureMessage(err) - scope.SetFailureReason(capierrors.InsufficientResourcesMachineError) - } - return proxmox.VMCloneResponse{}, err - } + // get localStorage + localStorage := scope.ProxmoxMachine.GetLocalStorage() + + // we first try to find templates, so we can use those during scheduling + var templateID int32 + + // return templateMap if TemplateID and SourceNode used + templateMap := scope.ProxmoxMachine.GetTemplateMap() + + // set allowedNodes (we need to use this in scheduler and template search) + // defaults to ProxmoxCluster allowedNodes + allowedNodes := scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes + + // if ProxmoxMachine defines allowedNodes use them instead + if len(scope.ProxmoxMachine.Spec.AllowedNodes) > 0 { + allowedNodes = scope.ProxmoxMachine.Spec.AllowedNodes + } + // set .spec.Target as allowedNodes (we use this in scheduler) + if scope.ProxmoxMachine.Spec.Target != nil && + (len(scope.ProxmoxMachine.Spec.AllowedNodes) == 0 || len(scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes) == 0) { + allowedNodes = []string{*scope.ProxmoxMachine.Spec.Target} } - templateID := scope.ProxmoxMachine.GetTemplateID() - if templateID == -1 { - var err error + // TemplateSelector should be used + if templateMap == nil { + // get templateSelectorTags from the spec templateSelectorTags := scope.ProxmoxMachine.GetTemplateSelectorTags() - options.Node, templateID, err = scope.InfraCluster.ProxmoxClient.FindVMTemplateByTags(ctx, templateSelectorTags) + // find templates based on tags and allowed nodes + templateMap, err = scope.InfraCluster.ProxmoxClient.FindVMTemplatesByTags(ctx, templateSelectorTags, allowedNodes, localStorage) + + // fail if templates are not found or multiple templates found if localstorage is false if err != nil { + scope.SetFailureMessage(err) if errors.Is(err, goproxmox.ErrTemplateNotFound) { - scope.SetFailureMessage(err) scope.SetFailureReason(capierrors.MachineStatusError("VMTemplateNotFound")) - conditions.MarkFalse(scope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.VMProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err) + } else if errors.Is(err, goproxmox.ErrMultipleTemplatesFound) { + scope.SetFailureReason(capierrors.MachineStatusError("MultipleTemplatesFound")) } + conditions.MarkFalse(scope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.VMProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err) return proxmox.VMCloneResponse{}, err } } + + // we have allowedNodes, so we use them to schedule the VM + options.Target, templateID, err = selectNextNode(ctx, scope, templateMap, allowedNodes) + if err != nil { + if errors.As(err, &scheduler.InsufficientMemoryError{}) { + scope.SetFailureMessage(err) + scope.SetFailureReason(capierrors.InsufficientResourcesMachineError) + } + return proxmox.VMCloneResponse{}, err + } + // if localStorage use same node for Template as Target + if localStorage { + options.Node = options.Target + } else { + // we use first and only template to set options.Node and templateID + for i, k := range templateMap { + options.Node = i + templateID = k + break + } + } + + // at last clone machine res, err := 
scope.InfraCluster.ProxmoxClient.CloneVM(ctx, int(templateID), options) if err != nil { return res, err diff --git a/internal/service/vmservice/vm_test.go b/internal/service/vmservice/vm_test.go index 18c3baa7..3d944daa 100644 --- a/internal/service/vmservice/vm_test.go +++ b/internal/service/vmservice/vm_test.go @@ -39,6 +39,7 @@ func TestReconcileVM_EverythingReady(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) vm := newRunningVM() machineScope.SetVirtualMachineID(int64(vm.VMID)) + machineScope.ProxmoxMachine.Spec.SourceNode = "node1" machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true @@ -60,6 +61,8 @@ func TestReconcileVM_QemuAgentCheckDisabled(t *testing.T) { machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true + + machineScope.ProxmoxMachine.Spec.SourceNode = "node1" machineScope.ProxmoxMachine.Spec.Checks = &infrav1alpha1.ProxmoxMachineChecks{ SkipQemuGuestAgent: ptr.To(true), } @@ -80,6 +83,7 @@ func TestReconcileVM_CloudInitCheckDisabled(t *testing.T) { machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true + machineScope.ProxmoxMachine.Spec.SourceNode = "node1" machineScope.ProxmoxMachine.Spec.Checks = &infrav1alpha1.ProxmoxMachineChecks{ SkipCloudInitStatus: ptr.To(true), } @@ -100,6 +104,7 @@ func TestReconcileVM_InitCheckDisabled(t *testing.T) { machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true + machineScope.ProxmoxMachine.Spec.SourceNode = "node1" machineScope.ProxmoxMachine.Spec.Checks = &infrav1alpha1.ProxmoxMachineChecks{ SkipCloudInitStatus: ptr.To(true), SkipQemuGuestAgent: ptr.To(true), @@ -112,8 +117,8 @@ func TestReconcileVM_InitCheckDisabled(t *testing.T) { require.Equal(t, infrav1alpha1.VirtualMachineStateReady, result.State) require.Equal(t, "10.10.10.10", machineScope.ProxmoxMachine.Status.Addresses[1].Address) } - func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { + ctx := context.Background() machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) @@ -121,7 +126,9 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") - machineScope.ProxmoxMachine.Spec.Target = ptr.To("node2") + machineScope.ProxmoxMachine.Spec.AllowedNodes = []string{"node2"} + machineScope.ProxmoxMachine.Spec.TemplateID = ptr.To(int32(123)) + machineScope.ProxmoxMachine.Spec.SourceNode = "node1" expectedOptions := proxmox.VMCloneRequest{ Node: "node1", Name: "test", @@ -133,10 +140,19 @@ 
func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { Storage: "storage", Target: "node2", } - response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} - proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() - requeue, err := ensureVirtualMachine(context.Background(), machineScope) + response := proxmox.VMCloneResponse{ + NewID: 123, + Task: newTask(), + } + proxmoxClient.EXPECT(). + GetReservableMemoryBytes(ctx, "node2", uint64(100)). + Return(uint64(5000), nil). + Once() + + proxmoxClient.EXPECT().CloneVM(ctx, 123, expectedOptions).Return(response, nil).Once() + + requeue, err := ensureVirtualMachine(ctx, machineScope) require.NoError(t, err) require.True(t, requeue) @@ -145,24 +161,28 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) } -func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector(t *testing.T) { +func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_SharedStorage(t *testing.T) { + ctx := context.Background() vmTemplateTags := []string{"foo", "bar"} + allowedNodes := []string{"node1", "node2"} machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ TemplateSource: infrav1alpha1.TemplateSource{ + LocalStorage: ptr.To(true), TemplateSelector: &infrav1alpha1.TemplateSelector{ MatchTags: vmTemplateTags, }, }, } + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(false) machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") - machineScope.ProxmoxMachine.Spec.Target = ptr.To("node2") + machineScope.ProxmoxMachine.Spec.AllowedNodes = allowedNodes expectedOptions := proxmox.VMCloneRequest{ Node: "node1", Name: "test", @@ -172,19 +192,99 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector(t *testing.T Pool: "pool", SnapName: "snap", Storage: "storage", - Target: "node2", + Target: "node1", } - proxmoxClient.EXPECT().FindVMTemplateByTags(context.Background(), vmTemplateTags).Return("node1", 123, nil).Once() + proxmoxClient.EXPECT(). + GetReservableMemoryBytes(ctx, "node1", uint64(100)). + Return(uint64(5000), nil). + Once() + + proxmoxClient.EXPECT(). + GetReservableMemoryBytes(ctx, "node2", uint64(100)). + Return(uint64(5000), nil). + Once() + + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(ctx, vmTemplateTags, allowedNodes, false). + Return(map[string]int32{"node1": int32(123), "node2": int32(124)}, nil). + Once() response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} - proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() + proxmoxClient.EXPECT(). + CloneVM(ctx, 123, expectedOptions). + Return(response, nil). 
+ Once() - requeue, err := ensureVirtualMachine(context.Background(), machineScope) + requeue, err := ensureVirtualMachine(ctx, machineScope) require.NoError(t, err) require.True(t, requeue) - require.Equal(t, "node2", *machineScope.ProxmoxMachine.Status.ProxmoxNode) + require.Equal(t, "node1", *machineScope.ProxmoxMachine.Status.ProxmoxNode) + require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) +} + +func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_LocalStorage(t *testing.T) { + ctx := context.Background() + vmTemplateTags := []string{"foo", "bar"} + allowedNodes := []string{"node1", "node2"} + + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + LocalStorage: ptr.To(false), + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } + machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") + machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) + machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) + machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") + machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") + machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(true) + machineScope.ProxmoxMachine.Spec.AllowedNodes = allowedNodes + expectedOptions := proxmox.VMCloneRequest{ + Node: "node1", + Name: "test", + Description: "test vm", + Format: "raw", + Full: 1, + Pool: "pool", + SnapName: "snap", + Storage: "storage", + Target: "node1", + } + + proxmoxClient.EXPECT(). + GetReservableMemoryBytes(ctx, "node1", uint64(100)). + Return(uint64(5000), nil). + Once() + + proxmoxClient.EXPECT(). + GetReservableMemoryBytes(ctx, "node2", uint64(100)). + Return(uint64(5000), nil). + Once() + + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(ctx, vmTemplateTags, allowedNodes, true). + Return(map[string]int32{"node1": int32(123), "node2": int32(124)}, nil). + Once() + + response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} + proxmoxClient.EXPECT(). + CloneVM(ctx, 123, expectedOptions). + Return(response, nil). 
+ Once() + + requeue, err := ensureVirtualMachine(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + + require.Equal(t, "node1", *machineScope.ProxmoxMachine.Status.ProxmoxNode) require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) } @@ -192,6 +292,8 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector(t *testing.T func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_VMTemplateNotFound(t *testing.T) { ctx := context.Background() vmTemplateTags := []string{"foo", "bar"} + localStorage := true + allowedNodes := []string{"node1", "node2"} machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ @@ -201,15 +303,16 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_VMTemplateNo }, }, } + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(true) machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") - machineScope.ProxmoxMachine.Spec.Target = ptr.To("node2") + machineScope.ProxmoxMachine.Spec.AllowedNodes = allowedNodes - proxmoxClient.EXPECT().FindVMTemplateByTags(context.Background(), vmTemplateTags).Return("", -1, goproxmox.ErrTemplateNotFound).Once() + proxmoxClient.EXPECT().FindVMTemplatesByTags(context.Background(), vmTemplateTags, allowedNodes, localStorage).Return(map[string]int32{}, goproxmox.ErrTemplateNotFound).Once() _, err := createVM(ctx, machineScope) @@ -219,18 +322,47 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_VMTemplateNo require.Contains(t, "VM template not found", err.Error()) } +// localstorage false. func TestEnsureVirtualMachine_CreateVM_SelectNode(t *testing.T) { + vmTemplateTags := []string{"foo", "bar"} + localStorage := false + allowedNodes := []string{"node3"} + machineScope, proxmoxClient, _ := setupReconcilerTest(t) - machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = []string{"node1", "node2", "node3"} + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } + machineScope.ProxmoxMachine.Spec.AllowedNodes = allowedNodes + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(localStorage) + + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(context.Background(), vmTemplateTags, allowedNodes, localStorage). + Return(map[string]int32{"node1": 123}, nil). 
+ Once() - selectNextNode = func(context.Context, *scope.MachineScope) (string, error) { - return "node3", nil + selectNextNode = func(context.Context, *scope.MachineScope, map[string]int32, []string) (string, int32, error) { + return "node3", 0, nil } t.Cleanup(func() { selectNextNode = scheduler.ScheduleVM }) - expectedOptions := proxmox.VMCloneRequest{Node: "node1", Name: "test", Target: "node3"} - response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} - proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() + expectedOptions := proxmox.VMCloneRequest{ + Node: "node1", + Name: "test", + Target: "node3", + } + + response := proxmox.VMCloneResponse{ + NewID: 123, + Task: newTask(), + } + proxmoxClient.EXPECT(). + CloneVM(context.Background(), 123, expectedOptions). + Return(response, nil). + Once() requeue, err := ensureVirtualMachine(context.Background(), machineScope) require.NoError(t, err) @@ -241,17 +373,78 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode(t *testing.T) { requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) } -func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes(t *testing.T) { +func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes_SharedStorage(t *testing.T) { + clusterAllowedNodes := []string{"node1", "node2", "node3", "node4"} + proxmoxMachineAllowedNodes := []string{"node1", "node2"} + machineScope, proxmoxClient, _ := setupReconcilerTest(t) - machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = []string{"node1", "node2", "node3", "node4"} - machineScope.ProxmoxMachine.Spec.AllowedNodes = []string{"node1", "node2"} + machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = clusterAllowedNodes + machineScope.ProxmoxMachine.Spec.AllowedNodes = proxmoxMachineAllowedNodes + // we need to search for templates, as we cannot do node scheduling without them + vmTemplateTags := []string{"foo", "bar"} + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(false) + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + LocalStorage: ptr.To(false), + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } + templateMap := map[string]int32{"node1": int32(122)} - selectNextNode = func(context.Context, *scope.MachineScope) (string, error) { - return "node2", nil + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(context.Background(), vmTemplateTags, proxmoxMachineAllowedNodes, false). + Return(templateMap, nil). 
+ Once() + + selectNextNode = func(context.Context, *scope.MachineScope, map[string]int32, []string) (string, int32, error) { + return "node2", 123, nil } t.Cleanup(func() { selectNextNode = scheduler.ScheduleVM }) expectedOptions := proxmox.VMCloneRequest{Node: "node1", Name: "test", Target: "node2"} + response := proxmox.VMCloneResponse{NewID: 122, Task: newTask()} + proxmoxClient.EXPECT().CloneVM(context.Background(), 122, expectedOptions).Return(response, nil).Once() + + requeue, err := ensureVirtualMachine(context.Background(), machineScope) + require.NoError(t, err) + require.True(t, requeue) + + require.Contains(t, []string{"node2", "node1"}, *machineScope.ProxmoxMachine.Status.ProxmoxNode) + require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) +} + +func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes_LocalStorage(t *testing.T) { + clusterAllowedNodes := []string{"node1", "node2", "node3", "node4"} + proxmoxMachineAllowedNodes := []string{"node1", "node2"} + vmTemplateTags := []string{"foo", "bar"} + + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = clusterAllowedNodes + machineScope.ProxmoxMachine.Spec.AllowedNodes = proxmoxMachineAllowedNodes + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(true) + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + LocalStorage: ptr.To(true), + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } + + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(context.Background(), vmTemplateTags, proxmoxMachineAllowedNodes, true). + Return(map[string]int32{"node1": int32(122), "node2": int32(123)}, nil). + Once() + + selectNextNode = func(context.Context, *scope.MachineScope, map[string]int32, []string) (string, int32, error) { + return "node2", 123, nil + } + t.Cleanup(func() { selectNextNode = scheduler.ScheduleVM }) + + expectedOptions := proxmox.VMCloneRequest{Node: "node2", Name: "test", Target: "node2"} response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() @@ -265,11 +458,24 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes(t *testing } func TestEnsureVirtualMachine_CreateVM_SelectNode_InsufficientMemory(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) - machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = []string{"node1"} + allowedNodes := []string{"node1"} + vmTemplateTags := []string{"foo"} + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = allowedNodes + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(context.Background(), vmTemplateTags, allowedNodes, false). + Return(map[string]int32{"node1": int32(122)}, nil). 
+ Once() - selectNextNode = func(context.Context, *scope.MachineScope) (string, error) { - return "", fmt.Errorf("error: %w", scheduler.InsufficientMemoryError{}) + selectNextNode = func(context.Context, *scope.MachineScope, map[string]int32, []string) (string, int32, error) { + return "", 0, fmt.Errorf("error: %w", scheduler.InsufficientMemoryError{}) } t.Cleanup(func() { selectNextNode = scheduler.ScheduleVM }) @@ -282,16 +488,37 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_InsufficientMemory(t *testing. } func TestEnsureVirtualMachine_CreateVM_VMIDRange(t *testing.T) { + vmTemplateTags := []string{"foo"} + allowedNodes := []string{"node1"} machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = allowedNodes + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } machineScope.ProxmoxMachine.Spec.VMIDRange = &infrav1alpha1.VMIDRange{ Start: 1000, End: 1002, } - expectedOptions := proxmox.VMCloneRequest{Node: "node1", NewID: 1001, Name: "test"} + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(context.Background(), vmTemplateTags, allowedNodes, false). + Return(map[string]int32{"node1": int32(123)}, nil). + Once() + + proxmoxClient.EXPECT(). + GetReservableMemoryBytes(context.Background(), "node1", uint64(100)). + Return(uint64(5000), nil). + Once() + + expectedOptions := proxmox.VMCloneRequest{Node: "node1", NewID: 1001, Name: "test", Target: "node1"} response := proxmox.VMCloneResponse{Task: newTask(), NewID: int64(1001)} proxmoxClient.Mock.On("CheckID", context.Background(), int64(1000)).Return(false, nil) proxmoxClient.Mock.On("CheckID", context.Background(), int64(1001)).Return(true, nil) + proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() requeue, err := ensureVirtualMachine(context.Background(), machineScope) @@ -326,6 +553,7 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { Start: 1000, End: 1002, } + machineScope.ProxmoxMachine.Spec.AllowedNodes = []string{"node1"} // Add a VM with ID 1000. // Make sure the check for a free vmid skips 1000 by ensuring the Proxmox CheckID function isn't called more than once. @@ -342,6 +570,14 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { VirtualMachineID: ptr.To(int64(1000)), }, } + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = + infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: []string{"foo"}, + }, + }, + } machine := clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "vm1000", @@ -367,7 +603,17 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { _, err = ensureVirtualMachine(context.Background(), machineScopeVMThousand) require.NoError(t, err) - expectedOptions := proxmox.VMCloneRequest{Node: "node1", NewID: 1002, Name: "test"} + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(context.Background(), []string{"foo"}, []string{"node1"}, false). + Return(map[string]int32{"node1": int32(123)}, nil). + Once() + + proxmoxClient.EXPECT(). + GetReservableMemoryBytes(context.Background(), "node1", uint64(100)). + Return(uint64(5000), nil). 
+ Once() + + expectedOptions := proxmox.VMCloneRequest{Node: "node1", NewID: 1002, Name: "test", Target: "node1"} response := proxmox.VMCloneResponse{Task: newTask(), NewID: int64(1002)} proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() proxmoxClient.Mock.On("CheckID", context.Background(), int64(1001)).Return(false, nil).Once() @@ -382,6 +628,13 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { func TestEnsureVirtualMachine_FindVM(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.SetVirtualMachineID(123) + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + SourceNode: "node1", + TemplateID: ptr.To[int32](123), + }, + } + vm := newStoppedVM() vm.VirtualMachineConfig.SMBios1 = "uuid=56603c36-46b9-4608-90ae-c731c15eae64" @@ -398,7 +651,12 @@ func TestEnsureVirtualMachine_FindVM(t *testing.T) { func TestEnsureVirtualMachine_UpdateVMLocation_Error(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.SetVirtualMachineID(123) - + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + SourceNode: "node1", + TemplateID: ptr.To[int32](123), + }, + } proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(nil, fmt.Errorf("not found")).Once() proxmoxClient.EXPECT().FindVMResource(context.Background(), uint64(123)).Return(nil, fmt.Errorf("unavailalbe")).Once() @@ -622,6 +880,12 @@ func TestReconcileVM_CloudInitFailed(t *testing.T) { machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + SourceNode: "node1", + TemplateID: ptr.To[int32](123), + }, + } proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() proxmoxClient.EXPECT().CloudInitStatus(context.Background(), vm).Return(false, goproxmox.ErrCloudInitFailed).Once() @@ -640,6 +904,12 @@ func TestReconcileVM_CloudInitRunning(t *testing.T) { machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + SourceNode: "node1", + TemplateID: ptr.To[int32](123), + }, + } proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() proxmoxClient.EXPECT().CloudInitStatus(context.Background(), vm).Return(true, nil).Once() diff --git a/internal/webhook/proxmoxmachine_webhook.go b/internal/webhook/proxmoxmachine_webhook.go index d560ed91..df1cb08e 100644 --- a/internal/webhook/proxmoxmachine_webhook.go +++ b/internal/webhook/proxmoxmachine_webhook.go @@ -53,6 +53,12 @@ func (p *ProxmoxMachine) ValidateCreate(_ context.Context, obj runtime.Object) ( return warnings, apierrors.NewBadRequest(fmt.Sprintf("expected a ProxmoxMachine but got 
%T", obj)) } + err = validateLocalStorage(machine) + if err != nil { + warnings = append(warnings, "localStorage is mutually exclusive with templateID/sourceNode") + return warnings, apierrors.NewBadRequest("localStorage is mutually exclusive with templateID/sourceNode") + } + err = validateNetworks(machine) if err != nil { warnings = append(warnings, fmt.Sprintf("cannot create proxmox machine %s", machine.GetName())) @@ -78,6 +84,12 @@ func (p *ProxmoxMachine) ValidateUpdate(_ context.Context, old, newObj runtime.O return warnings, apierrors.NewBadRequest("tags are immutable") } + err = validateLocalStorage(newMachine) + if err != nil { + warnings = append(warnings, "localStorage is mutually exclusive with templateID/sourceNode") + return warnings, apierrors.NewBadRequest("localStorage is mutually exclusive with templateID/sourceNode") + } + err = validateNetworks(newMachine) if err != nil { warnings = append(warnings, fmt.Sprintf("cannot update proxmox machine %s", newMachine.GetName())) @@ -92,6 +104,24 @@ func (p *ProxmoxMachine) ValidateDelete(_ context.Context, _ runtime.Object) (wa return nil, nil } +func validateLocalStorage(machine *infrav1.ProxmoxMachine) error { + gk, name := machine.GroupVersionKind().GroupKind(), machine.GetName() + + // if localStorage is set to true, then templateid/sourcenode must be nil + if machine.Spec.LocalStorage != nil && *machine.Spec.LocalStorage { + if machine.Spec.TemplateID != nil || machine.Spec.SourceNode != "" { + return apierrors.NewInvalid( + gk, + name, + field.ErrorList{ + field.Invalid( + field.NewPath("spec", "localStorage"), machine.Spec.LocalStorage, "localStorage is mutually exclusive with templateID/sourceNode"), + }) + } + } + return nil +} + func validateNetworks(machine *infrav1.ProxmoxMachine) error { if machine.Spec.Network == nil { return nil diff --git a/internal/webhook/proxmoxmachine_webhook_test.go b/internal/webhook/proxmoxmachine_webhook_test.go index c39bf0fa..cb282985 100644 --- a/internal/webhook/proxmoxmachine_webhook_test.go +++ b/internal/webhook/proxmoxmachine_webhook_test.go @@ -19,10 +19,9 @@ package webhook import ( "time" - corev1 "k8s.io/api/core/v1" - . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -44,6 +43,12 @@ var _ = Describe("Controller Test", func() { g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("spec.network.default.vlan: Invalid value"))) }) + It("should disallow use templateID/sourceNode if localStorage", func() { + machine := validProxmoxMachine("test-machine") + machine.Spec.LocalStorage = ptr.To(true) + g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("localStorage is mutually exclusive with templateID/sourceNode"))) + }) + It("should disallow invalid network mtu for additional device", func() { machine := validProxmoxMachine("test-machine") machine.Spec.Network.AdditionalDevices[0].MTU = ptr.To(uint16(1000)) diff --git a/internal/webhook/webhook_suite_test.go b/internal/webhook/webhook_suite_test.go index 35998b6c..585790ad 100644 --- a/internal/webhook/webhook_suite_test.go +++ b/internal/webhook/webhook_suite_test.go @@ -24,17 +24,16 @@ import ( "testing" "time" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "github.com/ionos-cloud/cluster-api-provider-proxmox/test/helpers" - //+kubebuilder:scaffold:imports + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/ionos-cloud/cluster-api-provider-proxmox/test/helpers" ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to diff --git a/pkg/proxmox/client.go b/pkg/proxmox/client.go index 3219f275..012c333c 100644 --- a/pkg/proxmox/client.go +++ b/pkg/proxmox/client.go @@ -30,7 +30,8 @@ type Client interface { ConfigureVM(ctx context.Context, vm *proxmox.VirtualMachine, options ...VirtualMachineOption) (*proxmox.Task, error) FindVMResource(ctx context.Context, vmID uint64) (*proxmox.ClusterResource, error) - FindVMTemplateByTags(ctx context.Context, templateTags []string) (string, int32, error) + + FindVMTemplatesByTags(ctx context.Context, templateTags []string, allowedNodes []string, localStorage bool) (map[string]int32, error) CheckID(ctx context.Context, vmID int64) (bool, error) diff --git a/pkg/proxmox/goproxmox/api_client.go b/pkg/proxmox/goproxmox/api_client.go index 4cf93aee..b0311cf4 100644 --- a/pkg/proxmox/goproxmox/api_client.go +++ b/pkg/proxmox/goproxmox/api_client.go @@ -142,10 +142,14 @@ func (c *APIClient) FindVMResource(ctx context.Context, vmID uint64) (*proxmox.C return nil, fmt.Errorf("unable to find VM with ID %d on any of the nodes", vmID) } -// FindVMTemplateByTags tries to find a VMID by its tags across the whole cluster. -func (c *APIClient) FindVMTemplateByTags(ctx context.Context, templateTags []string) (string, int32, error) { - vmTemplates := make([]*proxmox.ClusterResource, 0) +// FindVMTemplatesByTags finds VM templates by tags across the whole cluster. +func (c *APIClient) FindVMTemplatesByTags(ctx context.Context, templateTags []string, allowedNodes []string, localStorage bool) (map[string]int32, error) { + templates := make(map[string]int32) + // if for some reason there is not tags, we fail early and return error + if len(templateTags) == 0 { + return nil, fmt.Errorf("%w: no template tags defined", ErrTemplateNotFound) + } sortedTags := make([]string, len(templateTags)) for i, tag := range templateTags { // Proxmox VM tags are always lowercase @@ -156,35 +160,47 @@ func (c *APIClient) FindVMTemplateByTags(ctx context.Context, templateTags []str cluster, err := c.Cluster(ctx) if err != nil { - return "", -1, fmt.Errorf("cannot get cluster status: %w", err) + return nil, fmt.Errorf("cannot get cluster status: %w", err) } vmResources, err := cluster.Resources(ctx, "vm") if err != nil { - return "", -1, fmt.Errorf("could not list vm resources: %w", err) + return nil, fmt.Errorf("could not list VM resources: %w", err) } for _, vm := range vmResources { - if vm.Template == 0 { - continue - } - if len(vm.Tags) == 0 { + if vm.Template == 0 || len(vm.Tags) == 0 { continue } vmTags := strings.Split(vm.Tags, ";") slices.Sort(vmTags) + // if localstorage - template should be on all allowed nodes + if localStorage && !slices.Contains(allowedNodes, vm.Node) { + continue + } + if slices.Equal(vmTags, uniqueTags) { - vmTemplates = append(vmTemplates, vm) + // check if we have multiple templates per node + if _, exists := templates[vm.Node]; exists { + return nil, fmt.Errorf("%w: multiple VM templates found on node %q with tags %q", ErrMultipleTemplatesFound, vm.Node, strings.Join(templateTags, ";")) + } + templates[vm.Node] = int32(vm.VMID) } } - if n := len(vmTemplates); n != 1 { - return "", -1, fmt.Errorf("%w: found %d VM templates with tags %q", 
ErrTemplateNotFound, n, strings.Join(templateTags, ";")) + if (len(templates) != len(allowedNodes)) && localStorage { + return nil, fmt.Errorf("found %d templates on allowedNodes %q with tags %q", len(templates), strings.Join(allowedNodes, ","), strings.Join(templateTags, ";")) + } + + if !localStorage { + if n := len(templates); n != 1 { + return nil, fmt.Errorf("%w: found %d VM templates with tags %q", ErrTemplateNotFound, n, strings.Join(templateTags, ";")) + } } - return vmTemplates[0].Node, int32(vmTemplates[0].VMID), nil + return templates, nil } // DeleteVM deletes a VM based on the nodeName and vmID. diff --git a/pkg/proxmox/goproxmox/api_client_test.go b/pkg/proxmox/goproxmox/api_client_test.go index 2d7553b7..341fe46b 100644 --- a/pkg/proxmox/goproxmox/api_client_test.go +++ b/pkg/proxmox/goproxmox/api_client_test.go @@ -371,15 +371,30 @@ func TestProxmoxAPIClient_FindVMResource(t *testing.T) { } } -func TestProxmoxAPIClient_FindVMTemplateByTags(t *testing.T) { +func TestProxmoxAPIClient_FindVMTemplatesByTags(t *testing.T) { proxmoxClusterResources := proxmox.ClusterResources{ - &proxmox.ClusterResource{VMID: 101, Name: "k8s-node01", Node: "capmox01", Tags: ""}, - &proxmox.ClusterResource{VMID: 102, Name: "k8s-node02", Node: "capmox02", Tags: ""}, - &proxmox.ClusterResource{VMID: 150, Name: "template-without-tags", Node: "capmox01", Tags: "", Template: uint64(1)}, - &proxmox.ClusterResource{VMID: 201, Name: "ubuntu-22.04-k8s-v1.28.3", Node: "capmox01", Tags: "template;capmox;v1.28.3", Template: uint64(1)}, - &proxmox.ClusterResource{VMID: 202, Name: "ubuntu-22.04-k8s-v1.30.2", Node: "capmox02", Tags: "capmox;template;v1.30.2", Template: uint64(1)}, - &proxmox.ClusterResource{VMID: 301, Name: "ubuntu-22.04-k8s-v1.29.2", Node: "capmox02", Tags: "capmox;template;v1.29.2", Template: uint64(1)}, - &proxmox.ClusterResource{VMID: 302, Name: "ubuntu-22.04-k8s-v1.29.2", Node: "capmox02", Tags: "capmox;template;v1.29.2", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 101, Name: "template01", Node: "capmox01", Tags: ""}, + &proxmox.ClusterResource{VMID: 102, Name: "template02", Node: "capmox02", Tags: ""}, + + &proxmox.ClusterResource{VMID: 302, Name: "template-shared-storage", Node: "capmox01", Tags: "template-shared-storage", Template: uint64(1)}, + + &proxmox.ClusterResource{VMID: 303, Name: "template-local-storage-other-node", Node: "capmox01", Tags: "template-local-storage-other-node", Template: uint64(1)}, + + &proxmox.ClusterResource{VMID: 202, Name: "template-local-storage", Node: "capmox01", Tags: "template-local-storage", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 201, Name: "template-local-storage", Node: "capmox02", Tags: "template-local-storage", Template: uint64(1)}, + + &proxmox.ClusterResource{VMID: 401, Name: "template-duplicate", Node: "capmox01", Tags: "template-duplicate", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 402, Name: "template-duplicate", Node: "capmox01", Tags: "template-duplicate", Template: uint64(1)}, + + &proxmox.ClusterResource{VMID: 403, Name: "template-duplicate-same-node", Node: "capmox01", Tags: "template-duplicate;same-node", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 404, Name: "template-duplicate-same-node", Node: "capmox01", Tags: "template-duplicate;same-node", Template: uint64(1)}, + + &proxmox.ClusterResource{VMID: 503, Name: "template-duplicate-multiple-nodes", Node: "capmox01", Tags: "template-duplicate;multiple-nodes", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 504, Name: 
"template-duplicate-multiple-nodes", Node: "capmox01", Tags: "template-duplicate;multiple-nodes", Template: uint64(1)}, + + &proxmox.ClusterResource{VMID: 603, Name: "template-node1", Node: "capmox01", Tags: "template-extra-node", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 604, Name: "template-node2", Node: "capmox02", Tags: "template-extra-node", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 605, Name: "template-node3", Node: "capmox03", Tags: "template-extra-node", Template: uint64(1)}, } tests := []struct { name string @@ -387,84 +402,142 @@ func TestProxmoxAPIClient_FindVMTemplateByTags(t *testing.T) { vmTags []string fails bool err string - vmTemplateNode string - vmTemplateID int32 + vmTemplateNode map[string]int32 + localStorage bool + allowedNodes []string }{ { - name: "clusterstatus broken", + // if tags are not defined we fail + name: "no tags defined", http: []int{500, 200}, fails: true, - err: "cannot get cluster status: 500", + err: "VM template not found: no template tags defined", }, { - name: "resourcelisting broken", - http: []int{200, 500}, - fails: true, - err: "could not list vm resources: 500", + name: "clusterstatus broken", + http: []int{500, 200}, + fails: true, + err: "cannot get cluster status: 500", + vmTags: []string{"template", "capmox", "v1.28.3"}, }, { - name: "find-template", + name: "resourcelisting broken", + http: []int{200, 500}, + fails: true, + err: "could not list VM resources: 500", + vmTags: []string{"template", "capmox", "v1.28.3"}, + }, + { + // if shared template we can use template for one node. + name: "find-template-shared-localstorage-false", + http: []int{200, 200}, + vmTags: []string{"template-shared-storage"}, + fails: false, + err: "", + vmTemplateNode: map[string]int32{"capmox01": 302}, + localStorage: false, + allowedNodes: []string{"capmox01"}, + }, + { + // if shared template and localstorage + name: "find-template-shared-localstorage-true", http: []int{200, 200}, - vmTags: []string{"template", "capmox", "v1.28.3"}, + vmTags: []string{"template-shared-storage"}, fails: false, err: "", - vmTemplateNode: "capmox01", - vmTemplateID: 201, + vmTemplateNode: map[string]int32{"capmox01": 302}, + localStorage: true, + allowedNodes: []string{"capmox01"}, }, { - name: "find-template-nil", + name: "find-template-localstorage-true-not-allowed-node", http: []int{200, 200}, - vmTags: nil, + vmTags: []string{"template-local-storage-other-node"}, fails: true, - err: "VM template not found: found 0 VM templates with tags \"\"", - vmTemplateNode: "capmox01", - vmTemplateID: 201, + err: "found 0 templates on allowedNodes \"capmox02\" with tags \"template-local-storage-other-node\"", + vmTemplateNode: map[string]int32{"capmox01": 303}, + localStorage: true, + allowedNodes: []string{"capmox02"}, }, { - // Proxmox VM tags are always lowercase - name: "find-template-uppercase", + name: "find-template-localstorage-true", http: []int{200, 200}, - vmTags: []string{"TEMPLATE", "CAPMOX", "v1.28.3"}, + vmTags: []string{"template-local-storage"}, fails: false, err: "", - vmTemplateNode: "capmox01", - vmTemplateID: 201, + vmTemplateNode: map[string]int32{"capmox01": 202, "capmox02": 201}, + localStorage: true, + allowedNodes: []string{"capmox01", "capmox02"}, }, { - name: "find-template-unordered", + name: "find-template-localstorage-false", http: []int{200, 200}, - vmTags: []string{"template", "capmox", "v1.30.2"}, - fails: false, - err: "", - vmTemplateNode: "capmox02", - vmTemplateID: 202, + vmTags: []string{"template-local-storage"}, + 
fails: true, + err: "VM template not found: found 2 VM templates with tags \"template-local-storage\"", + vmTemplateNode: map[string]int32{"capmox01": 202, "capmox02": 201}, + localStorage: false, + allowedNodes: []string{"capmox01", "capmox02"}, }, { - name: "find-template-duplicate-tag", + name: "find-template-duplicate-shared", http: []int{200, 200}, - vmTags: []string{"template", "capmox", "capmox", "v1.30.2"}, - fails: false, - err: "", - vmTemplateNode: "capmox02", - vmTemplateID: 202, + vmTags: []string{"template-duplicate"}, + fails: true, + err: "Multiple templates found: multiple VM templates found on node \"capmox01\" with tags \"template-duplicate\"", + vmTemplateNode: map[string]int32{"capmox01": 401, "capmox02": 402}, + localStorage: false, + allowedNodes: []string{"capmox01", "capmox02"}, }, { - name: "find-multiple-templates", + name: "find-template-duplicate-localstorage", http: []int{200, 200}, - vmTags: []string{"template", "capmox"}, + vmTags: []string{"template-duplicate"}, fails: true, - err: "VM template not found: found 0 VM templates with tags \"template;capmox\"", - vmTemplateID: 69, - vmTemplateNode: "nice", + err: "Multiple templates found: multiple VM templates found on node \"capmox01\" with tags \"template-duplicate\"", + vmTemplateNode: map[string]int32{"capmox01": 401, "capmox02": 402}, + localStorage: true, + allowedNodes: []string{"capmox01", "capmox02"}, }, { - name: "find-multiple-templates", + name: "find-template-duplicate-shared-same-node", http: []int{200, 200}, - vmTags: []string{"template", "capmox", "v1.29.2"}, + vmTags: []string{"template-duplicate", "same-node"}, fails: true, - err: "VM template not found: found 2 VM templates with tags \"template;capmox;v1.29.2\"", - vmTemplateID: 69, - vmTemplateNode: "nice", + err: "Multiple templates found: multiple VM templates found on node \"capmox01\" with tags \"template-duplicate;same-node\"", + vmTemplateNode: map[string]int32{}, + localStorage: false, + allowedNodes: []string{"capmox01", "capmox02"}, + }, + { + name: "find-template-duplicate-localstorage-same-node", + http: []int{200, 200}, + vmTags: []string{"template-duplicate", "same-node"}, + fails: true, + err: "Multiple templates found: multiple VM templates found on node \"capmox01\" with tags \"template-duplicate;same-node\"", + vmTemplateNode: map[string]int32{}, + localStorage: true, + allowedNodes: []string{"capmox01", "capmox02"}, + }, + { + name: "find-template-duplicate-localstorage-multiple-nodes", + http: []int{200, 200}, + vmTags: []string{"template-duplicate", "multiple-nodes"}, + fails: true, + err: "Multiple templates found: multiple VM templates found on node \"capmox01\" with tags \"template-duplicate;multiple-nodes\"", + vmTemplateNode: map[string]int32{}, + localStorage: true, + allowedNodes: []string{"capmox01", "capmox02"}, + }, + { + name: "find-template-extra-node", + http: []int{200, 200}, + vmTags: []string{"template-extra-node"}, + fails: false, + err: "", + vmTemplateNode: map[string]int32{"capmox01": 603, "capmox02": 604}, + localStorage: true, + allowedNodes: []string{"capmox01", "capmox02"}, }, } @@ -477,14 +550,13 @@ func TestProxmoxAPIClient_FindVMTemplateByTags(t *testing.T) { httpmock.RegisterResponder(http.MethodGet, `=~/cluster/resources`, newJSONResponder(test.http[1], proxmoxClusterResources)) - vmTemplateNode, vmTemplateID, err := client.FindVMTemplateByTags(context.Background(), test.vmTags) + vmTemplateNode, err := client.FindVMTemplatesByTags(context.Background(), test.vmTags, test.allowedNodes, 
test.localStorage) if test.fails { require.Error(t, err) require.Equal(t, test.err, err.Error()) } else { require.NoError(t, err) - require.Equal(t, vmTemplateID, test.vmTemplateID) require.Equal(t, vmTemplateNode, test.vmTemplateNode) } }) diff --git a/pkg/proxmox/goproxmox/errors.go b/pkg/proxmox/goproxmox/errors.go index 967a0fb9..c6936bd7 100644 --- a/pkg/proxmox/goproxmox/errors.go +++ b/pkg/proxmox/goproxmox/errors.go @@ -8,4 +8,7 @@ var ( // ErrTemplateNotFound is returned when a VM template is not found. ErrTemplateNotFound = errors.New("VM template not found") + + // ErrMultipleTemplatesFound is returned when multiple VM templates are found. + ErrMultipleTemplatesFound = errors.New("Multiple templates found") ) diff --git a/pkg/proxmox/proxmoxtest/mock_client.go b/pkg/proxmox/proxmoxtest/mock_client.go index f4bf5516..7bb1a8c5 100644 --- a/pkg/proxmox/proxmoxtest/mock_client.go +++ b/pkg/proxmox/proxmoxtest/mock_client.go @@ -389,66 +389,63 @@ func (_c *MockClient_FindVMResource_Call) RunAndReturn(run func(context.Context, return _c } -// FindVMTemplateByTags provides a mock function with given fields: ctx, templateTags -func (_m *MockClient) FindVMTemplateByTags(ctx context.Context, templateTags []string) (string, int32, error) { - ret := _m.Called(ctx, templateTags) +// FindVMTemplatesByTags provides a mock function with given fields: ctx, templateTags, allowedNodes, localStorage +func (_m *MockClient) FindVMTemplatesByTags(ctx context.Context, templateTags []string, allowedNodes []string, localStorage bool) (map[string]int32, error) { + ret := _m.Called(ctx, templateTags, allowedNodes, localStorage) if len(ret) == 0 { - panic("no return value specified for FindVMTemplateByTags") + panic("no return value specified for FindVMTemplatesByTags") } - var r0 string - var r1 int32 - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, []string) (string, int32, error)); ok { - return rf(ctx, templateTags) + var r0 map[string]int32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []string, []string, bool) (map[string]int32, error)); ok { + return rf(ctx, templateTags, allowedNodes, localStorage) } - if rf, ok := ret.Get(0).(func(context.Context, []string) string); ok { - r0 = rf(ctx, templateTags) + if rf, ok := ret.Get(0).(func(context.Context, []string, []string, bool) map[string]int32); ok { + r0 = rf(ctx, templateTags, allowedNodes, localStorage) } else { - r0 = ret.Get(0).(string) - } - - if rf, ok := ret.Get(1).(func(context.Context, []string) int32); ok { - r1 = rf(ctx, templateTags) - } else { - r1 = ret.Get(1).(int32) + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]int32) + } } - if rf, ok := ret.Get(2).(func(context.Context, []string) error); ok { - r2 = rf(ctx, templateTags) + if rf, ok := ret.Get(1).(func(context.Context, []string, []string, bool) error); ok { + r1 = rf(ctx, templateTags, allowedNodes, localStorage) } else { - r2 = ret.Error(2) + r1 = ret.Error(1) } - return r0, r1, r2 + return r0, r1 } -// MockClient_FindVMTemplateByTags_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FindVMTemplateByTags' -type MockClient_FindVMTemplateByTags_Call struct { +// MockClient_FindVMTemplatesByTags_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FindVMTemplatesByTags' +type MockClient_FindVMTemplatesByTags_Call struct { *mock.Call } -// FindVMTemplateByTags is a helper method to define mock.On call +// FindVMTemplatesByTags is a helper method to define mock.On 
call // - ctx context.Context // - templateTags []string -func (_e *MockClient_Expecter) FindVMTemplateByTags(ctx interface{}, templateTags interface{}) *MockClient_FindVMTemplateByTags_Call { - return &MockClient_FindVMTemplateByTags_Call{Call: _e.mock.On("FindVMTemplateByTags", ctx, templateTags)} +// - allowedNodes []string +// - localStorage bool +func (_e *MockClient_Expecter) FindVMTemplatesByTags(ctx interface{}, templateTags interface{}, allowedNodes interface{}, localStorage interface{}) *MockClient_FindVMTemplatesByTags_Call { + return &MockClient_FindVMTemplatesByTags_Call{Call: _e.mock.On("FindVMTemplatesByTags", ctx, templateTags, allowedNodes, localStorage)} } -func (_c *MockClient_FindVMTemplateByTags_Call) Run(run func(ctx context.Context, templateTags []string)) *MockClient_FindVMTemplateByTags_Call { +func (_c *MockClient_FindVMTemplatesByTags_Call) Run(run func(ctx context.Context, templateTags []string, allowedNodes []string, localStorage bool)) *MockClient_FindVMTemplatesByTags_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].([]string)) + run(args[0].(context.Context), args[1].([]string), args[2].([]string), args[3].(bool)) }) return _c } -func (_c *MockClient_FindVMTemplateByTags_Call) Return(_a0 string, _a1 int32, _a2 error) *MockClient_FindVMTemplateByTags_Call { - _c.Call.Return(_a0, _a1, _a2) +func (_c *MockClient_FindVMTemplatesByTags_Call) Return(_a0 map[string]int32, _a1 error) *MockClient_FindVMTemplatesByTags_Call { + _c.Call.Return(_a0, _a1) return _c } -func (_c *MockClient_FindVMTemplateByTags_Call) RunAndReturn(run func(context.Context, []string) (string, int32, error)) *MockClient_FindVMTemplateByTags_Call { +func (_c *MockClient_FindVMTemplatesByTags_Call) RunAndReturn(run func(context.Context, []string, []string, bool) (map[string]int32, error)) *MockClient_FindVMTemplatesByTags_Call { _c.Call.Return(run) return _c } diff --git a/templates/cluster-class-calico.yaml b/templates/cluster-class-calico.yaml index 83e80a70..4c05ac99 100644 --- a/templates/cluster-class-calico.yaml +++ b/templates/cluster-class-calico.yaml @@ -166,6 +166,29 @@ spec: mode: type: string enum: ["ipvs","iptables"] + - name: templateSelector + required: false + schema: + openAPIV3Schema: + description: When set, we use tag-based template selection (and drop sourceNode/templateID). + type: object + properties: + matchTags: + type: array + items: + type: string + minItems: 1 + required: + - matchTags + - name: localStorage + required: false + schema: + openAPIV3Schema: + type: boolean + default: false + description: > + If true, CAPMOX will schedule clones from a template on the same node where the VM is created (works with + templateSelector or explicit sourceNode/templateID). Defaults to false. 
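    # Illustrative usage (placeholder values, not emitted by this class): a Cluster built from this
    # ClusterClass could opt into tag-based template lookup and node-local templates through
    # topology variables, roughly like:
    #
    #   spec:
    #     topology:
    #       variables:
    #         - name: templateSelector
    #           value:
    #             matchTags: ["template", "capmox"]
    #         - name: localStorage
    #           value: true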
- name: cloneSpec required: true schema: @@ -187,8 +210,6 @@ spec: properties: controlPlane: &machineSpec type: object - required: - - sourceNode properties: disks: type: object @@ -512,20 +533,162 @@ spec: path: /etc/kubernetes/admin.conf type: FileOrCreate name: kubeconfig + - name: ApplyLocalStorageFlag-ControlPlane + definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 kind: ProxmoxMachineTemplate matchResources: controlPlane: true jsonPatches: - - op: replace + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + + - name: ApplyLocalStorageFlag-Worker + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + + - name: ApplyLocalStorageFlag-LoadBalancer + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + +# (A) Selector mode: if templateSelector is provided, set it and REMOVE sourceNode/templateID + - name: TemplateSelectorMode-ControlPlane + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + - name: TemplateSelectorMode-Worker + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + - name: TemplateSelectorMode-LoadBalancer + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + + # (B) Explicit mode: when NO templateSelector, set sourceNode/templateID from cloneSpec + - name: ExplicitMode-ControlPlane + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add path: /spec/template/spec/sourceNode valueFrom: variable: cloneSpec.machineSpec.controlPlane.sourceNode - - op: replace + - op: add path: /spec/template/spec/templateID valueFrom: variable: cloneSpec.machineSpec.controlPlane.templateID + - name: 
ExplicitMode-Worker + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/sourceNode + valueFrom: + variable: cloneSpec.machineSpec.workerNode.sourceNode + - op: add + path: /spec/template/spec/templateID + valueFrom: + variable: cloneSpec.machineSpec.workerNode.templateID + - name: ExplicitMode-LoadBalancer + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/sourceNode + valueFrom: + variable: cloneSpec.machineSpec.loadBalancer.sourceNode + - op: add + path: /spec/template/spec/templateID + valueFrom: + variable: cloneSpec.machineSpec.loadBalancer.templateID - name: kube-proxy-setup description: "kube-proxy configuration" enabledIf: "{{ if eq .kubeProxy.mode \"ipvs\" }}true{{ end }}" @@ -573,23 +736,6 @@ spec: template: | - name: root sshAuthorizedKeys: {{ .cloneSpec.sshAuthorizedKeys }} - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: ProxmoxMachineTemplate - matchResources: - controlPlane: false - machineDeploymentClass: - names: - - proxmox-worker - jsonPatches: - - op: replace - path: /spec/template/spec/sourceNode - valueFrom: - variable: cloneSpec.machineSpec.workerNode.sourceNode - - op: replace - path: /spec/template/spec/templateID - valueFrom: - variable: cloneSpec.machineSpec.workerNode.templateID - name: LoadBalancerSetup description: "Configure LoadBalancer Node Initialisation" definitions: @@ -607,23 +753,6 @@ spec: template: | - name: root sshAuthorizedKeys: {{ .cloneSpec.sshAuthorizedKeys }} - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: ProxmoxMachineTemplate - matchResources: - controlPlane: false - machineDeploymentClass: - names: - - proxmox-loadbalancer - jsonPatches: - - op: replace - path: /spec/template/spec/sourceNode - valueFrom: - variable: cloneSpec.machineSpec.loadBalancer.sourceNode - - op: replace - path: /spec/template/spec/templateID - valueFrom: - variable: cloneSpec.machineSpec.loadBalancer.templateID - name: ControlPlaneNodeSockets description: "Configure Control Plane Sockets" enabledIf: "{{ if .cloneSpec.machineSpecs.controlPlane.numSockets }}true{{ end }}" diff --git a/templates/cluster-class-cilium.yaml b/templates/cluster-class-cilium.yaml index 7b696ae7..e8733f32 100644 --- a/templates/cluster-class-cilium.yaml +++ b/templates/cluster-class-cilium.yaml @@ -166,6 +166,29 @@ spec: mode: type: string enum: ["ipvs","iptables"] + - name: templateSelector + required: false + schema: + openAPIV3Schema: + description: When set, we use tag-based template selection (and drop sourceNode/templateID). + type: object + properties: + matchTags: + type: array + items: + type: string + minItems: 1 + required: + - matchTags + - name: localStorage + required: false + schema: + openAPIV3Schema: + type: boolean + default: false + description: > + If true, CAPMOX will schedule clones from a template on the same node where the VM is created (works with + templateSelector or explicit sourceNode/templateID). Defaults to false. 
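    # Assumed rendering, for reference only: when templateSelector is set, the TemplateSelectorMode
    # patches below remove sourceNode/templateID, so the resulting ProxmoxMachineTemplate spec carries
    # only the selector and the localStorage flag, roughly:
    #
    #   spec:
    #     template:
    #       spec:
    #         templateSelector:
    #           matchTags: ["template", "capmox"]
    #         localStorage: true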
- name: cloneSpec required: true schema: @@ -187,8 +210,6 @@ spec: properties: controlPlane: &machineSpec type: object - required: - - sourceNode properties: disks: type: object @@ -512,20 +533,162 @@ spec: path: /etc/kubernetes/admin.conf type: FileOrCreate name: kubeconfig + - name: ApplyLocalStorageFlag-ControlPlane + definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 kind: ProxmoxMachineTemplate matchResources: controlPlane: true jsonPatches: - - op: replace + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + + - name: ApplyLocalStorageFlag-Worker + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + + - name: ApplyLocalStorageFlag-LoadBalancer + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + +# (A) Selector mode: if templateSelector is provided, set it and REMOVE sourceNode/templateID + - name: TemplateSelectorMode-ControlPlane + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + - name: TemplateSelectorMode-Worker + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + - name: TemplateSelectorMode-LoadBalancer + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + + # (B) Explicit mode: when NO templateSelector, set sourceNode/templateID from cloneSpec + - name: ExplicitMode-ControlPlane + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add path: /spec/template/spec/sourceNode valueFrom: variable: cloneSpec.machineSpec.controlPlane.sourceNode - - op: replace + - op: add path: /spec/template/spec/templateID valueFrom: variable: cloneSpec.machineSpec.controlPlane.templateID + - name: 
ExplicitMode-Worker + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/sourceNode + valueFrom: + variable: cloneSpec.machineSpec.workerNode.sourceNode + - op: add + path: /spec/template/spec/templateID + valueFrom: + variable: cloneSpec.machineSpec.workerNode.templateID + - name: ExplicitMode-LoadBalancer + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/sourceNode + valueFrom: + variable: cloneSpec.machineSpec.loadBalancer.sourceNode + - op: add + path: /spec/template/spec/templateID + valueFrom: + variable: cloneSpec.machineSpec.loadBalancer.templateID - name: kube-proxy-setup description: "kube-proxy configuration" enabledIf: "{{ if eq .kubeProxy.mode \"ipvs\" }}true{{ end }}" @@ -573,23 +736,6 @@ spec: template: | - name: root sshAuthorizedKeys: {{ .cloneSpec.sshAuthorizedKeys }} - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: ProxmoxMachineTemplate - matchResources: - controlPlane: false - machineDeploymentClass: - names: - - proxmox-worker - jsonPatches: - - op: replace - path: /spec/template/spec/sourceNode - valueFrom: - variable: cloneSpec.machineSpec.workerNode.sourceNode - - op: replace - path: /spec/template/spec/templateID - valueFrom: - variable: cloneSpec.machineSpec.workerNode.templateID - name: LoadBalancerSetup description: "Configure LoadBalancer Node Initialisation" definitions: @@ -607,23 +753,6 @@ spec: template: | - name: root sshAuthorizedKeys: {{ .cloneSpec.sshAuthorizedKeys }} - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: ProxmoxMachineTemplate - matchResources: - controlPlane: false - machineDeploymentClass: - names: - - proxmox-loadbalancer - jsonPatches: - - op: replace - path: /spec/template/spec/sourceNode - valueFrom: - variable: cloneSpec.machineSpec.loadBalancer.sourceNode - - op: replace - path: /spec/template/spec/templateID - valueFrom: - variable: cloneSpec.machineSpec.loadBalancer.templateID - name: ControlPlaneNodeSockets description: "Configure Control Plane Sockets" enabledIf: "{{ if .cloneSpec.machineSpecs.controlPlane.numSockets }}true{{ end }}" diff --git a/templates/cluster-class.yaml b/templates/cluster-class.yaml index cecdd33d..10b21e27 100644 --- a/templates/cluster-class.yaml +++ b/templates/cluster-class.yaml @@ -136,6 +136,29 @@ spec: mode: type: string enum: ["ipvs","iptables"] + - name: templateSelector + required: false + schema: + openAPIV3Schema: + description: When set, we use tag-based template selection (and drop sourceNode/templateID). + type: object + properties: + matchTags: + type: array + items: + type: string + minItems: 1 + required: + - matchTags + - name: localStorage + required: false + schema: + openAPIV3Schema: + type: boolean + default: false + description: > + If true, CAPMOX will schedule clones from a template on the same node where the VM is created (works with + templateSelector or explicit sourceNode/templateID). Defaults to false. 
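    # Assumed rendering, for reference only: without templateSelector, the ExplicitMode patches below
    # keep the explicit clone source, so a control-plane ProxmoxMachineTemplate ends up roughly as:
    #
    #   spec:
    #     template:
    #       spec:
    #         sourceNode: pve1   # placeholder, taken from cloneSpec.machineSpec.controlPlane.sourceNode
    #         templateID: 100    # placeholder, taken from cloneSpec.machineSpec.controlPlane.templateID
    #         localStorage: false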
- name: cloneSpec required: true schema: @@ -157,8 +180,6 @@ spec: properties: controlPlane: &machineSpec type: object - required: - - sourceNode properties: disks: type: object @@ -482,20 +503,162 @@ spec: path: /etc/kubernetes/admin.conf type: FileOrCreate name: kubeconfig + - name: ApplyLocalStorageFlag-ControlPlane + definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 kind: ProxmoxMachineTemplate matchResources: controlPlane: true jsonPatches: - - op: replace + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + + - name: ApplyLocalStorageFlag-Worker + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + + - name: ApplyLocalStorageFlag-LoadBalancer + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + +# (A) Selector mode: if templateSelector is provided, set it and REMOVE sourceNode/templateID + - name: TemplateSelectorMode-ControlPlane + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + - name: TemplateSelectorMode-Worker + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + - name: TemplateSelectorMode-LoadBalancer + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + + # (B) Explicit mode: when NO templateSelector, set sourceNode/templateID from cloneSpec + - name: ExplicitMode-ControlPlane + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add path: /spec/template/spec/sourceNode valueFrom: variable: cloneSpec.machineSpec.controlPlane.sourceNode - - op: replace + - op: add path: /spec/template/spec/templateID valueFrom: variable: cloneSpec.machineSpec.controlPlane.templateID + - name: 
ExplicitMode-Worker + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/sourceNode + valueFrom: + variable: cloneSpec.machineSpec.workerNode.sourceNode + - op: add + path: /spec/template/spec/templateID + valueFrom: + variable: cloneSpec.machineSpec.workerNode.templateID + - name: ExplicitMode-LoadBalancer + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/sourceNode + valueFrom: + variable: cloneSpec.machineSpec.loadBalancer.sourceNode + - op: add + path: /spec/template/spec/templateID + valueFrom: + variable: cloneSpec.machineSpec.loadBalancer.templateID - name: kube-proxy-setup description: "kube-proxy configuration" enabledIf: "{{ if eq .kubeProxy.mode \"ipvs\" }}true{{ end }}" @@ -543,23 +706,6 @@ spec: template: | - name: root sshAuthorizedKeys: {{ .cloneSpec.sshAuthorizedKeys }} - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: ProxmoxMachineTemplate - matchResources: - controlPlane: false - machineDeploymentClass: - names: - - proxmox-worker - jsonPatches: - - op: replace - path: /spec/template/spec/sourceNode - valueFrom: - variable: cloneSpec.machineSpec.workerNode.sourceNode - - op: replace - path: /spec/template/spec/templateID - valueFrom: - variable: cloneSpec.machineSpec.workerNode.templateID - name: LoadBalancerSetup description: "Configure LoadBalancer Node Initialisation" definitions: @@ -577,23 +723,6 @@ spec: template: | - name: root sshAuthorizedKeys: {{ .cloneSpec.sshAuthorizedKeys }} - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: ProxmoxMachineTemplate - matchResources: - controlPlane: false - machineDeploymentClass: - names: - - proxmox-loadbalancer - jsonPatches: - - op: replace - path: /spec/template/spec/sourceNode - valueFrom: - variable: cloneSpec.machineSpec.loadBalancer.sourceNode - - op: replace - path: /spec/template/spec/templateID - valueFrom: - variable: cloneSpec.machineSpec.loadBalancer.templateID - name: ControlPlaneNodeSockets description: "Configure Control Plane Sockets" enabledIf: "{{ if .cloneSpec.machineSpecs.controlPlane.numSockets }}true{{ end }}" @@ -948,6 +1077,10 @@ spec: templateID: 100 format: qcow2 full: true + network: + default: + bridge: ${BRIDGE} + model: virtio --- kind: ProxmoxMachineTemplate apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 @@ -960,6 +1093,10 @@ spec: templateID: 100 format: qcow2 full: true + network: + default: + bridge: ${BRIDGE} + model: virtio --- kind: KubeadmConfigTemplate apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go index bbb2b5a6..b72cee30 100644 --- a/test/helpers/envtest.go +++ b/test/helpers/envtest.go @@ -111,7 +111,6 @@ func NewTestEnvironment(setupWebhook bool, pmClient proxmox.Client) *TestEnviron Paths: []string{filepath.Join(root, "..", "..", "config", "webhook")}, } } - if _, err := env.Start(); err != nil { err = kerrors.NewAggregate([]error{err, env.Stop()}) panic(err)
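
Illustrative sketch of how a caller could consume the map-based lookup introduced above. Only the provider API it uses (FindVMTemplatesByTags, GetTemplateMap, GetTemplateSelectorTags, GetLocalStorage) comes from this change; the selectTemplate helper, its signature, and the surrounding wiring are assumptions for illustration.

package example

import (
	"context"
	"fmt"

	infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1"
	capmox "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox"
)

// selectTemplate resolves the node and VMID to clone from for a machine that has been
// scheduled onto scheduledNode. It prefers an explicitly configured template and falls
// back to tag-based lookup, honouring the localStorage semantics added in this change.
func selectTemplate(ctx context.Context, c capmox.Client, machine *infrav1.ProxmoxMachine, allowedNodes []string, scheduledNode string) (string, int32, error) {
	// Explicit mode: sourceNode/templateID were set on the machine spec.
	for node, vmid := range machine.GetTemplateMap() {
		return node, vmid, nil
	}

	// Selector mode: look templates up by tags; with local storage this yields one VMID per allowed node.
	templates, err := c.FindVMTemplatesByTags(ctx, machine.GetTemplateSelectorTags(), allowedNodes, machine.GetLocalStorage())
	if err != nil {
		return "", -1, err
	}

	if machine.GetLocalStorage() {
		// The clone must use the template stored on the node the VM was scheduled to.
		vmid, ok := templates[scheduledNode]
		if !ok {
			return "", -1, fmt.Errorf("no template found on node %q", scheduledNode)
		}
		return scheduledNode, vmid, nil
	}

	// Shared storage: exactly one template is returned; clone from the node that holds it.
	for node, vmid := range templates {
		return node, vmid, nil
	}
	return "", -1, fmt.Errorf("no matching template found")
}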