From 461cfa3766ec6ab60303f7efffd9e63fa167c93d Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Tue, 25 Mar 2025 09:10:18 +0200 Subject: [PATCH 01/16] Allow multiple nodes have templates (use local storage) --- api/v1alpha1/proxmoxmachine_types.go | 3 +- ...ture.cluster.x-k8s.io_proxmoxclusters.yaml | 3 +- ...ster.x-k8s.io_proxmoxclustertemplates.yaml | 3 +- ...ture.cluster.x-k8s.io_proxmoxmachines.yaml | 3 +- ...ster.x-k8s.io_proxmoxmachinetemplates.yaml | 3 +- internal/service/vmservice/vm.go | 56 +++++++++++++------ internal/service/vmservice/vm_test.go | 6 +- pkg/proxmox/client.go | 2 +- pkg/proxmox/goproxmox/api_client.go | 26 ++++----- pkg/proxmox/goproxmox/api_client_test.go | 49 ++++++++-------- pkg/proxmox/goproxmox/errors.go | 2 + pkg/proxmox/proxmoxtest/mock_client.go | 49 ++++++++-------- 12 files changed, 115 insertions(+), 90 deletions(-) diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index 0c26c8a5..50aff619 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -246,7 +246,8 @@ type VirtualMachineCloneSpec struct { type TemplateSelector struct { // Specifies all tags to look for, when looking up the VM template. // Passed tags must be an exact 1:1 match with the tags on the template you want to use. - // If multiple VM templates with the same set of tags are found, provisioning will fail. + // If multiple VM templates on same Proxmox node with the same set of tags are found, provisioning will fail. + // If multiple VM templates on different Proxmox nodes with the same set of tags are found, template on node where vm scheduled will be used. // // +listType=set // +kubebuilder:validation:items:Pattern=`^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$` diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index c9b8cd25..e40b0f3e 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -657,7 +657,8 @@ spec: description: |- Specifies all tags to look for, when looking up the VM template. Passed tags must be an exact 1:1 match with the tags on the template you want to use. - If multiple VM templates with the same set of tags are found, provisioning will fail. + If multiple VM templates on same Proxmox node with the same set of tags are found, provisioning will fail. + If multiple VM templates on different Proxmox nodes with the same set of tags are found, template on node where vm scheduled will be used. items: pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 544e59d3..1c82ee01 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -698,7 +698,8 @@ spec: description: |- Specifies all tags to look for, when looking up the VM template. Passed tags must be an exact 1:1 match with the tags on the template you want to use. - If multiple VM templates with the same set of tags are found, provisioning will fail. + If multiple VM templates on same Proxmox node with the same set of tags are found, provisioning will fail. 
+ If multiple VM templates on different Proxmox nodes with the same set of tags are found, template on node where vm scheduled will be used. items: pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index ced008b4..601feb97 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -614,7 +614,8 @@ spec: description: |- Specifies all tags to look for, when looking up the VM template. Passed tags must be an exact 1:1 match with the tags on the template you want to use. - If multiple VM templates with the same set of tags are found, provisioning will fail. + If multiple VM templates on same Proxmox node with the same set of tags are found, provisioning will fail. + If multiple VM templates on different Proxmox nodes with the same set of tags are found, template on node where vm scheduled will be used. items: pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index baad08fa..a14519c7 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -655,7 +655,8 @@ spec: description: |- Specifies all tags to look for, when looking up the VM template. Passed tags must be an exact 1:1 match with the tags on the template you want to use. - If multiple VM templates with the same set of tags are found, provisioning will fail. + If multiple VM templates on same Proxmox node with the same set of tags are found, provisioning will fail. + If multiple VM templates on different Proxmox nodes with the same set of tags are found, template on node where vm scheduled will be used. items: pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ type: string diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index 0f96645d..27c97909 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -392,37 +392,59 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe scope.InfraCluster.ProxmoxCluster.Status.NodeLocations = new(infrav1alpha1.NodeLocations) } - // if no target was specified but we have a set of nodes defined in the spec, we want to evenly distribute - // the nodes across the cluster. 
- if scope.ProxmoxMachine.Spec.Target == nil && - (len(scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes) > 0 || len(scope.ProxmoxMachine.Spec.AllowedNodes) > 0) { - // select next node as a target + templateID := scope.ProxmoxMachine.GetTemplateID() + var templateMap = make(map[string]int32) + + // if templateID is -1 then we assume that templateSelector is used and we need to search templates by tags + if templateID == -1 { var err error - options.Target, err = selectNextNode(ctx, scope) + + templateSelectorTags := scope.ProxmoxMachine.GetTemplateSelectorTags() + templateMap, err = scope.InfraCluster.ProxmoxClient.FindVMTemplatesByTags(ctx, templateSelectorTags) if err != nil { - if errors.As(err, &scheduler.InsufficientMemoryError{}) { - scope.SetFailureMessage(err) - scope.SetFailureReason(capierrors.InsufficientResourcesMachineError) + scope.SetFailureMessage(err) + if errors.Is(err, goproxmox.ErrTemplateNotFound) { + scope.SetFailureReason(capierrors.MachineStatusError("VMTemplateNotFound")) + } else if errors.Is(err, goproxmox.ErrMultipleTemplatesFound) { + scope.SetFailureReason(capierrors.MachineStatusError("MultipleTemplatesFound")) } + conditions.MarkFalse(scope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.VMProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err) return proxmox.VMCloneResponse{}, err } } - templateID := scope.ProxmoxMachine.GetTemplateID() - if templateID == -1 { + // the nodes across the cluster. + if scope.ProxmoxMachine.Spec.Target == nil && + (len(scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes) > 0 || len(scope.ProxmoxMachine.Spec.AllowedNodes) > 0) { var err error - templateSelectorTags := scope.ProxmoxMachine.GetTemplateSelectorTags() - options.Node, templateID, err = scope.InfraCluster.ProxmoxClient.FindVMTemplateByTags(ctx, templateSelectorTags) - + // if template is on one node and allowedNodes has more then one node - set target to that node + if len(templateMap) == 1 && + (len(scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes) > 1 || len(scope.ProxmoxMachine.Spec.AllowedNodes) > 1) { + options.Target, templateID = func(m map[string]int32) (string, int32) { + for k, v := range m { + return k, v + } + return "", 0 // Fallback (should not happen if len(m) == 1) + }(templateMap) + } else { + // select next node as a target + options.Target, err = selectNextNode(ctx, scope) + } + // if we have template on that node, set Node and templateID if err != nil { - if errors.Is(err, goproxmox.ErrTemplateNotFound) { + if errors.As(err, &scheduler.InsufficientMemoryError{}) { scope.SetFailureMessage(err) - scope.SetFailureReason(capierrors.MachineStatusError("VMTemplateNotFound")) - conditions.MarkFalse(scope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.VMProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err) + scope.SetFailureReason(capierrors.InsufficientResourcesMachineError) } return proxmox.VMCloneResponse{}, err } } + // set template ID on selected node, and set options.Target to same node. 
+ if val, ok := templateMap[options.Target]; ok { + options.Node = options.Target + templateID = val + } + res, err := scope.InfraCluster.ProxmoxClient.CloneVM(ctx, int(templateID), options) if err != nil { return res, err diff --git a/internal/service/vmservice/vm_test.go b/internal/service/vmservice/vm_test.go index 18c3baa7..2450730f 100644 --- a/internal/service/vmservice/vm_test.go +++ b/internal/service/vmservice/vm_test.go @@ -164,7 +164,7 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector(t *testing.T machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") machineScope.ProxmoxMachine.Spec.Target = ptr.To("node2") expectedOptions := proxmox.VMCloneRequest{ - Node: "node1", + Node: "node2", Name: "test", Description: "test vm", Format: "raw", @@ -175,7 +175,7 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector(t *testing.T Target: "node2", } - proxmoxClient.EXPECT().FindVMTemplateByTags(context.Background(), vmTemplateTags).Return("node1", 123, nil).Once() + proxmoxClient.EXPECT().FindVMTemplatesByTags(context.Background(), vmTemplateTags).Return(map[string]int32{"node2": int32(123)}, nil).Once() response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() @@ -209,7 +209,7 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_VMTemplateNo machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") machineScope.ProxmoxMachine.Spec.Target = ptr.To("node2") - proxmoxClient.EXPECT().FindVMTemplateByTags(context.Background(), vmTemplateTags).Return("", -1, goproxmox.ErrTemplateNotFound).Once() + proxmoxClient.EXPECT().FindVMTemplatesByTags(context.Background(), vmTemplateTags).Return(map[string]int32{"": -1}, goproxmox.ErrTemplateNotFound).Once() _, err := createVM(ctx, machineScope) diff --git a/pkg/proxmox/client.go b/pkg/proxmox/client.go index 3219f275..271804e6 100644 --- a/pkg/proxmox/client.go +++ b/pkg/proxmox/client.go @@ -30,7 +30,7 @@ type Client interface { ConfigureVM(ctx context.Context, vm *proxmox.VirtualMachine, options ...VirtualMachineOption) (*proxmox.Task, error) FindVMResource(ctx context.Context, vmID uint64) (*proxmox.ClusterResource, error) - FindVMTemplateByTags(ctx context.Context, templateTags []string) (string, int32, error) + FindVMTemplatesByTags(ctx context.Context, templateTags []string) (map[string]int32, error) CheckID(ctx context.Context, vmID int64) (bool, error) diff --git a/pkg/proxmox/goproxmox/api_client.go b/pkg/proxmox/goproxmox/api_client.go index 4cf93aee..9dba8436 100644 --- a/pkg/proxmox/goproxmox/api_client.go +++ b/pkg/proxmox/goproxmox/api_client.go @@ -142,9 +142,9 @@ func (c *APIClient) FindVMResource(ctx context.Context, vmID uint64) (*proxmox.C return nil, fmt.Errorf("unable to find VM with ID %d on any of the nodes", vmID) } -// FindVMTemplateByTags tries to find a VMID by its tags across the whole cluster. -func (c *APIClient) FindVMTemplateByTags(ctx context.Context, templateTags []string) (string, int32, error) { - vmTemplates := make([]*proxmox.ClusterResource, 0) +// FindVMTemplatesByTags finds VM templates by tags across the whole cluster and ensures only one template per node. 
+func (c *APIClient) FindVMTemplatesByTags(ctx context.Context, templateTags []string) (map[string]int32, error) { + templates := make(map[string]int32) sortedTags := make([]string, len(templateTags)) for i, tag := range templateTags { @@ -156,19 +156,16 @@ func (c *APIClient) FindVMTemplateByTags(ctx context.Context, templateTags []str cluster, err := c.Cluster(ctx) if err != nil { - return "", -1, fmt.Errorf("cannot get cluster status: %w", err) + return nil, fmt.Errorf("cannot get cluster status: %w", err) } vmResources, err := cluster.Resources(ctx, "vm") if err != nil { - return "", -1, fmt.Errorf("could not list vm resources: %w", err) + return nil, fmt.Errorf("could not list VM resources: %w", err) } for _, vm := range vmResources { - if vm.Template == 0 { - continue - } - if len(vm.Tags) == 0 { + if vm.Template == 0 || len(vm.Tags) == 0 { continue } @@ -176,15 +173,18 @@ func (c *APIClient) FindVMTemplateByTags(ctx context.Context, templateTags []str slices.Sort(vmTags) if slices.Equal(vmTags, uniqueTags) { - vmTemplates = append(vmTemplates, vm) + if _, exists := templates[vm.Node]; exists { + return nil, fmt.Errorf("%w: multiple VM templates found on node %q with tags %q", ErrMultipleTemplatesFound, vm.Node, strings.Join(templateTags, ";")) + } + templates[vm.Node] = int32(vm.VMID) } } - if n := len(vmTemplates); n != 1 { - return "", -1, fmt.Errorf("%w: found %d VM templates with tags %q", ErrTemplateNotFound, n, strings.Join(templateTags, ";")) + if len(templates) == 0 { + return nil, fmt.Errorf("%w: no VM templates found with tags %q", ErrTemplateNotFound, strings.Join(templateTags, ";")) } - return vmTemplates[0].Node, int32(vmTemplates[0].VMID), nil + return templates, nil } // DeleteVM deletes a VM based on the nodeName and vmID. diff --git a/pkg/proxmox/goproxmox/api_client_test.go b/pkg/proxmox/goproxmox/api_client_test.go index 2d7553b7..db75c3b8 100644 --- a/pkg/proxmox/goproxmox/api_client_test.go +++ b/pkg/proxmox/goproxmox/api_client_test.go @@ -371,7 +371,7 @@ func TestProxmoxAPIClient_FindVMResource(t *testing.T) { } } -func TestProxmoxAPIClient_FindVMTemplateByTags(t *testing.T) { +func TestProxmoxAPIClient_FindVMTemplatesByTags(t *testing.T) { proxmoxClusterResources := proxmox.ClusterResources{ &proxmox.ClusterResource{VMID: 101, Name: "k8s-node01", Node: "capmox01", Tags: ""}, &proxmox.ClusterResource{VMID: 102, Name: "k8s-node02", Node: "capmox02", Tags: ""}, @@ -380,6 +380,8 @@ func TestProxmoxAPIClient_FindVMTemplateByTags(t *testing.T) { &proxmox.ClusterResource{VMID: 202, Name: "ubuntu-22.04-k8s-v1.30.2", Node: "capmox02", Tags: "capmox;template;v1.30.2", Template: uint64(1)}, &proxmox.ClusterResource{VMID: 301, Name: "ubuntu-22.04-k8s-v1.29.2", Node: "capmox02", Tags: "capmox;template;v1.29.2", Template: uint64(1)}, &proxmox.ClusterResource{VMID: 302, Name: "ubuntu-22.04-k8s-v1.29.2", Node: "capmox02", Tags: "capmox;template;v1.29.2", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 401, Name: "ubuntu-v1.28.3-1", Node: "capmox01", Tags: "ubuntu;capmox;v1.28.3", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 402, Name: "ubuntu-v1.28.3-2", Node: "capmox02", Tags: "ubuntu;capmox;v1.28.3", Template: uint64(1)}, } tests := []struct { name string @@ -387,8 +389,7 @@ func TestProxmoxAPIClient_FindVMTemplateByTags(t *testing.T) { vmTags []string fails bool err string - vmTemplateNode string - vmTemplateID int32 + vmTemplateNode map[string]int32 }{ { name: "clusterstatus broken", @@ -400,7 +401,7 @@ func TestProxmoxAPIClient_FindVMTemplateByTags(t 
*testing.T) { name: "resourcelisting broken", http: []int{200, 500}, fails: true, - err: "could not list vm resources: 500", + err: "could not list VM resources: 500", }, { name: "find-template", @@ -408,17 +409,15 @@ func TestProxmoxAPIClient_FindVMTemplateByTags(t *testing.T) { vmTags: []string{"template", "capmox", "v1.28.3"}, fails: false, err: "", - vmTemplateNode: "capmox01", - vmTemplateID: 201, + vmTemplateNode: map[string]int32{"capmox01": 201}, }, { name: "find-template-nil", http: []int{200, 200}, vmTags: nil, fails: true, - err: "VM template not found: found 0 VM templates with tags \"\"", - vmTemplateNode: "capmox01", - vmTemplateID: 201, + err: "VM template not found: no VM templates found with tags \"\"", + vmTemplateNode: map[string]int32{"capmox01": 201}, }, { // Proxmox VM tags are always lowercase @@ -427,8 +426,7 @@ func TestProxmoxAPIClient_FindVMTemplateByTags(t *testing.T) { vmTags: []string{"TEMPLATE", "CAPMOX", "v1.28.3"}, fails: false, err: "", - vmTemplateNode: "capmox01", - vmTemplateID: 201, + vmTemplateNode: map[string]int32{"capmox01": 201}, }, { name: "find-template-unordered", @@ -436,8 +434,7 @@ func TestProxmoxAPIClient_FindVMTemplateByTags(t *testing.T) { vmTags: []string{"template", "capmox", "v1.30.2"}, fails: false, err: "", - vmTemplateNode: "capmox02", - vmTemplateID: 202, + vmTemplateNode: map[string]int32{"capmox02": 202}, }, { name: "find-template-duplicate-tag", @@ -445,26 +442,31 @@ func TestProxmoxAPIClient_FindVMTemplateByTags(t *testing.T) { vmTags: []string{"template", "capmox", "capmox", "v1.30.2"}, fails: false, err: "", - vmTemplateNode: "capmox02", - vmTemplateID: 202, + vmTemplateNode: map[string]int32{"capmox02": 202}, }, { name: "find-multiple-templates", http: []int{200, 200}, vmTags: []string{"template", "capmox"}, fails: true, - err: "VM template not found: found 0 VM templates with tags \"template;capmox\"", - vmTemplateID: 69, - vmTemplateNode: "nice", + err: "VM template not found: no VM templates found with tags \"template;capmox\"", + vmTemplateNode: map[string]int32{"nice": 69}, }, { - name: "find-multiple-templates", + name: "find-multiple-templates-same-node", http: []int{200, 200}, vmTags: []string{"template", "capmox", "v1.29.2"}, fails: true, - err: "VM template not found: found 2 VM templates with tags \"template;capmox;v1.29.2\"", - vmTemplateID: 69, - vmTemplateNode: "nice", + err: "Multiple templates found: multiple VM templates found on node \"capmox02\" with tags \"template;capmox;v1.29.2\"", + vmTemplateNode: map[string]int32{"nice": 11}, + }, + { + name: "find-multiple-templates-multiple-nodes", + http: []int{200, 200}, + vmTags: []string{"ubuntu", "capmox", "v1.28.3"}, + fails: false, + err: "", + vmTemplateNode: map[string]int32{"capmox01": 401, "capmox02": 402}, }, } @@ -477,14 +479,13 @@ func TestProxmoxAPIClient_FindVMTemplateByTags(t *testing.T) { httpmock.RegisterResponder(http.MethodGet, `=~/cluster/resources`, newJSONResponder(test.http[1], proxmoxClusterResources)) - vmTemplateNode, vmTemplateID, err := client.FindVMTemplateByTags(context.Background(), test.vmTags) + vmTemplateNode, err := client.FindVMTemplatesByTags(context.Background(), test.vmTags) if test.fails { require.Error(t, err) require.Equal(t, test.err, err.Error()) } else { require.NoError(t, err) - require.Equal(t, vmTemplateID, test.vmTemplateID) require.Equal(t, vmTemplateNode, test.vmTemplateNode) } }) diff --git a/pkg/proxmox/goproxmox/errors.go b/pkg/proxmox/goproxmox/errors.go index 967a0fb9..1dca7549 100644 --- 
a/pkg/proxmox/goproxmox/errors.go +++ b/pkg/proxmox/goproxmox/errors.go @@ -8,4 +8,6 @@ var ( // ErrTemplateNotFound is returned when a VM template is not found. ErrTemplateNotFound = errors.New("VM template not found") + + ErrMultipleTemplatesFound = errors.New("Multiple templates found") ) diff --git a/pkg/proxmox/proxmoxtest/mock_client.go b/pkg/proxmox/proxmoxtest/mock_client.go index f4bf5516..932e490b 100644 --- a/pkg/proxmox/proxmoxtest/mock_client.go +++ b/pkg/proxmox/proxmoxtest/mock_client.go @@ -389,66 +389,61 @@ func (_c *MockClient_FindVMResource_Call) RunAndReturn(run func(context.Context, return _c } -// FindVMTemplateByTags provides a mock function with given fields: ctx, templateTags -func (_m *MockClient) FindVMTemplateByTags(ctx context.Context, templateTags []string) (string, int32, error) { +// FindVMTemplatesByTags provides a mock function with given fields: ctx, templateTags +func (_m *MockClient) FindVMTemplatesByTags(ctx context.Context, templateTags []string) (map[string]int32, error) { ret := _m.Called(ctx, templateTags) if len(ret) == 0 { - panic("no return value specified for FindVMTemplateByTags") + panic("no return value specified for FindVMTemplatesByTags") } - var r0 string - var r1 int32 - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, []string) (string, int32, error)); ok { + var r0 map[string]int32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []string) (map[string]int32, error)); ok { return rf(ctx, templateTags) } - if rf, ok := ret.Get(0).(func(context.Context, []string) string); ok { + if rf, ok := ret.Get(0).(func(context.Context, []string) map[string]int32); ok { r0 = rf(ctx, templateTags) } else { - r0 = ret.Get(0).(string) + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]int32) + } } - if rf, ok := ret.Get(1).(func(context.Context, []string) int32); ok { + if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok { r1 = rf(ctx, templateTags) } else { - r1 = ret.Get(1).(int32) - } - - if rf, ok := ret.Get(2).(func(context.Context, []string) error); ok { - r2 = rf(ctx, templateTags) - } else { - r2 = ret.Error(2) + r1 = ret.Error(1) } - return r0, r1, r2 + return r0, r1 } -// MockClient_FindVMTemplateByTags_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FindVMTemplateByTags' -type MockClient_FindVMTemplateByTags_Call struct { +// MockClient_FindVMTemplatesByTags_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FindVMTemplatesByTags' +type MockClient_FindVMTemplatesByTags_Call struct { *mock.Call } -// FindVMTemplateByTags is a helper method to define mock.On call +// FindVMTemplatesByTags is a helper method to define mock.On call // - ctx context.Context // - templateTags []string -func (_e *MockClient_Expecter) FindVMTemplateByTags(ctx interface{}, templateTags interface{}) *MockClient_FindVMTemplateByTags_Call { - return &MockClient_FindVMTemplateByTags_Call{Call: _e.mock.On("FindVMTemplateByTags", ctx, templateTags)} +func (_e *MockClient_Expecter) FindVMTemplatesByTags(ctx interface{}, templateTags interface{}) *MockClient_FindVMTemplatesByTags_Call { + return &MockClient_FindVMTemplatesByTags_Call{Call: _e.mock.On("FindVMTemplatesByTags", ctx, templateTags)} } -func (_c *MockClient_FindVMTemplateByTags_Call) Run(run func(ctx context.Context, templateTags []string)) *MockClient_FindVMTemplateByTags_Call { +func (_c *MockClient_FindVMTemplatesByTags_Call) Run(run func(ctx context.Context, templateTags 
[]string)) *MockClient_FindVMTemplatesByTags_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].([]string)) }) return _c } -func (_c *MockClient_FindVMTemplateByTags_Call) Return(_a0 string, _a1 int32, _a2 error) *MockClient_FindVMTemplateByTags_Call { - _c.Call.Return(_a0, _a1, _a2) +func (_c *MockClient_FindVMTemplatesByTags_Call) Return(_a0 map[string]int32, _a1 error) *MockClient_FindVMTemplatesByTags_Call { + _c.Call.Return(_a0, _a1) return _c } -func (_c *MockClient_FindVMTemplateByTags_Call) RunAndReturn(run func(context.Context, []string) (string, int32, error)) *MockClient_FindVMTemplateByTags_Call { +func (_c *MockClient_FindVMTemplatesByTags_Call) RunAndReturn(run func(context.Context, []string) (map[string]int32, error)) *MockClient_FindVMTemplatesByTags_Call { _c.Call.Return(run) return _c } From 0962223b6b228512ee0686b88a17f44701196586 Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Tue, 25 Mar 2025 09:20:40 +0200 Subject: [PATCH 02/16] Add back missing line --- internal/service/vmservice/vm.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index 27c97909..2d97768e 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -413,6 +413,7 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe } } + // if no target was specified but we have a set of nodes defined in the spec, we want to evenly distribute // the nodes across the cluster. if scope.ProxmoxMachine.Spec.Target == nil && (len(scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes) > 0 || len(scope.ProxmoxMachine.Spec.AllowedNodes) > 0) { From 47634174c957ee36e5fbd5211f08535734d6583f Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Mon, 14 Apr 2025 10:31:09 +0300 Subject: [PATCH 03/16] Update localstorage support --- .github/actionlint.yaml | 10 +- api/v1alpha1/proxmoxmachine_types.go | 36 +- api/v1alpha1/zz_generated.deepcopy.go | 5 + ...ture.cluster.x-k8s.io_proxmoxclusters.yaml | 22 +- ...ster.x-k8s.io_proxmoxclustertemplates.yaml | 22 +- ...ture.cluster.x-k8s.io_proxmoxmachines.yaml | 22 +- ...ster.x-k8s.io_proxmoxmachinetemplates.yaml | 22 +- examples/cluster-calico.yaml | 114 +++--- examples/cluster-cilium.yaml | 114 +++--- examples/cluster.yaml | 114 +++--- go.sum | 1 + internal/service/scheduler/vmscheduler.go | 27 +- .../service/scheduler/vmscheduler_test.go | 9 +- internal/service/vmservice/find_test.go | 16 +- internal/service/vmservice/helpers_test.go | 8 - internal/service/vmservice/vm.go | 103 +++-- internal/service/vmservice/vm_test.go | 382 ++++++++++++++++-- pkg/proxmox/client.go | 5 +- pkg/proxmox/goproxmox/api_client.go | 52 ++- pkg/proxmox/goproxmox/api_client_test.go | 167 +++++--- pkg/proxmox/goproxmox/errors.go | 4 + pkg/proxmox/proxmoxtest/mock_client.go | 88 +++- 22 files changed, 956 insertions(+), 387 deletions(-) diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml index 06172822..9ad1fe5d 100644 --- a/.github/actionlint.yaml +++ b/.github/actionlint.yaml @@ -2,11 +2,11 @@ self-hosted-runner: # Labels of self-hosted runner in array of strings. labels: - - "self-hosted" - - "proxmox" - - "e2e" - - "capmox" - - "dcd-playground" + - "self-hosted" + - "proxmox" + - "e2e" + - "capmox" + - "dcd-playground" # Configuration variables in array of strings defined in your repository or # organization. `null` means disabling configuration variables check. # Empty array means no configuration variable is allowed. 
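The proxmoxmachine_types.go changes directly below replace the single-value GetTemplateID accessor with GetTemplateMap and add a GetLocalStorage helper. As a rough, self-contained sketch of the intended accessor semantics (the machineSpec struct here is a simplified stand-in for the real ProxmoxMachineSpec, not the provider's code):

```go
package main

import "fmt"

// machineSpec is a trimmed stand-in for ProxmoxMachineSpec, carrying only
// the fields relevant to template selection in this patch.
type machineSpec struct {
	SourceNode   string
	TemplateID   *int32
	LocalStorage *bool
}

// getTemplateMap mirrors the semantics of the new GetTemplateMap accessor:
// an explicit TemplateID yields a one-entry sourceNode->vmid map, while nil
// signals that the tag-based TemplateSelector lookup should be used instead.
func getTemplateMap(s machineSpec) map[string]int32 {
	if s.TemplateID != nil {
		return map[string]int32{s.SourceNode: *s.TemplateID}
	}
	return nil
}

// getLocalStorage mirrors GetLocalStorage: an unset flag defaults to false.
func getLocalStorage(s machineSpec) bool {
	return s.LocalStorage != nil && *s.LocalStorage
}

func main() {
	id := int32(100)
	explicit := machineSpec{SourceNode: "pve1", TemplateID: &id}
	fmt.Println(getTemplateMap(explicit), getLocalStorage(explicit)) // map[pve1:100] false

	bySelector := machineSpec{}
	fmt.Println(getTemplateMap(bySelector) == nil) // true -> selector lookup
}
```

Returning nil rather than an empty map keeps the "use the selector" branch in createVM a plain nil check.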
diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index 50aff619..8954e99c 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -115,12 +115,13 @@ type ProxmoxMachineSpec struct { MetadataSettings *MetadataSettings `json:"metadataSettings,omitempty"` // AllowedNodes specifies all Proxmox nodes which will be considered - // for operations. This implies that VMs can be cloned on different nodes from - // the node which holds the VM template. + // for operations. This implies that VMs can be cloned on different nodes. // // This field is optional and should only be set if you want to restrict // the nodes where the VM can be cloned. - // If not set, the ProxmoxCluster will be used to determine the nodes. + // + // if set overrides ProxmoxCluster.spec.AllowedNodes + // if not set node discovery will be used // +optional AllowedNodes []string `json:"allowedNodes,omitempty"` @@ -176,8 +177,7 @@ const ( // TemplateSource defines the source of the template VM. type TemplateSource struct { // SourceNode is the initially selected proxmox node. - // This node will be used to locate the template VM, which will - // be used for cloning operations. + // SourceNode should be used together with TemplateID. // // Cloning will be performed according to the configuration. // Setting the `Target` field will tell Proxmox to clone the @@ -201,6 +201,12 @@ type TemplateSource struct { // TemplateSelector defines MatchTags for looking up VM templates. // +optional TemplateSelector *TemplateSelector `json:"templateSelector,omitempty"` + + // LocalStorage defines whether the VM template stored on local storage. + // Combination of (TemplateID, SourceNode, Target) and Localstorage is mutually exclusive. + // +kubebuilder:default=false + // +optional + LocalStorage *bool `json:"localStorage,omitempty"` } // VirtualMachineCloneSpec is information used to clone a virtual machine. @@ -237,7 +243,9 @@ type VirtualMachineCloneSpec struct { // +optional Storage *string `json:"storage,omitempty"` - // Target node. Only allowed if the original VM is on shared storage. + // Target node. Only allowed if the template is on shared storage. + // Requires templateID and sourceNode to be set. + // LocalStorage must be set to false. // +optional Target *string `json:"target,omitempty"` } @@ -611,12 +619,20 @@ func (r *ProxmoxMachine) GetVirtualMachineID() int64 { return -1 } -// GetTemplateID get the Proxmox template "vmid" used to provision this machine. -func (r *ProxmoxMachine) GetTemplateID() int32 { +// GetTemplateMap get the Proxmox template "sourceNode:vmid" used to provision this machine. +func (r *ProxmoxMachine) GetTemplateMap() map[string]int32 { if r.Spec.TemplateID != nil { - return *r.Spec.TemplateID + return map[string]int32{r.Spec.SourceNode: *r.Spec.TemplateID} } - return -1 + return nil +} + +// GetLocalStorage get the Proxmox local storage used to provision this machine. +func (r *ProxmoxMachine) GetLocalStorage() bool { + if r.Spec.LocalStorage != nil { + return *r.Spec.LocalStorage + } + return false } // GetTemplateSelectorTags get the tags, the desired vm template should have. 
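With the map-based accessor in place, createVM (later in this patch) resolves the scheduled target node against that map. A minimal illustration of that resolution, assuming a node-to-templateID map as produced by GetTemplateMap or FindVMTemplatesByTags; pickTemplate and its fallback rule are invented for this sketch and are not the provider's actual flow:

```go
package main

import (
	"fmt"
	"sort"
)

// pickTemplate resolves a node->templateID map against a scheduling target:
// prefer the template stored on the target node, otherwise fall back to the
// first node (in sorted order) that holds a matching template.
func pickTemplate(templates map[string]int32, target string) (node string, vmid int32, ok bool) {
	if id, found := templates[target]; found {
		return target, id, true
	}
	nodes := make([]string, 0, len(templates))
	for n := range templates {
		nodes = append(nodes, n)
	}
	if len(nodes) == 0 {
		return "", 0, false
	}
	sort.Strings(nodes) // deterministic fallback
	return nodes[0], templates[nodes[0]], true
}

func main() {
	templates := map[string]int32{"capmox01": 401, "capmox02": 402}
	node, vmid, _ := pickTemplate(templates, "capmox02")
	fmt.Println(node, vmid) // capmox02 402
}
```

In the patch itself, the local-storage case additionally forces options.Node to the scheduled target, so the clone always runs on the node that owns the template.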
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index c1951100..1537fd4c 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -981,6 +981,11 @@ func (in *TemplateSource) DeepCopyInto(out *TemplateSource) { *out = new(TemplateSelector) (*in).DeepCopyInto(*out) } + if in.LocalStorage != nil { + in, out := &in.LocalStorage, &out.LocalStorage + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateSource. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index e40b0f3e..4511eb4c 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -75,12 +75,13 @@ spec: allowedNodes: description: |- AllowedNodes specifies all Proxmox nodes which will be considered - for operations. This implies that VMs can be cloned on different nodes from - the node which holds the VM template. + for operations. This implies that VMs can be cloned on different nodes. This field is optional and should only be set if you want to restrict the nodes where the VM can be cloned. - If not set, the ProxmoxCluster will be used to determine the nodes. + + if set overrides ProxmoxCluster.spec.AllowedNodes + if not set node discovery will be used items: type: string type: array @@ -149,6 +150,12 @@ spec: This is always done when you clone a normal VM. Create a Full clone by default. type: boolean + localStorage: + default: false + description: |- + LocalStorage defines whether the VM template stored on local storage. + Combination of (TemplateID, SourceNode, Target) and Localstorage is mutually exclusive. + type: boolean memoryMiB: description: |- MemoryMiB is the size of a virtual machine's memory, in MiB. @@ -613,8 +620,7 @@ spec: sourceNode: description: |- SourceNode is the initially selected proxmox node. - This node will be used to locate the template VM, which will - be used for cloning operations. + SourceNode should be used together with TemplateID. Cloning will be performed according to the configuration. Setting the `Target` field will tell Proxmox to clone the @@ -641,8 +647,10 @@ spec: type: array x-kubernetes-list-type: set target: - description: Target node. Only allowed if the original VM - is on shared storage. + description: |- + Target node. Only allowed if the template is on shared storage. + Requires templateID and sourceNode to be set. + LocalStorage must be set to false. type: string templateID: description: TemplateID the vm_template vmid used for cloning diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 1c82ee01..593974f9 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -97,12 +97,13 @@ spec: allowedNodes: description: |- AllowedNodes specifies all Proxmox nodes which will be considered - for operations. This implies that VMs can be cloned on different nodes from - the node which holds the VM template. + for operations. This implies that VMs can be cloned on different nodes. This field is optional and should only be set if you want to restrict the nodes where the VM can be cloned. 
- If not set, the ProxmoxCluster will be used to determine the nodes. + + if set overrides ProxmoxCluster.spec.AllowedNodes + if not set node discovery will be used items: type: string type: array @@ -173,6 +174,12 @@ spec: This is always done when you clone a normal VM. Create a Full clone by default. type: boolean + localStorage: + default: false + description: |- + LocalStorage defines whether the VM template stored on local storage. + Combination of (TemplateID, SourceNode, Target) and Localstorage is mutually exclusive. + type: boolean memoryMiB: description: |- MemoryMiB is the size of a virtual machine's memory, in MiB. @@ -654,8 +661,7 @@ spec: sourceNode: description: |- SourceNode is the initially selected proxmox node. - This node will be used to locate the template VM, which will - be used for cloning operations. + SourceNode should be used together with TemplateID. Cloning will be performed according to the configuration. Setting the `Target` field will tell Proxmox to clone the @@ -682,8 +688,10 @@ spec: type: array x-kubernetes-list-type: set target: - description: Target node. Only allowed if the original - VM is on shared storage. + description: |- + Target node. Only allowed if the template is on shared storage. + Requires templateID and sourceNode to be set. + LocalStorage must be set to false. type: string templateID: description: TemplateID the vm_template vmid used diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index 601feb97..12bee12e 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -68,12 +68,13 @@ spec: allowedNodes: description: |- AllowedNodes specifies all Proxmox nodes which will be considered - for operations. This implies that VMs can be cloned on different nodes from - the node which holds the VM template. + for operations. This implies that VMs can be cloned on different nodes. This field is optional and should only be set if you want to restrict the nodes where the VM can be cloned. - If not set, the ProxmoxCluster will be used to determine the nodes. + + if set overrides ProxmoxCluster.spec.AllowedNodes + if not set node discovery will be used items: type: string type: array @@ -140,6 +141,12 @@ spec: This is always done when you clone a normal VM. Create a Full clone by default. type: boolean + localStorage: + default: false + description: |- + LocalStorage defines whether the VM template stored on local storage. + Combination of (TemplateID, SourceNode, Target) and Localstorage is mutually exclusive. + type: boolean memoryMiB: description: |- MemoryMiB is the size of a virtual machine's memory, in MiB. @@ -571,8 +578,7 @@ spec: sourceNode: description: |- SourceNode is the initially selected proxmox node. - This node will be used to locate the template VM, which will - be used for cloning operations. + SourceNode should be used together with TemplateID. Cloning will be performed according to the configuration. Setting the `Target` field will tell Proxmox to clone the @@ -598,8 +604,10 @@ spec: type: array x-kubernetes-list-type: set target: - description: Target node. Only allowed if the original VM is on shared - storage. + description: |- + Target node. Only allowed if the template is on shared storage. + Requires templateID and sourceNode to be set. + LocalStorage must be set to false. 
type: string templateID: description: TemplateID the vm_template vmid used for cloning a new diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index a14519c7..5be53baf 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -79,12 +79,13 @@ spec: allowedNodes: description: |- AllowedNodes specifies all Proxmox nodes which will be considered - for operations. This implies that VMs can be cloned on different nodes from - the node which holds the VM template. + for operations. This implies that VMs can be cloned on different nodes. This field is optional and should only be set if you want to restrict the nodes where the VM can be cloned. - If not set, the ProxmoxCluster will be used to determine the nodes. + + if set overrides ProxmoxCluster.spec.AllowedNodes + if not set node discovery will be used items: type: string type: array @@ -153,6 +154,12 @@ spec: This is always done when you clone a normal VM. Create a Full clone by default. type: boolean + localStorage: + default: false + description: |- + LocalStorage defines whether the VM template stored on local storage. + Combination of (TemplateID, SourceNode, Target) and Localstorage is mutually exclusive. + type: boolean memoryMiB: description: |- MemoryMiB is the size of a virtual machine's memory, in MiB. @@ -611,8 +618,7 @@ spec: sourceNode: description: |- SourceNode is the initially selected proxmox node. - This node will be used to locate the template VM, which will - be used for cloning operations. + SourceNode should be used together with TemplateID. Cloning will be performed according to the configuration. Setting the `Target` field will tell Proxmox to clone the @@ -639,8 +645,10 @@ spec: type: array x-kubernetes-list-type: set target: - description: Target node. Only allowed if the original VM - is on shared storage. + description: |- + Target node. Only allowed if the template is on shared storage. + Requires templateID and sourceNode to be set. + LocalStorage must be set to false. 
type: string templateID: description: TemplateID the vm_template vmid used for cloning diff --git a/examples/cluster-calico.yaml b/examples/cluster-calico.yaml index 0cebab88..5581ee74 100644 --- a/examples/cluster-calico.yaml +++ b/examples/cluster-calico.yaml @@ -36,11 +36,11 @@ spec: addresses: [10.10.10.10-10.10.10.20] gateway: 10.10.10.1 prefix: 24 -# - name: ipv6Config -# value: -# addresses: [2001:db8:1::10-2001:db8:1::20] -# gateway: 2001:db8:1::1 -# prefix: 64 + # - name: ipv6Config + # value: + # addresses: [2001:db8:1::10-2001:db8:1::20] + # gateway: 2001:db8:1::1 + # prefix: 64 - name: kubeProxy value: mode: iptables @@ -51,70 +51,70 @@ spec: network: default: bridge: vmbr0 -# additionalDevices: -# - name: net1 -# model: virtio -# ipv4PoolRef: -# name: shared-inclusteripv4pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# ipv6PoolRef: -# name: shared-inclusteripv6pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# bridge: vmbr1 -# numSockets: 1 -# numCores: 4 -# memoryMiB: 4096 + # additionalDevices: + # - name: net1 + # model: virtio + # ipv4PoolRef: + # name: shared-inclusteripv4pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # ipv6PoolRef: + # name: shared-inclusteripv6pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # bridge: vmbr1 + # numSockets: 1 + # numCores: 4 + # memoryMiB: 4096 sourceNode: pve1 templateID: 100 workerNode: network: default: bridge: vmbr0 -# additionalDevices: -# - name: net1 -# model: virtio -# ipv4PoolRef: -# name: shared-inclusteripv4pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# ipv6PoolRef: -# name: shared-inclusteripv6pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# bridge: vmbr1 -# numSockets: 1 -# numCores: 4 -# memoryMiB: 4096 + # additionalDevices: + # - name: net1 + # model: virtio + # ipv4PoolRef: + # name: shared-inclusteripv4pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # ipv6PoolRef: + # name: shared-inclusteripv6pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # bridge: vmbr1 + # numSockets: 1 + # numCores: 4 + # memoryMiB: 4096 sourceNode: pve1 templateID: 100 loadBalancer: network: default: bridge: vmbr0 -# additionalDevices: -# - name: net1 -# model: virtio -# ipv4PoolRef: -# name: shared-inclusteripv4pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# ipv6PoolRef: -# name: shared-inclusteripv6pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# bridge: vmbr1 -# vrfs: -# - name: vrf-ext -# interfaces: -# - net1 -# table: 500 -# routingPolicy: -# - from: 192.0.2.0/24 -# numSockets: 1 -# numCores: 2 -# memoryMiB: 2048 + # additionalDevices: + # - name: net1 + # model: virtio + # ipv4PoolRef: + # name: shared-inclusteripv4pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # ipv6PoolRef: + # name: shared-inclusteripv6pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # bridge: vmbr1 + # vrfs: + # - name: vrf-ext + # interfaces: + # - net1 + # table: 500 + # routingPolicy: + # - from: 192.0.2.0/24 + # numSockets: 1 + # numCores: 2 + # memoryMiB: 2048 sourceNode: pve1 templateID: 100 sshAuthorizedKeys: diff --git a/examples/cluster-cilium.yaml b/examples/cluster-cilium.yaml index 6de39d8c..9bb7cf65 100644 --- a/examples/cluster-cilium.yaml +++ b/examples/cluster-cilium.yaml @@ -36,11 +36,11 @@ spec: addresses: [10.10.10.10-10.10.10.20] gateway: 
10.10.10.1 prefix: 24 -# - name: ipv6Config -# value: -# addresses: [2001:db8:1::10-2001:db8:1::20] -# gateway: 2001:db8:1::1 -# prefix: 64 + # - name: ipv6Config + # value: + # addresses: [2001:db8:1::10-2001:db8:1::20] + # gateway: 2001:db8:1::1 + # prefix: 64 - name: kubeProxy value: mode: iptables @@ -51,70 +51,70 @@ spec: network: default: bridge: vmbr0 -# additionalDevices: -# - name: net1 -# model: virtio -# ipv4PoolRef: -# name: shared-inclusteripv4pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# ipv6PoolRef: -# name: shared-inclusteripv6pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# bridge: vmbr1 -# numSockets: 1 -# numCores: 4 -# memoryMiB: 4096 + # additionalDevices: + # - name: net1 + # model: virtio + # ipv4PoolRef: + # name: shared-inclusteripv4pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # ipv6PoolRef: + # name: shared-inclusteripv6pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # bridge: vmbr1 + # numSockets: 1 + # numCores: 4 + # memoryMiB: 4096 sourceNode: pve1 templateID: 100 workerNode: network: default: bridge: vmbr0 -# additionalDevices: -# - name: net1 -# model: virtio -# ipv4PoolRef: -# name: shared-inclusteripv4pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# ipv6PoolRef: -# name: shared-inclusteripv6pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# bridge: vmbr1 -# numSockets: 1 -# numCores: 4 -# memoryMiB: 4096 + # additionalDevices: + # - name: net1 + # model: virtio + # ipv4PoolRef: + # name: shared-inclusteripv4pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # ipv6PoolRef: + # name: shared-inclusteripv6pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # bridge: vmbr1 + # numSockets: 1 + # numCores: 4 + # memoryMiB: 4096 sourceNode: pve1 templateID: 100 loadBalancer: network: default: bridge: vmbr0 -# additionalDevices: -# - name: net1 -# model: virtio -# ipv4PoolRef: -# name: shared-inclusteripv4pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# ipv6PoolRef: -# name: shared-inclusteripv6pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# bridge: vmbr1 -# vrfs: -# - name: vrf-ext -# interfaces: -# - net1 -# table: 500 -# routingPolicy: -# - from: 192.0.2.0/24 -# numSockets: 1 -# numCores: 2 -# memoryMiB: 2048 + # additionalDevices: + # - name: net1 + # model: virtio + # ipv4PoolRef: + # name: shared-inclusteripv4pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # ipv6PoolRef: + # name: shared-inclusteripv6pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # bridge: vmbr1 + # vrfs: + # - name: vrf-ext + # interfaces: + # - net1 + # table: 500 + # routingPolicy: + # - from: 192.0.2.0/24 + # numSockets: 1 + # numCores: 2 + # memoryMiB: 2048 sourceNode: pve1 templateID: 100 sshAuthorizedKeys: diff --git a/examples/cluster.yaml b/examples/cluster.yaml index aed92e45..20664065 100644 --- a/examples/cluster.yaml +++ b/examples/cluster.yaml @@ -36,11 +36,11 @@ spec: addresses: [10.10.10.10-10.10.10.20] gateway: 10.10.10.1 prefix: 24 -# - name: ipv6Config -# value: -# addresses: [2001:db8:1::10-2001:db8:1::20] -# gateway: 2001:db8:1::1 -# prefix: 64 + # - name: ipv6Config + # value: + # addresses: [2001:db8:1::10-2001:db8:1::20] + # gateway: 2001:db8:1::1 + # prefix: 64 - name: cloneSpec value: machineSpec: @@ -48,70 +48,70 @@ spec: network: default: bridge: vmbr0 -# 
additionalDevices: -# - name: net1 -# model: virtio -# ipv4PoolRef: -# name: shared-inclusteripv4pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# ipv6PoolRef: -# name: shared-inclusteripv6pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# bridge: vmbr1 -# numSockets: 1 -# numCores: 4 -# memoryMiB: 4096 + # additionalDevices: + # - name: net1 + # model: virtio + # ipv4PoolRef: + # name: shared-inclusteripv4pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # ipv6PoolRef: + # name: shared-inclusteripv6pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # bridge: vmbr1 + # numSockets: 1 + # numCores: 4 + # memoryMiB: 4096 sourceNode: pve1 templateID: 100 workerNode: network: default: bridge: vmbr0 -# additionalDevices: -# - name: net1 -# model: virtio -# ipv4PoolRef: -# name: shared-inclusteripv4pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# ipv6PoolRef: -# name: shared-inclusteripv6pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# bridge: vmbr1 -# numSockets: 1 -# numCores: 4 -# memoryMiB: 4096 + # additionalDevices: + # - name: net1 + # model: virtio + # ipv4PoolRef: + # name: shared-inclusteripv4pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # ipv6PoolRef: + # name: shared-inclusteripv6pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # bridge: vmbr1 + # numSockets: 1 + # numCores: 4 + # memoryMiB: 4096 sourceNode: pve1 templateID: 100 loadBalancer: network: default: bridge: vmbr0 -# additionalDevices: -# - name: net1 -# model: virtio -# ipv4PoolRef: -# name: shared-inclusteripv4pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# ipv6PoolRef: -# name: shared-inclusteripv6pool -# apiGroup: ipam.cluster.x-k8s.io -# kind: GlobalInClusterIPPool -# bridge: vmbr1 -# vrfs: -# - name: vrf-ext -# interfaces: -# - net1 -# table: 500 -# routingPolicy: -# - from: 192.0.2.0/24 -# numSockets: 1 -# numCores: 2 -# memoryMiB: 2048 + # additionalDevices: + # - name: net1 + # model: virtio + # ipv4PoolRef: + # name: shared-inclusteripv4pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # ipv6PoolRef: + # name: shared-inclusteripv6pool + # apiGroup: ipam.cluster.x-k8s.io + # kind: GlobalInClusterIPPool + # bridge: vmbr1 + # vrfs: + # - name: vrf-ext + # interfaces: + # - net1 + # table: 500 + # routingPolicy: + # - from: 192.0.2.0/24 + # numSockets: 1 + # numCores: 2 + # memoryMiB: 2048 sourceNode: pve1 templateID: 100 sshAuthorizedKeys: diff --git a/go.sum b/go.sum index f38504c3..ccc5ea75 100644 --- a/go.sum +++ b/go.sum @@ -146,6 +146,7 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= diff --git a/internal/service/scheduler/vmscheduler.go b/internal/service/scheduler/vmscheduler.go index 50ad6557..2d6161c1 100644 --- 
a/internal/service/scheduler/vmscheduler.go +++ b/internal/service/scheduler/vmscheduler.go @@ -45,22 +45,19 @@ func (err InsufficientMemoryError) Error() string { // ScheduleVM decides which node to a ProxmoxMachine should be scheduled on. // It requires the machine's ProxmoxCluster to have at least 1 allowed node. -func ScheduleVM(ctx context.Context, machineScope *scope.MachineScope) (string, error) { +func ScheduleVM(ctx context.Context, + machineScope *scope.MachineScope, + templateMap map[string]int32, + allowedNodes []string, +) (string, int32, error) { client := machineScope.InfraCluster.ProxmoxClient - // Use the default allowed nodes from the ProxmoxCluster. - allowedNodes := machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes schedulerHints := machineScope.InfraCluster.ProxmoxCluster.Spec.SchedulerHints locations := machineScope.InfraCluster.ProxmoxCluster.Status.NodeLocations.Workers if util.IsControlPlaneMachine(machineScope.Machine) { locations = machineScope.InfraCluster.ProxmoxCluster.Status.NodeLocations.ControlPlane } - // If ProxmoxMachine defines allowedNodes use them instead - if len(machineScope.ProxmoxMachine.Spec.AllowedNodes) > 0 { - allowedNodes = machineScope.ProxmoxMachine.Spec.AllowedNodes - } - - return selectNode(ctx, client, machineScope.ProxmoxMachine, locations, allowedNodes, schedulerHints) + return selectNode(ctx, client, machineScope.ProxmoxMachine, locations, allowedNodes, schedulerHints, templateMap) } func selectNode( @@ -70,22 +67,22 @@ func selectNode( locations []infrav1.NodeLocation, allowedNodes []string, schedulerHints *infrav1.SchedulerHints, -) (string, error) { + templateMap map[string]int32, +) (string, int32, error) { byMemory := make(sortByAvailableMemory, len(allowedNodes)) for i, nodeName := range allowedNodes { mem, err := client.GetReservableMemoryBytes(ctx, nodeName, schedulerHints.GetMemoryAdjustment()) if err != nil { - return "", err + return "", 0, err } byMemory[i] = nodeInfo{Name: nodeName, AvailableMemory: mem} } - sort.Sort(byMemory) requestedMemory := uint64(machine.Spec.MemoryMiB) * 1024 * 1024 // convert to bytes if requestedMemory > byMemory[0].AvailableMemory { // no more space on the node with the highest amount of available memory - return "", InsufficientMemoryError{ + return "", 0, InsufficientMemoryError{ node: byMemory[0].Name, available: byMemory[0].AvailableMemory, requested: requestedMemory, @@ -123,8 +120,8 @@ func selectNode( "resultNode", decision, ) } - - return decision, nil + templateID := templateMap[decision] + return decision, templateID, nil } type resourceClient interface { diff --git a/internal/service/scheduler/vmscheduler_test.go b/internal/service/scheduler/vmscheduler_test.go index 3251dea1..93a816d6 100644 --- a/internal/service/scheduler/vmscheduler_test.go +++ b/internal/service/scheduler/vmscheduler_test.go @@ -49,6 +49,7 @@ func miBytes(in uint64) uint64 { func TestSelectNode(t *testing.T) { allowedNodes := []string{"pve1", "pve2", "pve3"} + templateMap := map[string]int32{} var locations []infrav1.NodeLocation const requestMiB = 8 availableMem := map[string]uint64{ @@ -74,7 +75,7 @@ func TestSelectNode(t *testing.T) { client := fakeResourceClient(availableMem) - node, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, &infrav1.SchedulerHints{}) + node, _, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, &infrav1.SchedulerHints{}, templateMap) require.NoError(t, err) require.Equal(t, expectedNode, node) @@ 
-94,7 +95,7 @@ func TestSelectNode(t *testing.T) { client := fakeResourceClient(availableMem) - node, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, &infrav1.SchedulerHints{}) + node, _, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, &infrav1.SchedulerHints{}, templateMap) require.ErrorAs(t, err, &InsufficientMemoryError{}) require.Empty(t, node) @@ -112,6 +113,8 @@ func TestScheduleVM(t *testing.T) { require.NotNil(t, ctrlClient) ipamHelper := &ipam.Helper{} + templateMap := map[string]int32{} + allowedNodes := []string{"pve1", "pve2", "pve3"} proxmoxCluster := infrav1.ProxmoxCluster{ ObjectMeta: metav1.ObjectMeta{ @@ -181,7 +184,7 @@ func TestScheduleVM(t *testing.T) { fakeProxmoxClient.EXPECT().GetReservableMemoryBytes(context.Background(), "pve2", uint64(100)).Return(miBytes(20), nil) fakeProxmoxClient.EXPECT().GetReservableMemoryBytes(context.Background(), "pve3", uint64(100)).Return(miBytes(20), nil) - node, err := ScheduleVM(context.Background(), machineScope) + node, _, err := ScheduleVM(context.Background(), machineScope, templateMap, allowedNodes) require.NoError(t, err) require.Equal(t, "pve2", node) } diff --git a/internal/service/vmservice/find_test.go b/internal/service/vmservice/find_test.go index 503909ff..8976a4e2 100644 --- a/internal/service/vmservice/find_test.go +++ b/internal/service/vmservice/find_test.go @@ -28,13 +28,19 @@ import ( infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" ) +const ( + firstNode = "node1" +) + func TestFindVM_FindByNodeAndID(t *testing.T) { ctx := context.TODO() machineScope, proxmoxClient, _ := setupReconcilerTest(t) vm := newRunningVM() machineScope.ProxmoxMachine.Spec.VirtualMachineID = ptr.To(int64(vm.VMID)) + // this is used in LocateProxmoxNode function cannot be empty. 
+ machineScope.ProxmoxMachine.Spec.SourceNode = firstNode - proxmoxClient.EXPECT().GetVM(ctx, "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(ctx, firstNode, int64(123)).Return(vm, nil).Once() _, err := FindVM(ctx, machineScope) require.NoError(t, err) @@ -100,7 +106,7 @@ func TestUpdateVMLocation_MissingName(t *testing.T) { machineScope.ProxmoxMachine.Spec.VirtualMachineID = ptr.To(int64(vm.VMID)) proxmoxClient.EXPECT().FindVMResource(ctx, uint64(123)).Return(vmr, nil).Once() - proxmoxClient.EXPECT().GetVM(ctx, "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(ctx, firstNode, int64(123)).Return(vm, nil).Once() require.Error(t, updateVMLocation(ctx, machineScope)) } @@ -116,7 +122,7 @@ func TestUpdateVMLocation_NameMismatch(t *testing.T) { machineScope.ProxmoxMachine.Spec.VirtualMachineID = ptr.To(int64(vm.VMID)) proxmoxClient.EXPECT().FindVMResource(ctx, uint64(123)).Return(vmr, nil).Once() - proxmoxClient.EXPECT().GetVM(ctx, "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(ctx, firstNode, int64(123)).Return(vm, nil).Once() require.Error(t, updateVMLocation(ctx, machineScope)) require.True(t, machineScope.HasFailed(), "expected failureReason and failureMessage to be set") @@ -135,7 +141,7 @@ func TestUpdateVMLocation_UpdateNode(t *testing.T) { }, false) proxmoxClient.EXPECT().FindVMResource(ctx, uint64(123)).Return(vmr, nil).Once() - proxmoxClient.EXPECT().GetVM(ctx, "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(ctx, firstNode, int64(123)).Return(vm, nil).Once() require.NoError(t, updateVMLocation(ctx, machineScope)) require.Equal(t, vmr.Node, *machineScope.ProxmoxMachine.Status.ProxmoxNode) @@ -164,7 +170,7 @@ func TestUpdateVMLocation_WithoutTaskNameMismatch(t *testing.T) { machineScope.ProxmoxMachine.Status.TaskRef = nil proxmoxClient.EXPECT().FindVMResource(ctx, uint64(123)).Return(vmr, nil).Once() - proxmoxClient.EXPECT().GetVM(ctx, "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(ctx, firstNode, int64(123)).Return(vm, nil).Once() require.Error(t, updateVMLocation(ctx, machineScope)) require.True(t, machineScope.HasFailed(), "expected failureReason and failureMessage to be set") diff --git a/internal/service/vmservice/helpers_test.go b/internal/service/vmservice/helpers_test.go index 034d596f..3d7a5271 100644 --- a/internal/service/vmservice/helpers_test.go +++ b/internal/service/vmservice/helpers_test.go @@ -107,14 +107,6 @@ func setupReconcilerTest(t *testing.T) (*scope.MachineScope, *proxmoxtest.MockCl infrav1alpha1.MachineFinalizer, }, }, - Spec: infrav1alpha1.ProxmoxMachineSpec{ - VirtualMachineCloneSpec: infrav1alpha1.VirtualMachineCloneSpec{ - TemplateSource: infrav1alpha1.TemplateSource{ - SourceNode: "node1", - TemplateID: ptr.To[int32](123), - }, - }, - }, } scheme := runtime.NewScheme() diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index 2d97768e..e4f1f433 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -346,7 +346,8 @@ func getMachineAddresses(scope *scope.MachineScope) ([]clusterv1.MachineAddress, return addresses, nil } -func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneResponse, error) { +// TODO: split function to avoid gocyclo. 
+func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneResponse, error) { //nolint: gocyclo vmid, err := getVMID(ctx, scope) if err != nil { if errors.Is(err, ErrNoVMIDInRangeFree) { @@ -392,15 +393,52 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe scope.InfraCluster.ProxmoxCluster.Status.NodeLocations = new(infrav1alpha1.NodeLocations) } - templateID := scope.ProxmoxMachine.GetTemplateID() - var templateMap = make(map[string]int32) + // check if localstorage is set + localStorage := scope.ProxmoxMachine.GetLocalStorage() + // we fail if localstorage is set and any of target/templateid/sourcenode is set as we cannot use those in case of localstorage + if localStorage && + (scope.ProxmoxMachine.Spec.Target != nil || scope.ProxmoxMachine.Spec.TemplateID != nil || scope.ProxmoxMachine.Spec.SourceNode != "") { + err := goproxmox.ErrWrongLocalStorageConfig + scope.SetFailureMessage(err) + conditions.MarkFalse(scope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.VMProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err) + return proxmox.VMCloneResponse{}, err + } + + allowedNodes := scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes + // if ProxmoxMachine defines allowedNodes use them instead + if len(scope.ProxmoxMachine.Spec.AllowedNodes) > 0 { + allowedNodes = scope.ProxmoxMachine.Spec.AllowedNodes + } + + if len(allowedNodes) == 0 && scope.ProxmoxMachine.Spec.Target != nil { + allowedNodes = append(allowedNodes, *scope.ProxmoxMachine.Spec.Target) + } + + // if we do not have a target node or allowedNodes is empty, we need to get the nodes from the cluster + if len(allowedNodes) == 0 && scope.ProxmoxMachine.Spec.Target == nil { + // We have no Node list to try to schedule on. Let's try to use all nodes in cluster. + allowedNodes, err = scope.InfraCluster.ProxmoxClient.GetAllNodeNames(ctx) + if err != nil { + scope.SetFailureMessage(err) + scope.SetFailureReason(capierrors.InsufficientResourcesMachineError) + return proxmox.VMCloneResponse{}, err + } + } + // we first try to find templates, so we can use those during scheduling + var templateID int32 - // if templateID is -1 then we assume that templateSelector is used and we need to search templates by tags - if templateID == -1 { - var err error + // return template if it's .spec.TemplateID and .spec.SourceNode + templateMap := scope.ProxmoxMachine.GetTemplateMap() + // and only if templateMap is nil (which means TemplateSelector should be used) + if templateMap == nil { + // get templateSelectorTags from the spec templateSelectorTags := scope.ProxmoxMachine.GetTemplateSelectorTags() - templateMap, err = scope.InfraCluster.ProxmoxClient.FindVMTemplatesByTags(ctx, templateSelectorTags) + + // find templates based on tags and allowed nodes + templateMap, err = scope.InfraCluster.ProxmoxClient.FindVMTemplatesByTags(ctx, templateSelectorTags, allowedNodes, localStorage) + + // fail if templates are not found or multiple templates found if localstorage is false if err != nil { scope.SetFailureMessage(err) if errors.Is(err, goproxmox.ErrTemplateNotFound) { @@ -413,39 +451,34 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe } } - // if no target was specified but we have a set of nodes defined in the spec, we want to evenly distribute - // the nodes across the cluster. 
- if scope.ProxmoxMachine.Spec.Target == nil && - (len(scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes) > 0 || len(scope.ProxmoxMachine.Spec.AllowedNodes) > 0) { - var err error - // if template is on one node and allowedNodes has more then one node - set target to that node - if len(templateMap) == 1 && - (len(scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes) > 1 || len(scope.ProxmoxMachine.Spec.AllowedNodes) > 1) { - options.Target, templateID = func(m map[string]int32) (string, int32) { - for k, v := range m { - return k, v - } - return "", 0 // Fallback (should not happen if len(m) == 1) - }(templateMap) - } else { - // select next node as a target - options.Target, err = selectNextNode(ctx, scope) - } - // if we have template on that node, set Node and templateID - if err != nil { - if errors.As(err, &scheduler.InsufficientMemoryError{}) { - scope.SetFailureMessage(err) - scope.SetFailureReason(capierrors.InsufficientResourcesMachineError) - } - return proxmox.VMCloneResponse{}, err + // when we got templateMap and allowedNodes we do node scheduling + options.Target, templateID, err = selectNextNode(ctx, scope, templateMap, allowedNodes) + if err != nil { + if errors.As(err, &scheduler.InsufficientMemoryError{}) { + scope.SetFailureMessage(err) + scope.SetFailureReason(capierrors.InsufficientResourcesMachineError) } + return proxmox.VMCloneResponse{}, err } - // set template ID on selected node, and set options.Target to same node. - if val, ok := templateMap[options.Target]; ok { + + // if localStorage use options.Target as options.Node + if localStorage { options.Node = options.Target - templateID = val + } + // if there is no options.Node, and templateMap has only one entry, + // we set the options.Node and templateID to the only entry in templateMap + if options.Node == "" { + for i, k := range templateMap { + options.Node = i + templateID = k + break + } + } + if templateID == 0 { + templateID = templateMap[options.Node] } + // at last clone machine res, err := scope.InfraCluster.ProxmoxClient.CloneVM(ctx, int(templateID), options) if err != nil { return res, err diff --git a/internal/service/vmservice/vm_test.go b/internal/service/vmservice/vm_test.go index 2450730f..bd2bcd5c 100644 --- a/internal/service/vmservice/vm_test.go +++ b/internal/service/vmservice/vm_test.go @@ -39,11 +39,12 @@ func TestReconcileVM_EverythingReady(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) vm := newRunningVM() machineScope.SetVirtualMachineID(int64(vm.VMID)) + machineScope.ProxmoxMachine.Spec.SourceNode = firstNode machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true - proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() proxmoxClient.EXPECT().CloudInitStatus(context.Background(), vm).Return(false, nil).Once() proxmoxClient.EXPECT().QemuAgentStatus(context.Background(), vm).Return(nil).Once() @@ -60,11 +61,13 @@ func TestReconcileVM_QemuAgentCheckDisabled(t *testing.T) { machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) 
machineScope.ProxmoxMachine.Status.Ready = true + + machineScope.ProxmoxMachine.Spec.SourceNode = firstNode machineScope.ProxmoxMachine.Spec.Checks = &infrav1alpha1.ProxmoxMachineChecks{ SkipQemuGuestAgent: ptr.To(true), } - proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() // proxmoxClient.EXPECT().CloudInitStatus(context.Background(), vm).Return(false, nil).Once() result, err := ReconcileVM(context.Background(), machineScope) @@ -80,11 +83,12 @@ func TestReconcileVM_CloudInitCheckDisabled(t *testing.T) { machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true + machineScope.ProxmoxMachine.Spec.SourceNode = firstNode machineScope.ProxmoxMachine.Spec.Checks = &infrav1alpha1.ProxmoxMachineChecks{ SkipCloudInitStatus: ptr.To(true), } - proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() proxmoxClient.EXPECT().QemuAgentStatus(context.Background(), vm).Return(nil) result, err := ReconcileVM(context.Background(), machineScope) @@ -100,20 +104,42 @@ func TestReconcileVM_InitCheckDisabled(t *testing.T) { machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true + machineScope.ProxmoxMachine.Spec.SourceNode = firstNode machineScope.ProxmoxMachine.Spec.Checks = &infrav1alpha1.ProxmoxMachineChecks{ SkipCloudInitStatus: ptr.To(true), SkipQemuGuestAgent: ptr.To(true), } - proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() result, err := ReconcileVM(context.Background(), machineScope) require.NoError(t, err) require.Equal(t, infrav1alpha1.VirtualMachineStateReady, result.State) require.Equal(t, "10.10.10.10", machineScope.ProxmoxMachine.Status.Addresses[1].Address) } +func TestTargetNotSetWithLocalStorage(t *testing.T) { + ctx := context.Background() + localStorage := true + + machineScope, _, _ := setupReconcilerTest(t) + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(localStorage) + machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") + machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) + machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) + machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") + machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") + machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") + machineScope.ProxmoxMachine.Spec.Target = ptr.To("node2") + + _, err := createVM(ctx, machineScope) + + require.Equal(t, ptr.To("Localstorage does not allow usage of target/templateid/sourcenode"), machineScope.ProxmoxMachine.Status.FailureMessage) + require.Error(t, err) + require.Contains(t, "Localstorage does not allow usage of target/templateid/sourcenode", err.Error()) +} func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { + ctx := context.Background() machineScope, proxmoxClient, _ := 
setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) @@ -122,8 +148,10 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") machineScope.ProxmoxMachine.Spec.Target = ptr.To("node2") + machineScope.ProxmoxMachine.Spec.TemplateID = ptr.To(int32(123)) + machineScope.ProxmoxMachine.Spec.SourceNode = firstNode expectedOptions := proxmox.VMCloneRequest{ - Node: "node1", + Node: firstNode, Name: "test", Description: "test vm", Format: "raw", @@ -133,10 +161,17 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { Storage: "storage", Target: "node2", } - response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} - proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() - requeue, err := ensureVirtualMachine(context.Background(), machineScope) + response := proxmox.VMCloneResponse{ + NewID: 123, + Task: newTask(), + } + + proxmoxClient.EXPECT().CloneVM(ctx, 123, expectedOptions).Return(response, nil).Once() + + proxmoxClient.EXPECT().GetReservableMemoryBytes(ctx, "node2", uint64(100)).Return(uint64(5000), nil).Once() + + requeue, err := ensureVirtualMachine(ctx, machineScope) require.NoError(t, err) require.True(t, requeue) @@ -145,26 +180,32 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) } -func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector(t *testing.T) { +func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_SharedStorage(t *testing.T) { + ctx := context.Background() vmTemplateTags := []string{"foo", "bar"} + allowedNodes := []string{firstNode, "node2"} machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ TemplateSource: infrav1alpha1.TemplateSource{ + LocalStorage: ptr.To(true), TemplateSelector: &infrav1alpha1.TemplateSelector{ MatchTags: vmTemplateTags, }, }, } + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(false) machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") - machineScope.ProxmoxMachine.Spec.Target = ptr.To("node2") + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(true) + // we do not set allowedNodes from spec, we use node discovery + // machineScope.ProxmoxMachine.Spec.AllowedNodes = allowedNodes expectedOptions := proxmox.VMCloneRequest{ - Node: "node2", + Node: firstNode, Name: "test", Description: "test vm", Format: "raw", @@ -172,19 +213,105 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector(t *testing.T Pool: "pool", SnapName: "snap", Storage: "storage", - Target: "node2", + Target: firstNode, } - proxmoxClient.EXPECT().FindVMTemplatesByTags(context.Background(), vmTemplateTags).Return(map[string]int32{"node2": int32(123)}, nil).Once() + proxmoxClient.EXPECT(). + GetAllNodeNames(ctx). + Return(allowedNodes, nil). + Once() + + proxmoxClient.EXPECT(). 
+ GetReservableMemoryBytes(ctx, firstNode, uint64(100)). + Return(uint64(5000), nil). + Once() + + proxmoxClient.EXPECT(). + GetReservableMemoryBytes(ctx, "node2", uint64(100)). + Return(uint64(5000), nil). + Once() + + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(ctx, vmTemplateTags, allowedNodes, true). + Return(map[string]int32{firstNode: int32(123), "node2": int32(124)}, nil). + Once() response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} - proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() + proxmoxClient.EXPECT(). + CloneVM(ctx, 123, expectedOptions). + Return(response, nil). + Once() - requeue, err := ensureVirtualMachine(context.Background(), machineScope) + requeue, err := ensureVirtualMachine(ctx, machineScope) require.NoError(t, err) require.True(t, requeue) - require.Equal(t, "node2", *machineScope.ProxmoxMachine.Status.ProxmoxNode) + require.Equal(t, firstNode, *machineScope.ProxmoxMachine.Status.ProxmoxNode) + require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) +} + +func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_LocalStorage(t *testing.T) { + ctx := context.Background() + vmTemplateTags := []string{"foo", "bar"} + allowedNodes := []string{firstNode, "node2"} + + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + LocalStorage: ptr.To(false), + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(false) + machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") + machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) + machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) + machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") + machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") + machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(false) + machineScope.ProxmoxMachine.Spec.AllowedNodes = allowedNodes + expectedOptions := proxmox.VMCloneRequest{ + Node: firstNode, + Name: "test", + Description: "test vm", + Format: "raw", + Full: 1, + Pool: "pool", + SnapName: "snap", + Storage: "storage", + Target: firstNode, + } + + proxmoxClient.EXPECT(). + GetReservableMemoryBytes(ctx, firstNode, uint64(100)). + Return(uint64(5000), nil). + Once() + + proxmoxClient.EXPECT(). + GetReservableMemoryBytes(ctx, "node2", uint64(100)). + Return(uint64(5000), nil). + Once() + + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(ctx, vmTemplateTags, allowedNodes, false). + Return(map[string]int32{firstNode: int32(123), "node2": int32(124)}, nil). + Once() + + response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} + proxmoxClient.EXPECT(). + CloneVM(ctx, 123, expectedOptions). + Return(response, nil). 
+ Once() + + requeue, err := ensureVirtualMachine(ctx, machineScope) + require.NoError(t, err) + require.True(t, requeue) + + require.Equal(t, firstNode, *machineScope.ProxmoxMachine.Status.ProxmoxNode) require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) } @@ -192,6 +319,8 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector(t *testing.T func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_VMTemplateNotFound(t *testing.T) { ctx := context.Background() vmTemplateTags := []string{"foo", "bar"} + localStorage := true + allowedNodes := []string{firstNode, "node2"} machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ @@ -201,15 +330,16 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_VMTemplateNo }, }, } + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(true) machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") - machineScope.ProxmoxMachine.Spec.Target = ptr.To("node2") + machineScope.ProxmoxMachine.Spec.AllowedNodes = allowedNodes - proxmoxClient.EXPECT().FindVMTemplatesByTags(context.Background(), vmTemplateTags).Return(map[string]int32{"": -1}, goproxmox.ErrTemplateNotFound).Once() + proxmoxClient.EXPECT().FindVMTemplatesByTags(context.Background(), vmTemplateTags, allowedNodes, localStorage).Return(map[string]int32{}, goproxmox.ErrTemplateNotFound).Once() _, err := createVM(ctx, machineScope) @@ -219,18 +349,47 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_VMTemplateNo require.Contains(t, "VM template not found", err.Error()) } +// localstorage false. func TestEnsureVirtualMachine_CreateVM_SelectNode(t *testing.T) { + vmTemplateTags := []string{"foo", "bar"} + localStorage := false + allowedNodes := []string{"node3"} + machineScope, proxmoxClient, _ := setupReconcilerTest(t) - machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = []string{"node1", "node2", "node3"} + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } + machineScope.ProxmoxMachine.Spec.AllowedNodes = allowedNodes + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(localStorage) + + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(context.Background(), vmTemplateTags, allowedNodes, localStorage). + Return(map[string]int32{firstNode: 123}, nil). 
+ Once() - selectNextNode = func(context.Context, *scope.MachineScope) (string, error) { - return "node3", nil + selectNextNode = func(context.Context, *scope.MachineScope, map[string]int32, []string) (string, int32, error) { + return "node3", 0, nil } t.Cleanup(func() { selectNextNode = scheduler.ScheduleVM }) - expectedOptions := proxmox.VMCloneRequest{Node: "node1", Name: "test", Target: "node3"} - response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} - proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() + expectedOptions := proxmox.VMCloneRequest{ + Node: firstNode, + Name: "test", + Target: "node3", + } + + response := proxmox.VMCloneResponse{ + NewID: 123, + Task: newTask(), + } + proxmoxClient.EXPECT(). + CloneVM(context.Background(), 123, expectedOptions). + Return(response, nil). + Once() requeue, err := ensureVirtualMachine(context.Background(), machineScope) require.NoError(t, err) @@ -241,17 +400,78 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode(t *testing.T) { requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) } -func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes(t *testing.T) { +func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes_SharedStorage(t *testing.T) { + clusterAllowedNodes := []string{firstNode, "node2", "node3", "node4"} + proxmoxMachineAllowedNodes := []string{firstNode, "node2"} + + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = clusterAllowedNodes + machineScope.ProxmoxMachine.Spec.AllowedNodes = proxmoxMachineAllowedNodes + // we need to search for templates, as we cannot do node scheduling without them + vmTemplateTags := []string{"foo", "bar"} + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(false) + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + LocalStorage: ptr.To(false), + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } + templateMap := map[string]int32{firstNode: int32(122)} + + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(context.Background(), vmTemplateTags, proxmoxMachineAllowedNodes, false). + Return(templateMap, nil). 
+ Once() + + selectNextNode = func(context.Context, *scope.MachineScope, map[string]int32, []string) (string, int32, error) { + return "node2", 123, nil + } + t.Cleanup(func() { selectNextNode = scheduler.ScheduleVM }) + + expectedOptions := proxmox.VMCloneRequest{Node: firstNode, Name: "test", Target: "node2"} + response := proxmox.VMCloneResponse{NewID: 122, Task: newTask()} + proxmoxClient.EXPECT().CloneVM(context.Background(), 122, expectedOptions).Return(response, nil).Once() + + requeue, err := ensureVirtualMachine(context.Background(), machineScope) + require.NoError(t, err) + require.True(t, requeue) + + require.Contains(t, []string{"node2", firstNode}, *machineScope.ProxmoxMachine.Status.ProxmoxNode) + require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) +} + +func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes_LocalStorage(t *testing.T) { + clusterAllowedNodes := []string{firstNode, "node2", "node3", "node4"} + proxmoxMachineAllowedNodes := []string{firstNode, "node2"} + vmTemplateTags := []string{"foo", "bar"} + machineScope, proxmoxClient, _ := setupReconcilerTest(t) - machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = []string{"node1", "node2", "node3", "node4"} - machineScope.ProxmoxMachine.Spec.AllowedNodes = []string{"node1", "node2"} + machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = clusterAllowedNodes + machineScope.ProxmoxMachine.Spec.AllowedNodes = proxmoxMachineAllowedNodes + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(true) + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + LocalStorage: ptr.To(true), + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } - selectNextNode = func(context.Context, *scope.MachineScope) (string, error) { - return "node2", nil + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(context.Background(), vmTemplateTags, proxmoxMachineAllowedNodes, true). + Return(map[string]int32{firstNode: int32(122), "node2": int32(123)}, nil). 
+ Once() + + selectNextNode = func(context.Context, *scope.MachineScope, map[string]int32, []string) (string, int32, error) { + return "node2", 123, nil } t.Cleanup(func() { selectNextNode = scheduler.ScheduleVM }) - expectedOptions := proxmox.VMCloneRequest{Node: "node1", Name: "test", Target: "node2"} + expectedOptions := proxmox.VMCloneRequest{Node: "node2", Name: "test", Target: "node2"} response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() @@ -265,11 +485,24 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes(t *testing } func TestEnsureVirtualMachine_CreateVM_SelectNode_InsufficientMemory(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) - machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = []string{"node1"} + allowedNodes := []string{firstNode} + vmTemplateTags := []string{"foo"} + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = allowedNodes + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(context.Background(), vmTemplateTags, allowedNodes, false). + Return(map[string]int32{firstNode: int32(122)}, nil). + Once() - selectNextNode = func(context.Context, *scope.MachineScope) (string, error) { - return "", fmt.Errorf("error: %w", scheduler.InsufficientMemoryError{}) + selectNextNode = func(context.Context, *scope.MachineScope, map[string]int32, []string) (string, int32, error) { + return "", 0, fmt.Errorf("error: %w", scheduler.InsufficientMemoryError{}) } t.Cleanup(func() { selectNextNode = scheduler.ScheduleVM }) @@ -282,16 +515,37 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_InsufficientMemory(t *testing. } func TestEnsureVirtualMachine_CreateVM_VMIDRange(t *testing.T) { + vmTemplateTags := []string{"foo"} + allowedNodes := []string{firstNode} machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = allowedNodes + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } machineScope.ProxmoxMachine.Spec.VMIDRange = &infrav1alpha1.VMIDRange{ Start: 1000, End: 1002, } - expectedOptions := proxmox.VMCloneRequest{Node: "node1", NewID: 1001, Name: "test"} + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(context.Background(), vmTemplateTags, allowedNodes, false). + Return(map[string]int32{firstNode: int32(123)}, nil). + Once() + + proxmoxClient.EXPECT(). + GetReservableMemoryBytes(context.Background(), firstNode, uint64(100)). + Return(uint64(5000), nil). 
+ Once() + + expectedOptions := proxmox.VMCloneRequest{Node: firstNode, NewID: 1001, Name: "test", Target: firstNode} response := proxmox.VMCloneResponse{Task: newTask(), NewID: int64(1001)} proxmoxClient.Mock.On("CheckID", context.Background(), int64(1000)).Return(false, nil) proxmoxClient.Mock.On("CheckID", context.Background(), int64(1001)).Return(true, nil) + proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() requeue, err := ensureVirtualMachine(context.Background(), machineScope) @@ -342,6 +596,13 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { VirtualMachineID: ptr.To(int64(1000)), }, } + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: []string{"foo"}, + }, + }, + } machine := clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "vm1000", @@ -367,7 +628,22 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { _, err = ensureVirtualMachine(context.Background(), machineScopeVMThousand) require.NoError(t, err) - expectedOptions := proxmox.VMCloneRequest{Node: "node1", NewID: 1002, Name: "test"} + proxmoxClient.EXPECT(). + GetAllNodeNames(context.Background()). + Return([]string{firstNode}, nil). + Once() + + proxmoxClient.EXPECT(). + FindVMTemplatesByTags(context.Background(), []string{"foo"}, []string{firstNode}, false). + Return(map[string]int32{firstNode: int32(123)}, nil). + Once() + + proxmoxClient.EXPECT(). + GetReservableMemoryBytes(context.Background(), firstNode, uint64(100)). + Return(uint64(5000), nil). + Once() + + expectedOptions := proxmox.VMCloneRequest{Node: firstNode, NewID: 1002, Name: "test", Target: firstNode} response := proxmox.VMCloneResponse{Task: newTask(), NewID: int64(1002)} proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() proxmoxClient.Mock.On("CheckID", context.Background(), int64(1001)).Return(false, nil).Once() @@ -382,10 +658,17 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { func TestEnsureVirtualMachine_FindVM(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.SetVirtualMachineID(123) + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + SourceNode: firstNode, + TemplateID: ptr.To[int32](123), + }, + } + vm := newStoppedVM() vm.VirtualMachineConfig.SMBios1 = "uuid=56603c36-46b9-4608-90ae-c731c15eae64" - proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() requeue, err := ensureVirtualMachine(context.Background(), machineScope) require.NoError(t, err) @@ -398,8 +681,13 @@ func TestEnsureVirtualMachine_FindVM(t *testing.T) { func TestEnsureVirtualMachine_UpdateVMLocation_Error(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.SetVirtualMachineID(123) - - proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(nil, fmt.Errorf("not found")).Once() + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + SourceNode: firstNode, + TemplateID: ptr.To[int32](123), + }, + } + 
proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(nil, fmt.Errorf("not found")).Once() proxmoxClient.EXPECT().FindVMResource(context.Background(), uint64(123)).Return(nil, fmt.Errorf("unavailalbe")).Once() _, err := ensureVirtualMachine(context.Background(), machineScope) @@ -622,8 +910,14 @@ func TestReconcileVM_CloudInitFailed(t *testing.T) { machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + SourceNode: firstNode, + TemplateID: ptr.To[int32](123), + }, + } - proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() proxmoxClient.EXPECT().CloudInitStatus(context.Background(), vm).Return(false, goproxmox.ErrCloudInitFailed).Once() proxmoxClient.EXPECT().QemuAgentStatus(context.Background(), vm).Return(nil).Once() @@ -640,8 +934,14 @@ func TestReconcileVM_CloudInitRunning(t *testing.T) { machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + SourceNode: firstNode, + TemplateID: ptr.To[int32](123), + }, + } - proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() proxmoxClient.EXPECT().CloudInitStatus(context.Background(), vm).Return(true, nil).Once() proxmoxClient.EXPECT().QemuAgentStatus(context.Background(), vm).Return(nil).Once() diff --git a/pkg/proxmox/client.go b/pkg/proxmox/client.go index 271804e6..63a882b8 100644 --- a/pkg/proxmox/client.go +++ b/pkg/proxmox/client.go @@ -30,7 +30,8 @@ type Client interface { ConfigureVM(ctx context.Context, vm *proxmox.VirtualMachine, options ...VirtualMachineOption) (*proxmox.Task, error) FindVMResource(ctx context.Context, vmID uint64) (*proxmox.ClusterResource, error) - FindVMTemplatesByTags(ctx context.Context, templateTags []string) (map[string]int32, error) + + FindVMTemplatesByTags(ctx context.Context, templateTags []string, allowedNodes []string, localStorage bool) (map[string]int32, error) CheckID(ctx context.Context, vmID int64) (bool, error) @@ -38,6 +39,8 @@ type Client interface { DeleteVM(ctx context.Context, nodeName string, vmID int64) (*proxmox.Task, error) + GetAllNodeNames(ctx context.Context) ([]string, error) + GetTask(ctx context.Context, upID string) (*proxmox.Task, error) GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64) (uint64, error) diff --git a/pkg/proxmox/goproxmox/api_client.go b/pkg/proxmox/goproxmox/api_client.go index 9dba8436..5d5a99b1 100644 --- a/pkg/proxmox/goproxmox/api_client.go +++ b/pkg/proxmox/goproxmox/api_client.go @@ -142,10 +142,18 @@ func (c *APIClient) FindVMResource(ctx context.Context, vmID uint64) (*proxmox.C return nil, fmt.Errorf("unable to find 
VM with ID %d on any of the nodes", vmID)
}

+func isNotInAllowedNodes(allowedNodes []string, node string) bool {
+	return !slices.Contains(allowedNodes, node)
+}
+
 // FindVMTemplatesByTags finds VM templates by tags across the whole cluster and ensures only one template per node.
-func (c *APIClient) FindVMTemplatesByTags(ctx context.Context, templateTags []string) (map[string]int32, error) {
+func (c *APIClient) FindVMTemplatesByTags(ctx context.Context, templateTags []string, allowedNodes []string, localStorage bool) (map[string]int32, error) {
 	templates := make(map[string]int32)
 
+	// if no template tags were passed, fail early and return an error
+	if len(templateTags) == 0 {
+		return nil, fmt.Errorf("%w: no template tags defined", ErrTemplateNotFound)
+	}
 	sortedTags := make([]string, len(templateTags))
 	for i, tag := range templateTags {
 		// Proxmox VM tags are always lowercase
@@ -172,7 +180,13 @@ func (c *APIClient) FindVMTemplatesByTags(ctx context.Context, templateTags []st
 		vmTags := strings.Split(vm.Tags, ";")
 		slices.Sort(vmTags)
 
+		// with local storage the template must exist on every allowed node, so skip templates on nodes outside the allowed set
+		if localStorage && isNotInAllowedNodes(allowedNodes, vm.Node) {
+			continue
+		}
+
 		if slices.Equal(vmTags, uniqueTags) {
+			// check if we have multiple templates per node
 			if _, exists := templates[vm.Node]; exists {
 				return nil, fmt.Errorf("%w: multiple VM templates found on node %q with tags %q", ErrMultipleTemplatesFound, vm.Node, strings.Join(templateTags, ";"))
 			}
@@ -180,8 +194,14 @@ func (c *APIClient) FindVMTemplatesByTags(ctx context.Context, templateTags []st
 		}
 	}
 
-	if len(templates) == 0 {
-		return nil, fmt.Errorf("%w: no VM templates found with tags %q", ErrTemplateNotFound, strings.Join(templateTags, ";"))
+	if (len(templates) != len(allowedNodes)) && localStorage {
+		return nil, fmt.Errorf("found %d templates on allowedNodes %q with tags %q", len(templates), strings.Join(allowedNodes, ","), strings.Join(templateTags, ";"))
+	}
+
+	if !localStorage {
+		if n := len(templates); n != 1 {
+			return nil, fmt.Errorf("%s: found %d VM templates with tags %q", "ErrTemplateNotFound", n, strings.Join(templateTags, ";"))
+		}
 	}
 
 	return templates, nil
@@ -230,6 +250,32 @@ func (c *APIClient) DeleteVM(ctx context.Context, nodeName string, vmID int64) (
 	return task, nil
 }
 
+// GetAllNodeNames returns the names of all nodes in the cluster we are authenticated against.
+func (c *APIClient) GetAllNodeNames(ctx context.Context) ([]string, error) {
+	cluster, err := c.Client.Cluster(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("cannot get cluster status: %w", err)
+	}
+	err = cluster.Status(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("cannot get cluster status: %w", err)
+	}
+	var nodes []string
+	for _, node := range cluster.Nodes {
+		nodes = appendIfMissing(nodes, node.Name)
+	}
+
+	return nodes, nil
+}
+
+// appendIfMissing appends an item to the slice only if it is not already present.
+func appendIfMissing(slice []string, item string) []string {
+	if slices.Contains(slice, item) {
+		return slice
+	}
+	return append(slice, item)
+}
+
 // CheckID checks if the vmid is available on the cluster.
 // Returns true if the vmid is available, false if it is taken.
func (c *APIClient) CheckID(ctx context.Context, vmid int64) (bool, error) { diff --git a/pkg/proxmox/goproxmox/api_client_test.go b/pkg/proxmox/goproxmox/api_client_test.go index db75c3b8..341fe46b 100644 --- a/pkg/proxmox/goproxmox/api_client_test.go +++ b/pkg/proxmox/goproxmox/api_client_test.go @@ -373,15 +373,28 @@ func TestProxmoxAPIClient_FindVMResource(t *testing.T) { func TestProxmoxAPIClient_FindVMTemplatesByTags(t *testing.T) { proxmoxClusterResources := proxmox.ClusterResources{ - &proxmox.ClusterResource{VMID: 101, Name: "k8s-node01", Node: "capmox01", Tags: ""}, - &proxmox.ClusterResource{VMID: 102, Name: "k8s-node02", Node: "capmox02", Tags: ""}, - &proxmox.ClusterResource{VMID: 150, Name: "template-without-tags", Node: "capmox01", Tags: "", Template: uint64(1)}, - &proxmox.ClusterResource{VMID: 201, Name: "ubuntu-22.04-k8s-v1.28.3", Node: "capmox01", Tags: "template;capmox;v1.28.3", Template: uint64(1)}, - &proxmox.ClusterResource{VMID: 202, Name: "ubuntu-22.04-k8s-v1.30.2", Node: "capmox02", Tags: "capmox;template;v1.30.2", Template: uint64(1)}, - &proxmox.ClusterResource{VMID: 301, Name: "ubuntu-22.04-k8s-v1.29.2", Node: "capmox02", Tags: "capmox;template;v1.29.2", Template: uint64(1)}, - &proxmox.ClusterResource{VMID: 302, Name: "ubuntu-22.04-k8s-v1.29.2", Node: "capmox02", Tags: "capmox;template;v1.29.2", Template: uint64(1)}, - &proxmox.ClusterResource{VMID: 401, Name: "ubuntu-v1.28.3-1", Node: "capmox01", Tags: "ubuntu;capmox;v1.28.3", Template: uint64(1)}, - &proxmox.ClusterResource{VMID: 402, Name: "ubuntu-v1.28.3-2", Node: "capmox02", Tags: "ubuntu;capmox;v1.28.3", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 101, Name: "template01", Node: "capmox01", Tags: ""}, + &proxmox.ClusterResource{VMID: 102, Name: "template02", Node: "capmox02", Tags: ""}, + + &proxmox.ClusterResource{VMID: 302, Name: "template-shared-storage", Node: "capmox01", Tags: "template-shared-storage", Template: uint64(1)}, + + &proxmox.ClusterResource{VMID: 303, Name: "template-local-storage-other-node", Node: "capmox01", Tags: "template-local-storage-other-node", Template: uint64(1)}, + + &proxmox.ClusterResource{VMID: 202, Name: "template-local-storage", Node: "capmox01", Tags: "template-local-storage", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 201, Name: "template-local-storage", Node: "capmox02", Tags: "template-local-storage", Template: uint64(1)}, + + &proxmox.ClusterResource{VMID: 401, Name: "template-duplicate", Node: "capmox01", Tags: "template-duplicate", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 402, Name: "template-duplicate", Node: "capmox01", Tags: "template-duplicate", Template: uint64(1)}, + + &proxmox.ClusterResource{VMID: 403, Name: "template-duplicate-same-node", Node: "capmox01", Tags: "template-duplicate;same-node", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 404, Name: "template-duplicate-same-node", Node: "capmox01", Tags: "template-duplicate;same-node", Template: uint64(1)}, + + &proxmox.ClusterResource{VMID: 503, Name: "template-duplicate-multiple-nodes", Node: "capmox01", Tags: "template-duplicate;multiple-nodes", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 504, Name: "template-duplicate-multiple-nodes", Node: "capmox01", Tags: "template-duplicate;multiple-nodes", Template: uint64(1)}, + + &proxmox.ClusterResource{VMID: 603, Name: "template-node1", Node: "capmox01", Tags: "template-extra-node", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 604, Name: "template-node2", Node: "capmox02", Tags: 
"template-extra-node", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 605, Name: "template-node3", Node: "capmox03", Tags: "template-extra-node", Template: uint64(1)}, } tests := []struct { name string @@ -390,83 +403,141 @@ func TestProxmoxAPIClient_FindVMTemplatesByTags(t *testing.T) { fails bool err string vmTemplateNode map[string]int32 + localStorage bool + allowedNodes []string }{ { - name: "clusterstatus broken", + // if tags are not defined we fail + name: "no tags defined", http: []int{500, 200}, fails: true, - err: "cannot get cluster status: 500", + err: "VM template not found: no template tags defined", }, { - name: "resourcelisting broken", - http: []int{200, 500}, - fails: true, - err: "could not list VM resources: 500", + name: "clusterstatus broken", + http: []int{500, 200}, + fails: true, + err: "cannot get cluster status: 500", + vmTags: []string{"template", "capmox", "v1.28.3"}, }, { - name: "find-template", + name: "resourcelisting broken", + http: []int{200, 500}, + fails: true, + err: "could not list VM resources: 500", + vmTags: []string{"template", "capmox", "v1.28.3"}, + }, + { + // if shared template we can use template for one node. + name: "find-template-shared-localstorage-false", + http: []int{200, 200}, + vmTags: []string{"template-shared-storage"}, + fails: false, + err: "", + vmTemplateNode: map[string]int32{"capmox01": 302}, + localStorage: false, + allowedNodes: []string{"capmox01"}, + }, + { + // if shared template and localstorage + name: "find-template-shared-localstorage-true", http: []int{200, 200}, - vmTags: []string{"template", "capmox", "v1.28.3"}, + vmTags: []string{"template-shared-storage"}, fails: false, err: "", - vmTemplateNode: map[string]int32{"capmox01": 201}, + vmTemplateNode: map[string]int32{"capmox01": 302}, + localStorage: true, + allowedNodes: []string{"capmox01"}, }, { - name: "find-template-nil", + name: "find-template-localstorage-true-not-allowed-node", http: []int{200, 200}, - vmTags: nil, + vmTags: []string{"template-local-storage-other-node"}, fails: true, - err: "VM template not found: no VM templates found with tags \"\"", - vmTemplateNode: map[string]int32{"capmox01": 201}, + err: "found 0 templates on allowedNodes \"capmox02\" with tags \"template-local-storage-other-node\"", + vmTemplateNode: map[string]int32{"capmox01": 303}, + localStorage: true, + allowedNodes: []string{"capmox02"}, }, { - // Proxmox VM tags are always lowercase - name: "find-template-uppercase", + name: "find-template-localstorage-true", http: []int{200, 200}, - vmTags: []string{"TEMPLATE", "CAPMOX", "v1.28.3"}, + vmTags: []string{"template-local-storage"}, fails: false, err: "", - vmTemplateNode: map[string]int32{"capmox01": 201}, + vmTemplateNode: map[string]int32{"capmox01": 202, "capmox02": 201}, + localStorage: true, + allowedNodes: []string{"capmox01", "capmox02"}, }, { - name: "find-template-unordered", + name: "find-template-localstorage-false", http: []int{200, 200}, - vmTags: []string{"template", "capmox", "v1.30.2"}, - fails: false, - err: "", - vmTemplateNode: map[string]int32{"capmox02": 202}, + vmTags: []string{"template-local-storage"}, + fails: true, + err: "ErrTemplateNotFound: found 2 VM templates with tags \"template-local-storage\"", + vmTemplateNode: map[string]int32{"capmox01": 202, "capmox02": 301}, + localStorage: false, + allowedNodes: []string{"capmox01", "capmox02"}, }, { - name: "find-template-duplicate-tag", + name: "find-template-duplicate-shared", http: []int{200, 200}, - vmTags: []string{"template", "capmox", 
"capmox", "v1.30.2"}, - fails: false, - err: "", - vmTemplateNode: map[string]int32{"capmox02": 202}, + vmTags: []string{"template-duplicate"}, + fails: true, + err: "Multiple templates found: multiple VM templates found on node \"capmox01\" with tags \"template-duplicate\"", + vmTemplateNode: map[string]int32{"capmox01": 401, "capmox02": 402}, + localStorage: false, + allowedNodes: []string{"capmox01", "capmox02"}, + }, + { + name: "find-template-duplicate-localstorage", + http: []int{200, 200}, + vmTags: []string{"template-duplicate"}, + fails: true, + err: "Multiple templates found: multiple VM templates found on node \"capmox01\" with tags \"template-duplicate\"", + vmTemplateNode: map[string]int32{"capmox01": 401, "capmox02": 402}, + localStorage: true, + allowedNodes: []string{"capmox01", "capmox02"}, + }, + { + name: "find-template-duplicate-shared-same-node", + http: []int{200, 200}, + vmTags: []string{"template-duplicate", "same-node"}, + fails: true, + err: "Multiple templates found: multiple VM templates found on node \"capmox01\" with tags \"template-duplicate;same-node\"", + vmTemplateNode: map[string]int32{}, + localStorage: false, + allowedNodes: []string{"capmox01", "capmox02"}, }, { - name: "find-multiple-templates", + name: "find-template-duplicate-localstorage-same-node", http: []int{200, 200}, - vmTags: []string{"template", "capmox"}, + vmTags: []string{"template-duplicate", "same-node"}, fails: true, - err: "VM template not found: no VM templates found with tags \"template;capmox\"", - vmTemplateNode: map[string]int32{"nice": 69}, + err: "Multiple templates found: multiple VM templates found on node \"capmox01\" with tags \"template-duplicate;same-node\"", + vmTemplateNode: map[string]int32{}, + localStorage: true, + allowedNodes: []string{"capmox01", "capmox02"}, }, { - name: "find-multiple-templates-same-node", + name: "find-template-duplicate-localstorage-multiple-nodes", http: []int{200, 200}, - vmTags: []string{"template", "capmox", "v1.29.2"}, + vmTags: []string{"template-duplicate", "multiple-nodes"}, fails: true, - err: "Multiple templates found: multiple VM templates found on node \"capmox02\" with tags \"template;capmox;v1.29.2\"", - vmTemplateNode: map[string]int32{"nice": 11}, + err: "Multiple templates found: multiple VM templates found on node \"capmox01\" with tags \"template-duplicate;multiple-nodes\"", + vmTemplateNode: map[string]int32{}, + localStorage: true, + allowedNodes: []string{"capmox01", "capmox02"}, }, { - name: "find-multiple-templates-multiple-nodes", + name: "find-template-extra-node", http: []int{200, 200}, - vmTags: []string{"ubuntu", "capmox", "v1.28.3"}, + vmTags: []string{"template-extra-node"}, fails: false, err: "", - vmTemplateNode: map[string]int32{"capmox01": 401, "capmox02": 402}, + vmTemplateNode: map[string]int32{"capmox01": 603, "capmox02": 604}, + localStorage: true, + allowedNodes: []string{"capmox01", "capmox02"}, }, } @@ -479,7 +550,7 @@ func TestProxmoxAPIClient_FindVMTemplatesByTags(t *testing.T) { httpmock.RegisterResponder(http.MethodGet, `=~/cluster/resources`, newJSONResponder(test.http[1], proxmoxClusterResources)) - vmTemplateNode, err := client.FindVMTemplatesByTags(context.Background(), test.vmTags) + vmTemplateNode, err := client.FindVMTemplatesByTags(context.Background(), test.vmTags, test.allowedNodes, test.localStorage) if test.fails { require.Error(t, err) diff --git a/pkg/proxmox/goproxmox/errors.go b/pkg/proxmox/goproxmox/errors.go index 1dca7549..69d7424f 100644 --- a/pkg/proxmox/goproxmox/errors.go +++ 
b/pkg/proxmox/goproxmox/errors.go @@ -9,5 +9,9 @@ var ( // ErrTemplateNotFound is returned when a VM template is not found. ErrTemplateNotFound = errors.New("VM template not found") + // ErrMultipleTemplatesFound is returned when multiple VM templates are found. ErrMultipleTemplatesFound = errors.New("Multiple templates found") + + // ErrWrongLocalStorageConfig Wrong combination of local storage configuration. + ErrWrongLocalStorageConfig = errors.New("Localstorage does not allow usage of target/templateid/sourcenode") ) diff --git a/pkg/proxmox/proxmoxtest/mock_client.go b/pkg/proxmox/proxmoxtest/mock_client.go index 932e490b..660e2c5b 100644 --- a/pkg/proxmox/proxmoxtest/mock_client.go +++ b/pkg/proxmox/proxmoxtest/mock_client.go @@ -389,9 +389,9 @@ func (_c *MockClient_FindVMResource_Call) RunAndReturn(run func(context.Context, return _c } -// FindVMTemplatesByTags provides a mock function with given fields: ctx, templateTags -func (_m *MockClient) FindVMTemplatesByTags(ctx context.Context, templateTags []string) (map[string]int32, error) { - ret := _m.Called(ctx, templateTags) +// FindVMTemplatesByTags provides a mock function with given fields: ctx, templateTags, allowedNodes, localStorage +func (_m *MockClient) FindVMTemplatesByTags(ctx context.Context, templateTags []string, allowedNodes []string, localStorage bool) (map[string]int32, error) { + ret := _m.Called(ctx, templateTags, allowedNodes, localStorage) if len(ret) == 0 { panic("no return value specified for FindVMTemplatesByTags") @@ -399,19 +399,19 @@ func (_m *MockClient) FindVMTemplatesByTags(ctx context.Context, templateTags [] var r0 map[string]int32 var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []string) (map[string]int32, error)); ok { - return rf(ctx, templateTags) + if rf, ok := ret.Get(0).(func(context.Context, []string, []string, bool) (map[string]int32, error)); ok { + return rf(ctx, templateTags, allowedNodes, localStorage) } - if rf, ok := ret.Get(0).(func(context.Context, []string) map[string]int32); ok { - r0 = rf(ctx, templateTags) + if rf, ok := ret.Get(0).(func(context.Context, []string, []string, bool) map[string]int32); ok { + r0 = rf(ctx, templateTags, allowedNodes, localStorage) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(map[string]int32) } } - if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok { - r1 = rf(ctx, templateTags) + if rf, ok := ret.Get(1).(func(context.Context, []string, []string, bool) error); ok { + r1 = rf(ctx, templateTags, allowedNodes, localStorage) } else { r1 = ret.Error(1) } @@ -427,13 +427,15 @@ type MockClient_FindVMTemplatesByTags_Call struct { // FindVMTemplatesByTags is a helper method to define mock.On call // - ctx context.Context // - templateTags []string -func (_e *MockClient_Expecter) FindVMTemplatesByTags(ctx interface{}, templateTags interface{}) *MockClient_FindVMTemplatesByTags_Call { - return &MockClient_FindVMTemplatesByTags_Call{Call: _e.mock.On("FindVMTemplatesByTags", ctx, templateTags)} +// - allowedNodes []string +// - localStorage bool +func (_e *MockClient_Expecter) FindVMTemplatesByTags(ctx interface{}, templateTags interface{}, allowedNodes interface{}, localStorage interface{}) *MockClient_FindVMTemplatesByTags_Call { + return &MockClient_FindVMTemplatesByTags_Call{Call: _e.mock.On("FindVMTemplatesByTags", ctx, templateTags, allowedNodes, localStorage)} } -func (_c *MockClient_FindVMTemplatesByTags_Call) Run(run func(ctx context.Context, templateTags []string)) *MockClient_FindVMTemplatesByTags_Call { +func (_c 
*MockClient_FindVMTemplatesByTags_Call) Run(run func(ctx context.Context, templateTags []string, allowedNodes []string, localStorage bool)) *MockClient_FindVMTemplatesByTags_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].([]string)) + run(args[0].(context.Context), args[1].([]string), args[2].([]string), args[3].(bool)) }) return _c } @@ -443,7 +445,65 @@ func (_c *MockClient_FindVMTemplatesByTags_Call) Return(_a0 map[string]int32, _a return _c } -func (_c *MockClient_FindVMTemplatesByTags_Call) RunAndReturn(run func(context.Context, []string) (map[string]int32, error)) *MockClient_FindVMTemplatesByTags_Call { +func (_c *MockClient_FindVMTemplatesByTags_Call) RunAndReturn(run func(context.Context, []string, []string, bool) (map[string]int32, error)) *MockClient_FindVMTemplatesByTags_Call { + _c.Call.Return(run) + return _c +} + +// GetAllNodeNames provides a mock function with given fields: ctx +func (_m *MockClient) GetAllNodeNames(ctx context.Context) ([]string, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetAllNodeNames") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]string, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []string); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockClient_GetAllNodeNames_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllNodeNames' +type MockClient_GetAllNodeNames_Call struct { + *mock.Call +} + +// GetAllNodeNames is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockClient_Expecter) GetAllNodeNames(ctx interface{}) *MockClient_GetAllNodeNames_Call { + return &MockClient_GetAllNodeNames_Call{Call: _e.mock.On("GetAllNodeNames", ctx)} +} + +func (_c *MockClient_GetAllNodeNames_Call) Run(run func(ctx context.Context)) *MockClient_GetAllNodeNames_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockClient_GetAllNodeNames_Call) Return(_a0 []string, _a1 error) *MockClient_GetAllNodeNames_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockClient_GetAllNodeNames_Call) RunAndReturn(run func(context.Context) ([]string, error)) *MockClient_GetAllNodeNames_Call { _c.Call.Return(run) return _c } From d7115af678f70e5e79781efa079a75a784ffa645 Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Wed, 30 Apr 2025 13:10:59 +0300 Subject: [PATCH 04/16] Rework how validations are made --- .github/actionlint.yaml | 10 +- api/v1alpha1/proxmoxmachine_types.go | 12 +- api/v1alpha1/zz_generated.deepcopy.go | 5 - ...ture.cluster.x-k8s.io_proxmoxclusters.yaml | 11 +- ...ster.x-k8s.io_proxmoxclustertemplates.yaml | 11 +- ...ture.cluster.x-k8s.io_proxmoxmachines.yaml | 11 +- ...ster.x-k8s.io_proxmoxmachinetemplates.yaml | 11 +- docs/advanced-setups.md | 16 +- examples/cluster-calico.yaml | 114 ++++++------ examples/cluster-cilium.yaml | 114 ++++++------ examples/cluster.yaml | 116 ++++++------ internal/service/vmservice/find_test.go | 16 +- internal/service/vmservice/vm.go | 61 +++---- internal/service/vmservice/vm_test.go | 166 +++++++----------- internal/webhook/proxmoxmachine_webhook.go | 30 ++++ .../webhook/proxmoxmachine_webhook_test.go | 9 +- 
internal/webhook/webhook_suite_test.go | 9 +- pkg/proxmox/client.go | 2 - pkg/proxmox/goproxmox/api_client.go | 32 +--- pkg/proxmox/goproxmox/errors.go | 4 +- pkg/proxmox/proxmoxtest/mock_client.go | 58 ------ test/helpers/envtest.go | 3 +- 22 files changed, 345 insertions(+), 476 deletions(-) diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml index 9ad1fe5d..06172822 100644 --- a/.github/actionlint.yaml +++ b/.github/actionlint.yaml @@ -2,11 +2,11 @@ self-hosted-runner: # Labels of self-hosted runner in array of strings. labels: - - "self-hosted" - - "proxmox" - - "e2e" - - "capmox" - - "dcd-playground" + - "self-hosted" + - "proxmox" + - "e2e" + - "capmox" + - "dcd-playground" # Configuration variables in array of strings defined in your repository or # organization. `null` means disabling configuration variables check. # Empty array means no configuration variable is allowed. diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index 8954e99c..bacce702 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -120,8 +120,8 @@ type ProxmoxMachineSpec struct { // This field is optional and should only be set if you want to restrict // the nodes where the VM can be cloned. // - // if set overrides ProxmoxCluster.spec.AllowedNodes - // if not set node discovery will be used + // If not set, the ProxmoxCluster will be used to determine the nodes. + // // +optional AllowedNodes []string `json:"allowedNodes,omitempty"` @@ -203,7 +203,7 @@ type TemplateSource struct { TemplateSelector *TemplateSelector `json:"templateSelector,omitempty"` // LocalStorage defines whether the VM template stored on local storage. - // Combination of (TemplateID, SourceNode, Target) and Localstorage is mutually exclusive. + // Combination of (TemplateID, SourceNode) and LocalStorage is mutually exclusive. // +kubebuilder:default=false // +optional LocalStorage *bool `json:"localStorage,omitempty"` @@ -242,12 +242,6 @@ type VirtualMachineCloneSpec struct { // Storage for full clone. // +optional Storage *string `json:"storage,omitempty"` - - // Target node. Only allowed if the template is on shared storage. - // Requires templateID and sourceNode to be set. - // LocalStorage must be set to false. - // +optional - Target *string `json:"target,omitempty"` } // TemplateSelector defines MatchTags for looking up VM templates. diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 1537fd4c..8f86061a 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1090,11 +1090,6 @@ func (in *VirtualMachineCloneSpec) DeepCopyInto(out *VirtualMachineCloneSpec) { *out = new(string) **out = **in } - if in.Target != nil { - in, out := &in.Target, &out.Target - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineCloneSpec. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index 4511eb4c..80bbe797 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -80,8 +80,7 @@ spec: This field is optional and should only be set if you want to restrict the nodes where the VM can be cloned. 
- if set overrides ProxmoxCluster.spec.AllowedNodes - if not set node discovery will be used + If not set, the ProxmoxCluster will be used to determine the nodes. items: type: string type: array @@ -154,7 +153,7 @@ spec: default: false description: |- LocalStorage defines whether the VM template stored on local storage. - Combination of (TemplateID, SourceNode, Target) and Localstorage is mutually exclusive. + Combination of (TemplateID, SourceNode) and LocalStorage is mutually exclusive. type: boolean memoryMiB: description: |- @@ -646,12 +645,6 @@ spec: minItems: 1 type: array x-kubernetes-list-type: set - target: - description: |- - Target node. Only allowed if the template is on shared storage. - Requires templateID and sourceNode to be set. - LocalStorage must be set to false. - type: string templateID: description: TemplateID the vm_template vmid used for cloning a new VM. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 593974f9..6c0029f9 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -102,8 +102,7 @@ spec: This field is optional and should only be set if you want to restrict the nodes where the VM can be cloned. - if set overrides ProxmoxCluster.spec.AllowedNodes - if not set node discovery will be used + If not set, the ProxmoxCluster will be used to determine the nodes. items: type: string type: array @@ -178,7 +177,7 @@ spec: default: false description: |- LocalStorage defines whether the VM template stored on local storage. - Combination of (TemplateID, SourceNode, Target) and Localstorage is mutually exclusive. + Combination of (TemplateID, SourceNode) and LocalStorage is mutually exclusive. type: boolean memoryMiB: description: |- @@ -687,12 +686,6 @@ spec: minItems: 1 type: array x-kubernetes-list-type: set - target: - description: |- - Target node. Only allowed if the template is on shared storage. - Requires templateID and sourceNode to be set. - LocalStorage must be set to false. - type: string templateID: description: TemplateID the vm_template vmid used for cloning a new VM. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index 12bee12e..894b1bad 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -73,8 +73,7 @@ spec: This field is optional and should only be set if you want to restrict the nodes where the VM can be cloned. - if set overrides ProxmoxCluster.spec.AllowedNodes - if not set node discovery will be used + If not set, the ProxmoxCluster will be used to determine the nodes. items: type: string type: array @@ -145,7 +144,7 @@ spec: default: false description: |- LocalStorage defines whether the VM template stored on local storage. - Combination of (TemplateID, SourceNode, Target) and Localstorage is mutually exclusive. + Combination of (TemplateID, SourceNode) and LocalStorage is mutually exclusive. type: boolean memoryMiB: description: |- @@ -603,12 +602,6 @@ spec: minItems: 1 type: array x-kubernetes-list-type: set - target: - description: |- - Target node. Only allowed if the template is on shared storage. - Requires templateID and sourceNode to be set. - LocalStorage must be set to false. 
- type: string templateID: description: TemplateID the vm_template vmid used for cloning a new VM. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index 5be53baf..911f2f1f 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -84,8 +84,7 @@ spec: This field is optional and should only be set if you want to restrict the nodes where the VM can be cloned. - if set overrides ProxmoxCluster.spec.AllowedNodes - if not set node discovery will be used + If not set, the ProxmoxCluster will be used to determine the nodes. items: type: string type: array @@ -158,7 +157,7 @@ spec: default: false description: |- LocalStorage defines whether the VM template stored on local storage. - Combination of (TemplateID, SourceNode, Target) and Localstorage is mutually exclusive. + Combination of (TemplateID, SourceNode) and LocalStorage is mutually exclusive. type: boolean memoryMiB: description: |- @@ -644,12 +643,6 @@ spec: minItems: 1 type: array x-kubernetes-list-type: set - target: - description: |- - Target node. Only allowed if the template is on shared storage. - Requires templateID and sourceNode to be set. - LocalStorage must be set to false. - type: string templateID: description: TemplateID the vm_template vmid used for cloning a new VM. diff --git a/docs/advanced-setups.md b/docs/advanced-setups.md index 38c8f24f..caca1996 100644 --- a/docs/advanced-setups.md +++ b/docs/advanced-setups.md @@ -182,7 +182,21 @@ Our provider is able to look up templates based on their attached tags, for `Pro For example, you can set the `TEMPLATE_TAGS="tag1,tag2"` environment variable. Your custom image will then be used when using the [auto-image](https://github.com/ionos-cloud/cluster-api-provider-ionoscloud/blob/main/templates/cluster-template-auto-image.yaml) template. -Please note: Passed tags must be an exact 1:1 match with the tags on the template you want to use. The matched result must be unique. If multiple templates are found, provisioning will fail. +**N.B.** Passed tags must be an exact 1:1 match with the tags on the template you want to use. + +### localStorage + +`false` (default) +* Shared storage is assumed. +* The template must be accessible from any node. +* Only one template with the matching tags should exist in the entire cluster. +* If multiple templates found, the operation fails. + +`true` +* Local storage is assumed for templates. +* Each node listed in the `allowedNodes` is expected to have a copy of the template stored locally. +* If any of the nodes in `allowedNodes` is missing the template, the operation fails. 
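+
+For illustration, a minimal `ProxmoxMachineTemplate` snippet for the `localStorage: true` case could look like the following sketch. The resource name, node names and tags are placeholders, and the usual sizing, disk and network fields are omitted for brevity:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: ProxmoxMachineTemplate
+metadata:
+  name: example-worker              # placeholder name
+spec:
+  template:
+    spec:
+      allowedNodes: [pve1, pve2]    # each listed node must hold a local copy of the template
+      localStorage: true            # resolve the template per node instead of assuming shared storage
+      templateSelector:
+        matchTags: [tag1, tag2]     # must be an exact 1:1 match with the template's tags
+```
+
+With `localStorage: false` (the default) the same `templateSelector` must resolve to exactly one template in the whole cluster; `allowedNodes` then only restricts where the VM may be scheduled, not where the template is looked up.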
+ ## Proxmox RBAC with least privileges diff --git a/examples/cluster-calico.yaml b/examples/cluster-calico.yaml index 5581ee74..0cebab88 100644 --- a/examples/cluster-calico.yaml +++ b/examples/cluster-calico.yaml @@ -36,11 +36,11 @@ spec: addresses: [10.10.10.10-10.10.10.20] gateway: 10.10.10.1 prefix: 24 - # - name: ipv6Config - # value: - # addresses: [2001:db8:1::10-2001:db8:1::20] - # gateway: 2001:db8:1::1 - # prefix: 64 +# - name: ipv6Config +# value: +# addresses: [2001:db8:1::10-2001:db8:1::20] +# gateway: 2001:db8:1::1 +# prefix: 64 - name: kubeProxy value: mode: iptables @@ -51,70 +51,70 @@ spec: network: default: bridge: vmbr0 - # additionalDevices: - # - name: net1 - # model: virtio - # ipv4PoolRef: - # name: shared-inclusteripv4pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # ipv6PoolRef: - # name: shared-inclusteripv6pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # bridge: vmbr1 - # numSockets: 1 - # numCores: 4 - # memoryMiB: 4096 +# additionalDevices: +# - name: net1 +# model: virtio +# ipv4PoolRef: +# name: shared-inclusteripv4pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# ipv6PoolRef: +# name: shared-inclusteripv6pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# bridge: vmbr1 +# numSockets: 1 +# numCores: 4 +# memoryMiB: 4096 sourceNode: pve1 templateID: 100 workerNode: network: default: bridge: vmbr0 - # additionalDevices: - # - name: net1 - # model: virtio - # ipv4PoolRef: - # name: shared-inclusteripv4pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # ipv6PoolRef: - # name: shared-inclusteripv6pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # bridge: vmbr1 - # numSockets: 1 - # numCores: 4 - # memoryMiB: 4096 +# additionalDevices: +# - name: net1 +# model: virtio +# ipv4PoolRef: +# name: shared-inclusteripv4pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# ipv6PoolRef: +# name: shared-inclusteripv6pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# bridge: vmbr1 +# numSockets: 1 +# numCores: 4 +# memoryMiB: 4096 sourceNode: pve1 templateID: 100 loadBalancer: network: default: bridge: vmbr0 - # additionalDevices: - # - name: net1 - # model: virtio - # ipv4PoolRef: - # name: shared-inclusteripv4pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # ipv6PoolRef: - # name: shared-inclusteripv6pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # bridge: vmbr1 - # vrfs: - # - name: vrf-ext - # interfaces: - # - net1 - # table: 500 - # routingPolicy: - # - from: 192.0.2.0/24 - # numSockets: 1 - # numCores: 2 - # memoryMiB: 2048 +# additionalDevices: +# - name: net1 +# model: virtio +# ipv4PoolRef: +# name: shared-inclusteripv4pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# ipv6PoolRef: +# name: shared-inclusteripv6pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# bridge: vmbr1 +# vrfs: +# - name: vrf-ext +# interfaces: +# - net1 +# table: 500 +# routingPolicy: +# - from: 192.0.2.0/24 +# numSockets: 1 +# numCores: 2 +# memoryMiB: 2048 sourceNode: pve1 templateID: 100 sshAuthorizedKeys: diff --git a/examples/cluster-cilium.yaml b/examples/cluster-cilium.yaml index 9bb7cf65..6de39d8c 100644 --- a/examples/cluster-cilium.yaml +++ b/examples/cluster-cilium.yaml @@ -36,11 +36,11 @@ spec: addresses: [10.10.10.10-10.10.10.20] gateway: 10.10.10.1 prefix: 24 - # - name: ipv6Config - # 
value: - # addresses: [2001:db8:1::10-2001:db8:1::20] - # gateway: 2001:db8:1::1 - # prefix: 64 +# - name: ipv6Config +# value: +# addresses: [2001:db8:1::10-2001:db8:1::20] +# gateway: 2001:db8:1::1 +# prefix: 64 - name: kubeProxy value: mode: iptables @@ -51,70 +51,70 @@ spec: network: default: bridge: vmbr0 - # additionalDevices: - # - name: net1 - # model: virtio - # ipv4PoolRef: - # name: shared-inclusteripv4pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # ipv6PoolRef: - # name: shared-inclusteripv6pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # bridge: vmbr1 - # numSockets: 1 - # numCores: 4 - # memoryMiB: 4096 +# additionalDevices: +# - name: net1 +# model: virtio +# ipv4PoolRef: +# name: shared-inclusteripv4pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# ipv6PoolRef: +# name: shared-inclusteripv6pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# bridge: vmbr1 +# numSockets: 1 +# numCores: 4 +# memoryMiB: 4096 sourceNode: pve1 templateID: 100 workerNode: network: default: bridge: vmbr0 - # additionalDevices: - # - name: net1 - # model: virtio - # ipv4PoolRef: - # name: shared-inclusteripv4pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # ipv6PoolRef: - # name: shared-inclusteripv6pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # bridge: vmbr1 - # numSockets: 1 - # numCores: 4 - # memoryMiB: 4096 +# additionalDevices: +# - name: net1 +# model: virtio +# ipv4PoolRef: +# name: shared-inclusteripv4pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# ipv6PoolRef: +# name: shared-inclusteripv6pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# bridge: vmbr1 +# numSockets: 1 +# numCores: 4 +# memoryMiB: 4096 sourceNode: pve1 templateID: 100 loadBalancer: network: default: bridge: vmbr0 - # additionalDevices: - # - name: net1 - # model: virtio - # ipv4PoolRef: - # name: shared-inclusteripv4pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # ipv6PoolRef: - # name: shared-inclusteripv6pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # bridge: vmbr1 - # vrfs: - # - name: vrf-ext - # interfaces: - # - net1 - # table: 500 - # routingPolicy: - # - from: 192.0.2.0/24 - # numSockets: 1 - # numCores: 2 - # memoryMiB: 2048 +# additionalDevices: +# - name: net1 +# model: virtio +# ipv4PoolRef: +# name: shared-inclusteripv4pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# ipv6PoolRef: +# name: shared-inclusteripv6pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# bridge: vmbr1 +# vrfs: +# - name: vrf-ext +# interfaces: +# - net1 +# table: 500 +# routingPolicy: +# - from: 192.0.2.0/24 +# numSockets: 1 +# numCores: 2 +# memoryMiB: 2048 sourceNode: pve1 templateID: 100 sshAuthorizedKeys: diff --git a/examples/cluster.yaml b/examples/cluster.yaml index 20664065..aac2584a 100644 --- a/examples/cluster.yaml +++ b/examples/cluster.yaml @@ -3,7 +3,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: - cluster.x-k8s.io/proxmox-cluster-cni: + cluster.x-k8s.io/proxmox-cluster-cni: name: capmox-cluster spec: topology: @@ -36,11 +36,11 @@ spec: addresses: [10.10.10.10-10.10.10.20] gateway: 10.10.10.1 prefix: 24 - # - name: ipv6Config - # value: - # addresses: [2001:db8:1::10-2001:db8:1::20] - # gateway: 2001:db8:1::1 - # prefix: 64 +# - name: ipv6Config +# value: +# addresses: 
[2001:db8:1::10-2001:db8:1::20] +# gateway: 2001:db8:1::1 +# prefix: 64 - name: cloneSpec value: machineSpec: @@ -48,70 +48,70 @@ spec: network: default: bridge: vmbr0 - # additionalDevices: - # - name: net1 - # model: virtio - # ipv4PoolRef: - # name: shared-inclusteripv4pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # ipv6PoolRef: - # name: shared-inclusteripv6pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # bridge: vmbr1 - # numSockets: 1 - # numCores: 4 - # memoryMiB: 4096 +# additionalDevices: +# - name: net1 +# model: virtio +# ipv4PoolRef: +# name: shared-inclusteripv4pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# ipv6PoolRef: +# name: shared-inclusteripv6pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# bridge: vmbr1 +# numSockets: 1 +# numCores: 4 +# memoryMiB: 4096 sourceNode: pve1 templateID: 100 workerNode: network: default: bridge: vmbr0 - # additionalDevices: - # - name: net1 - # model: virtio - # ipv4PoolRef: - # name: shared-inclusteripv4pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # ipv6PoolRef: - # name: shared-inclusteripv6pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # bridge: vmbr1 - # numSockets: 1 - # numCores: 4 - # memoryMiB: 4096 +# additionalDevices: +# - name: net1 +# model: virtio +# ipv4PoolRef: +# name: shared-inclusteripv4pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# ipv6PoolRef: +# name: shared-inclusteripv6pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# bridge: vmbr1 +# numSockets: 1 +# numCores: 4 +# memoryMiB: 4096 sourceNode: pve1 templateID: 100 loadBalancer: network: default: bridge: vmbr0 - # additionalDevices: - # - name: net1 - # model: virtio - # ipv4PoolRef: - # name: shared-inclusteripv4pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # ipv6PoolRef: - # name: shared-inclusteripv6pool - # apiGroup: ipam.cluster.x-k8s.io - # kind: GlobalInClusterIPPool - # bridge: vmbr1 - # vrfs: - # - name: vrf-ext - # interfaces: - # - net1 - # table: 500 - # routingPolicy: - # - from: 192.0.2.0/24 - # numSockets: 1 - # numCores: 2 - # memoryMiB: 2048 +# additionalDevices: +# - name: net1 +# model: virtio +# ipv4PoolRef: +# name: shared-inclusteripv4pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# ipv6PoolRef: +# name: shared-inclusteripv6pool +# apiGroup: ipam.cluster.x-k8s.io +# kind: GlobalInClusterIPPool +# bridge: vmbr1 +# vrfs: +# - name: vrf-ext +# interfaces: +# - net1 +# table: 500 +# routingPolicy: +# - from: 192.0.2.0/24 +# numSockets: 1 +# numCores: 2 +# memoryMiB: 2048 sourceNode: pve1 templateID: 100 sshAuthorizedKeys: diff --git a/internal/service/vmservice/find_test.go b/internal/service/vmservice/find_test.go index 8976a4e2..eaf60402 100644 --- a/internal/service/vmservice/find_test.go +++ b/internal/service/vmservice/find_test.go @@ -28,19 +28,15 @@ import ( infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" ) -const ( - firstNode = "node1" -) - func TestFindVM_FindByNodeAndID(t *testing.T) { ctx := context.TODO() machineScope, proxmoxClient, _ := setupReconcilerTest(t) vm := newRunningVM() machineScope.ProxmoxMachine.Spec.VirtualMachineID = ptr.To(int64(vm.VMID)) // this is used in LocateProxmoxNode function cannot be empty. 
- machineScope.ProxmoxMachine.Spec.SourceNode = firstNode + machineScope.ProxmoxMachine.Spec.SourceNode = "node1" - proxmoxClient.EXPECT().GetVM(ctx, firstNode, int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(ctx, "node1", int64(123)).Return(vm, nil).Once() _, err := FindVM(ctx, machineScope) require.NoError(t, err) @@ -106,7 +102,7 @@ func TestUpdateVMLocation_MissingName(t *testing.T) { machineScope.ProxmoxMachine.Spec.VirtualMachineID = ptr.To(int64(vm.VMID)) proxmoxClient.EXPECT().FindVMResource(ctx, uint64(123)).Return(vmr, nil).Once() - proxmoxClient.EXPECT().GetVM(ctx, firstNode, int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(ctx, "node1", int64(123)).Return(vm, nil).Once() require.Error(t, updateVMLocation(ctx, machineScope)) } @@ -122,7 +118,7 @@ func TestUpdateVMLocation_NameMismatch(t *testing.T) { machineScope.ProxmoxMachine.Spec.VirtualMachineID = ptr.To(int64(vm.VMID)) proxmoxClient.EXPECT().FindVMResource(ctx, uint64(123)).Return(vmr, nil).Once() - proxmoxClient.EXPECT().GetVM(ctx, firstNode, int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(ctx, "node1", int64(123)).Return(vm, nil).Once() require.Error(t, updateVMLocation(ctx, machineScope)) require.True(t, machineScope.HasFailed(), "expected failureReason and failureMessage to be set") @@ -141,7 +137,7 @@ func TestUpdateVMLocation_UpdateNode(t *testing.T) { }, false) proxmoxClient.EXPECT().FindVMResource(ctx, uint64(123)).Return(vmr, nil).Once() - proxmoxClient.EXPECT().GetVM(ctx, firstNode, int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(ctx, "node1", int64(123)).Return(vm, nil).Once() require.NoError(t, updateVMLocation(ctx, machineScope)) require.Equal(t, vmr.Node, *machineScope.ProxmoxMachine.Status.ProxmoxNode) @@ -170,7 +166,7 @@ func TestUpdateVMLocation_WithoutTaskNameMismatch(t *testing.T) { machineScope.ProxmoxMachine.Status.TaskRef = nil proxmoxClient.EXPECT().FindVMResource(ctx, uint64(123)).Return(vmr, nil).Once() - proxmoxClient.EXPECT().GetVM(ctx, firstNode, int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(ctx, "node1", int64(123)).Return(vm, nil).Once() require.Error(t, updateVMLocation(ctx, machineScope)) require.True(t, machineScope.HasFailed(), "expected failureReason and failureMessage to be set") diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index e4f1f433..2f0d3ab5 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -346,8 +346,7 @@ func getMachineAddresses(scope *scope.MachineScope) ([]clusterv1.MachineAddress, return addresses, nil } -// TODO: split function to avoid gocyclo. 
-func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneResponse, error) { //nolint: gocyclo +func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneResponse, error) { vmid, err := getVMID(ctx, scope) if err != nil { if errors.Is(err, ErrNoVMIDInRangeFree) { @@ -385,52 +384,25 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe if scope.ProxmoxMachine.Spec.Storage != nil { options.Storage = *scope.ProxmoxMachine.Spec.Storage } - if scope.ProxmoxMachine.Spec.Target != nil { - options.Target = *scope.ProxmoxMachine.Spec.Target - } if scope.InfraCluster.ProxmoxCluster.Status.NodeLocations == nil { scope.InfraCluster.ProxmoxCluster.Status.NodeLocations = new(infrav1alpha1.NodeLocations) } - // check if localstorage is set + // get localStorage localStorage := scope.ProxmoxMachine.GetLocalStorage() - // we fail if localstorage is set and any of target/templateid/sourcenode is set as we cannot use those in case of localstorage - if localStorage && - (scope.ProxmoxMachine.Spec.Target != nil || scope.ProxmoxMachine.Spec.TemplateID != nil || scope.ProxmoxMachine.Spec.SourceNode != "") { - err := goproxmox.ErrWrongLocalStorageConfig - scope.SetFailureMessage(err) - conditions.MarkFalse(scope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.VMProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err) - return proxmox.VMCloneResponse{}, err - } - - allowedNodes := scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes - // if ProxmoxMachine defines allowedNodes use them instead - if len(scope.ProxmoxMachine.Spec.AllowedNodes) > 0 { - allowedNodes = scope.ProxmoxMachine.Spec.AllowedNodes - } - if len(allowedNodes) == 0 && scope.ProxmoxMachine.Spec.Target != nil { - allowedNodes = append(allowedNodes, *scope.ProxmoxMachine.Spec.Target) - } + // set allowedNodes (we need to use this in scheduler) + // if Target is set ProxmoxNode still can be out of memory) + allowedNodes := setAllowedNodes(scope) - // if we do not have a target node or allowedNodes is empty, we need to get the nodes from the cluster - if len(allowedNodes) == 0 && scope.ProxmoxMachine.Spec.Target == nil { - // We have no Node list to try to schedule on. Let's try to use all nodes in cluster. 
- allowedNodes, err = scope.InfraCluster.ProxmoxClient.GetAllNodeNames(ctx) - if err != nil { - scope.SetFailureMessage(err) - scope.SetFailureReason(capierrors.InsufficientResourcesMachineError) - return proxmox.VMCloneResponse{}, err - } - } // we first try to find templates, so we can use those during scheduling var templateID int32 - // return template if it's .spec.TemplateID and .spec.SourceNode + // return templateMap if TemplateID and SourceNode used templateMap := scope.ProxmoxMachine.GetTemplateMap() - // and only if templateMap is nil (which means TemplateSelector should be used) + // TemplateSelector should be used if templateMap == nil { // get templateSelectorTags from the spec templateSelectorTags := scope.ProxmoxMachine.GetTemplateSelectorTags() @@ -451,7 +423,7 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe } } - // when we got templateMap and allowedNodes we do node scheduling + // we have allowedNodes, so we use them to schedule the VM options.Target, templateID, err = selectNextNode(ctx, scope, templateMap, allowedNodes) if err != nil { if errors.As(err, &scheduler.InsufficientMemoryError{}) { @@ -460,11 +432,11 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe } return proxmox.VMCloneResponse{}, err } - - // if localStorage use options.Target as options.Node + // if localStorage use same node for Template as Target if localStorage { options.Node = options.Target } + // if there is no options.Node, and templateMap has only one entry, // we set the options.Node and templateID to the only entry in templateMap if options.Node == "" { @@ -501,6 +473,19 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe return res, scope.InfraCluster.PatchObject() } +func setAllowedNodes(scope *scope.MachineScope) []string { + + // defaults to ProxmoxCluster allowedNodes + allowedNodes := scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes + + // if ProxmoxMachine defines allowedNodes use them instead + if len(scope.ProxmoxMachine.Spec.AllowedNodes) > 0 { + allowedNodes = scope.ProxmoxMachine.Spec.AllowedNodes + } + + return allowedNodes +} + func getVMID(ctx context.Context, scope *scope.MachineScope) (int64, error) { if scope.ProxmoxMachine.Spec.VMIDRange != nil { vmIDRangeStart := scope.ProxmoxMachine.Spec.VMIDRange.Start diff --git a/internal/service/vmservice/vm_test.go b/internal/service/vmservice/vm_test.go index bd2bcd5c..3d944daa 100644 --- a/internal/service/vmservice/vm_test.go +++ b/internal/service/vmservice/vm_test.go @@ -39,12 +39,12 @@ func TestReconcileVM_EverythingReady(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) vm := newRunningVM() machineScope.SetVirtualMachineID(int64(vm.VMID)) - machineScope.ProxmoxMachine.Spec.SourceNode = firstNode + machineScope.ProxmoxMachine.Spec.SourceNode = "node1" machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true - proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() proxmoxClient.EXPECT().CloudInitStatus(context.Background(), vm).Return(false, nil).Once() proxmoxClient.EXPECT().QemuAgentStatus(context.Background(), vm).Return(nil).Once() @@ -62,12 +62,12 @@ func 
TestReconcileVM_QemuAgentCheckDisabled(t *testing.T) { machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true - machineScope.ProxmoxMachine.Spec.SourceNode = firstNode + machineScope.ProxmoxMachine.Spec.SourceNode = "node1" machineScope.ProxmoxMachine.Spec.Checks = &infrav1alpha1.ProxmoxMachineChecks{ SkipQemuGuestAgent: ptr.To(true), } - proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() // proxmoxClient.EXPECT().CloudInitStatus(context.Background(), vm).Return(false, nil).Once() result, err := ReconcileVM(context.Background(), machineScope) @@ -83,12 +83,12 @@ func TestReconcileVM_CloudInitCheckDisabled(t *testing.T) { machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true - machineScope.ProxmoxMachine.Spec.SourceNode = firstNode + machineScope.ProxmoxMachine.Spec.SourceNode = "node1" machineScope.ProxmoxMachine.Spec.Checks = &infrav1alpha1.ProxmoxMachineChecks{ SkipCloudInitStatus: ptr.To(true), } - proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() proxmoxClient.EXPECT().QemuAgentStatus(context.Background(), vm).Return(nil) result, err := ReconcileVM(context.Background(), machineScope) @@ -104,40 +104,19 @@ func TestReconcileVM_InitCheckDisabled(t *testing.T) { machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) machineScope.ProxmoxMachine.Status.Ready = true - machineScope.ProxmoxMachine.Spec.SourceNode = firstNode + machineScope.ProxmoxMachine.Spec.SourceNode = "node1" machineScope.ProxmoxMachine.Spec.Checks = &infrav1alpha1.ProxmoxMachineChecks{ SkipCloudInitStatus: ptr.To(true), SkipQemuGuestAgent: ptr.To(true), } - proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() result, err := ReconcileVM(context.Background(), machineScope) require.NoError(t, err) require.Equal(t, infrav1alpha1.VirtualMachineStateReady, result.State) require.Equal(t, "10.10.10.10", machineScope.ProxmoxMachine.Status.Addresses[1].Address) } -func TestTargetNotSetWithLocalStorage(t *testing.T) { - ctx := context.Background() - localStorage := true - - machineScope, _, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(localStorage) - machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") - machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) - machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) - machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") - machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") - machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") - machineScope.ProxmoxMachine.Spec.Target = ptr.To("node2") - - _, err := createVM(ctx, machineScope) - - require.Equal(t, ptr.To("Localstorage does not allow usage of target/templateid/sourcenode"), 
machineScope.ProxmoxMachine.Status.FailureMessage) - require.Error(t, err) - require.Contains(t, "Localstorage does not allow usage of target/templateid/sourcenode", err.Error()) -} - func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { ctx := context.Background() machineScope, proxmoxClient, _ := setupReconcilerTest(t) @@ -147,11 +126,11 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") - machineScope.ProxmoxMachine.Spec.Target = ptr.To("node2") + machineScope.ProxmoxMachine.Spec.AllowedNodes = []string{"node2"} machineScope.ProxmoxMachine.Spec.TemplateID = ptr.To(int32(123)) - machineScope.ProxmoxMachine.Spec.SourceNode = firstNode + machineScope.ProxmoxMachine.Spec.SourceNode = "node1" expectedOptions := proxmox.VMCloneRequest{ - Node: firstNode, + Node: "node1", Name: "test", Description: "test vm", Format: "raw", @@ -166,11 +145,13 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { NewID: 123, Task: newTask(), } + proxmoxClient.EXPECT(). + GetReservableMemoryBytes(ctx, "node2", uint64(100)). + Return(uint64(5000), nil). + Once() proxmoxClient.EXPECT().CloneVM(ctx, 123, expectedOptions).Return(response, nil).Once() - proxmoxClient.EXPECT().GetReservableMemoryBytes(ctx, "node2", uint64(100)).Return(uint64(5000), nil).Once() - requeue, err := ensureVirtualMachine(ctx, machineScope) require.NoError(t, err) require.True(t, requeue) @@ -183,7 +164,7 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_SharedStorage(t *testing.T) { ctx := context.Background() vmTemplateTags := []string{"foo", "bar"} - allowedNodes := []string{firstNode, "node2"} + allowedNodes := []string{"node1", "node2"} machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ @@ -201,11 +182,9 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_SharedStorag machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") - machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(true) - // we do not set allowedNodes from spec, we use node discovery - // machineScope.ProxmoxMachine.Spec.AllowedNodes = allowedNodes + machineScope.ProxmoxMachine.Spec.AllowedNodes = allowedNodes expectedOptions := proxmox.VMCloneRequest{ - Node: firstNode, + Node: "node1", Name: "test", Description: "test vm", Format: "raw", @@ -213,16 +192,11 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_SharedStorag Pool: "pool", SnapName: "snap", Storage: "storage", - Target: firstNode, + Target: "node1", } proxmoxClient.EXPECT(). - GetAllNodeNames(ctx). - Return(allowedNodes, nil). - Once() - - proxmoxClient.EXPECT(). - GetReservableMemoryBytes(ctx, firstNode, uint64(100)). + GetReservableMemoryBytes(ctx, "node1", uint64(100)). Return(uint64(5000), nil). Once() @@ -232,8 +206,8 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_SharedStorag Once() proxmoxClient.EXPECT(). - FindVMTemplatesByTags(ctx, vmTemplateTags, allowedNodes, true). - Return(map[string]int32{firstNode: int32(123), "node2": int32(124)}, nil). + FindVMTemplatesByTags(ctx, vmTemplateTags, allowedNodes, false). 
+ Return(map[string]int32{"node1": int32(123), "node2": int32(124)}, nil). Once() response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} @@ -246,7 +220,7 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_SharedStorag require.NoError(t, err) require.True(t, requeue) - require.Equal(t, firstNode, *machineScope.ProxmoxMachine.Status.ProxmoxNode) + require.Equal(t, "node1", *machineScope.ProxmoxMachine.Status.ProxmoxNode) require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) } @@ -254,7 +228,7 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_SharedStorag func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_LocalStorage(t *testing.T) { ctx := context.Background() vmTemplateTags := []string{"foo", "bar"} - allowedNodes := []string{firstNode, "node2"} + allowedNodes := []string{"node1", "node2"} machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ @@ -265,17 +239,16 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_LocalStorage }, }, } - machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(false) machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") - machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(false) + machineScope.ProxmoxMachine.Spec.LocalStorage = ptr.To(true) machineScope.ProxmoxMachine.Spec.AllowedNodes = allowedNodes expectedOptions := proxmox.VMCloneRequest{ - Node: firstNode, + Node: "node1", Name: "test", Description: "test vm", Format: "raw", @@ -283,11 +256,11 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_LocalStorage Pool: "pool", SnapName: "snap", Storage: "storage", - Target: firstNode, + Target: "node1", } proxmoxClient.EXPECT(). - GetReservableMemoryBytes(ctx, firstNode, uint64(100)). + GetReservableMemoryBytes(ctx, "node1", uint64(100)). Return(uint64(5000), nil). Once() @@ -297,8 +270,8 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_LocalStorage Once() proxmoxClient.EXPECT(). - FindVMTemplatesByTags(ctx, vmTemplateTags, allowedNodes, false). - Return(map[string]int32{firstNode: int32(123), "node2": int32(124)}, nil). + FindVMTemplatesByTags(ctx, vmTemplateTags, allowedNodes, true). + Return(map[string]int32{"node1": int32(123), "node2": int32(124)}, nil). 
Once() response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} @@ -311,7 +284,7 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_LocalStorage require.NoError(t, err) require.True(t, requeue) - require.Equal(t, firstNode, *machineScope.ProxmoxMachine.Status.ProxmoxNode) + require.Equal(t, "node1", *machineScope.ProxmoxMachine.Status.ProxmoxNode) require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) } @@ -320,7 +293,7 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_VMTemplateNo ctx := context.Background() vmTemplateTags := []string{"foo", "bar"} localStorage := true - allowedNodes := []string{firstNode, "node2"} + allowedNodes := []string{"node1", "node2"} machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ @@ -368,7 +341,7 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode(t *testing.T) { proxmoxClient.EXPECT(). FindVMTemplatesByTags(context.Background(), vmTemplateTags, allowedNodes, localStorage). - Return(map[string]int32{firstNode: 123}, nil). + Return(map[string]int32{"node1": 123}, nil). Once() selectNextNode = func(context.Context, *scope.MachineScope, map[string]int32, []string) (string, int32, error) { @@ -377,7 +350,7 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode(t *testing.T) { t.Cleanup(func() { selectNextNode = scheduler.ScheduleVM }) expectedOptions := proxmox.VMCloneRequest{ - Node: firstNode, + Node: "node1", Name: "test", Target: "node3", } @@ -401,8 +374,8 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode(t *testing.T) { } func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes_SharedStorage(t *testing.T) { - clusterAllowedNodes := []string{firstNode, "node2", "node3", "node4"} - proxmoxMachineAllowedNodes := []string{firstNode, "node2"} + clusterAllowedNodes := []string{"node1", "node2", "node3", "node4"} + proxmoxMachineAllowedNodes := []string{"node1", "node2"} machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = clusterAllowedNodes @@ -418,7 +391,7 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes_SharedStor }, }, } - templateMap := map[string]int32{firstNode: int32(122)} + templateMap := map[string]int32{"node1": int32(122)} proxmoxClient.EXPECT(). FindVMTemplatesByTags(context.Background(), vmTemplateTags, proxmoxMachineAllowedNodes, false). 
@@ -430,7 +403,7 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes_SharedStor } t.Cleanup(func() { selectNextNode = scheduler.ScheduleVM }) - expectedOptions := proxmox.VMCloneRequest{Node: firstNode, Name: "test", Target: "node2"} + expectedOptions := proxmox.VMCloneRequest{Node: "node1", Name: "test", Target: "node2"} response := proxmox.VMCloneResponse{NewID: 122, Task: newTask()} proxmoxClient.EXPECT().CloneVM(context.Background(), 122, expectedOptions).Return(response, nil).Once() @@ -438,14 +411,14 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes_SharedStor require.NoError(t, err) require.True(t, requeue) - require.Contains(t, []string{"node2", firstNode}, *machineScope.ProxmoxMachine.Status.ProxmoxNode) + require.Contains(t, []string{"node2", "node1"}, *machineScope.ProxmoxMachine.Status.ProxmoxNode) require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) } func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes_LocalStorage(t *testing.T) { - clusterAllowedNodes := []string{firstNode, "node2", "node3", "node4"} - proxmoxMachineAllowedNodes := []string{firstNode, "node2"} + clusterAllowedNodes := []string{"node1", "node2", "node3", "node4"} + proxmoxMachineAllowedNodes := []string{"node1", "node2"} vmTemplateTags := []string{"foo", "bar"} machineScope, proxmoxClient, _ := setupReconcilerTest(t) @@ -463,7 +436,7 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes_LocalStora proxmoxClient.EXPECT(). FindVMTemplatesByTags(context.Background(), vmTemplateTags, proxmoxMachineAllowedNodes, true). - Return(map[string]int32{firstNode: int32(122), "node2": int32(123)}, nil). + Return(map[string]int32{"node1": int32(122), "node2": int32(123)}, nil). Once() selectNextNode = func(context.Context, *scope.MachineScope, map[string]int32, []string) (string, int32, error) { @@ -485,7 +458,7 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes_LocalStora } func TestEnsureVirtualMachine_CreateVM_SelectNode_InsufficientMemory(t *testing.T) { - allowedNodes := []string{firstNode} + allowedNodes := []string{"node1"} vmTemplateTags := []string{"foo"} machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = allowedNodes @@ -498,7 +471,7 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_InsufficientMemory(t *testing. } proxmoxClient.EXPECT(). FindVMTemplatesByTags(context.Background(), vmTemplateTags, allowedNodes, false). - Return(map[string]int32{firstNode: int32(122)}, nil). + Return(map[string]int32{"node1": int32(122)}, nil). Once() selectNextNode = func(context.Context, *scope.MachineScope, map[string]int32, []string) (string, int32, error) { @@ -516,7 +489,7 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_InsufficientMemory(t *testing. func TestEnsureVirtualMachine_CreateVM_VMIDRange(t *testing.T) { vmTemplateTags := []string{"foo"} - allowedNodes := []string{firstNode} + allowedNodes := []string{"node1"} machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = allowedNodes machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ @@ -533,15 +506,15 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRange(t *testing.T) { proxmoxClient.EXPECT(). 
FindVMTemplatesByTags(context.Background(), vmTemplateTags, allowedNodes, false). - Return(map[string]int32{firstNode: int32(123)}, nil). + Return(map[string]int32{"node1": int32(123)}, nil). Once() proxmoxClient.EXPECT(). - GetReservableMemoryBytes(context.Background(), firstNode, uint64(100)). + GetReservableMemoryBytes(context.Background(), "node1", uint64(100)). Return(uint64(5000), nil). Once() - expectedOptions := proxmox.VMCloneRequest{Node: firstNode, NewID: 1001, Name: "test", Target: firstNode} + expectedOptions := proxmox.VMCloneRequest{Node: "node1", NewID: 1001, Name: "test", Target: "node1"} response := proxmox.VMCloneResponse{Task: newTask(), NewID: int64(1001)} proxmoxClient.Mock.On("CheckID", context.Background(), int64(1000)).Return(false, nil) proxmoxClient.Mock.On("CheckID", context.Background(), int64(1001)).Return(true, nil) @@ -580,6 +553,7 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { Start: 1000, End: 1002, } + machineScope.ProxmoxMachine.Spec.AllowedNodes = []string{"node1"} // Add a VM with ID 1000. // Make sure the check for a free vmid skips 1000 by ensuring the Proxmox CheckID function isn't called more than once. @@ -596,13 +570,14 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { VirtualMachineID: ptr.To(int64(1000)), }, } - machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ - TemplateSource: infrav1alpha1.TemplateSource{ - TemplateSelector: &infrav1alpha1.TemplateSelector{ - MatchTags: []string{"foo"}, + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = + infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: []string{"foo"}, + }, }, - }, - } + } machine := clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "vm1000", @@ -629,21 +604,16 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { require.NoError(t, err) proxmoxClient.EXPECT(). - GetAllNodeNames(context.Background()). - Return([]string{firstNode}, nil). - Once() - - proxmoxClient.EXPECT(). - FindVMTemplatesByTags(context.Background(), []string{"foo"}, []string{firstNode}, false). - Return(map[string]int32{firstNode: int32(123)}, nil). + FindVMTemplatesByTags(context.Background(), []string{"foo"}, []string{"node1"}, false). + Return(map[string]int32{"node1": int32(123)}, nil). Once() proxmoxClient.EXPECT(). - GetReservableMemoryBytes(context.Background(), firstNode, uint64(100)). + GetReservableMemoryBytes(context.Background(), "node1", uint64(100)). Return(uint64(5000), nil). 
Once() - expectedOptions := proxmox.VMCloneRequest{Node: firstNode, NewID: 1002, Name: "test", Target: firstNode} + expectedOptions := proxmox.VMCloneRequest{Node: "node1", NewID: 1002, Name: "test", Target: "node1"} response := proxmox.VMCloneResponse{Task: newTask(), NewID: int64(1002)} proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() proxmoxClient.Mock.On("CheckID", context.Background(), int64(1001)).Return(false, nil).Once() @@ -660,7 +630,7 @@ func TestEnsureVirtualMachine_FindVM(t *testing.T) { machineScope.SetVirtualMachineID(123) machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ TemplateSource: infrav1alpha1.TemplateSource{ - SourceNode: firstNode, + SourceNode: "node1", TemplateID: ptr.To[int32](123), }, } @@ -668,7 +638,7 @@ func TestEnsureVirtualMachine_FindVM(t *testing.T) { vm := newStoppedVM() vm.VirtualMachineConfig.SMBios1 = "uuid=56603c36-46b9-4608-90ae-c731c15eae64" - proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() requeue, err := ensureVirtualMachine(context.Background(), machineScope) require.NoError(t, err) @@ -683,11 +653,11 @@ func TestEnsureVirtualMachine_UpdateVMLocation_Error(t *testing.T) { machineScope.SetVirtualMachineID(123) machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ TemplateSource: infrav1alpha1.TemplateSource{ - SourceNode: firstNode, + SourceNode: "node1", TemplateID: ptr.To[int32](123), }, } - proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(nil, fmt.Errorf("not found")).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(nil, fmt.Errorf("not found")).Once() proxmoxClient.EXPECT().FindVMResource(context.Background(), uint64(123)).Return(nil, fmt.Errorf("unavailalbe")).Once() _, err := ensureVirtualMachine(context.Background(), machineScope) @@ -912,12 +882,12 @@ func TestReconcileVM_CloudInitFailed(t *testing.T) { machineScope.ProxmoxMachine.Status.Ready = true machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ TemplateSource: infrav1alpha1.TemplateSource{ - SourceNode: firstNode, + SourceNode: "node1", TemplateID: ptr.To[int32](123), }, } - proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() proxmoxClient.EXPECT().CloudInitStatus(context.Background(), vm).Return(false, goproxmox.ErrCloudInitFailed).Once() proxmoxClient.EXPECT().QemuAgentStatus(context.Background(), vm).Return(nil).Once() @@ -936,12 +906,12 @@ func TestReconcileVM_CloudInitRunning(t *testing.T) { machineScope.ProxmoxMachine.Status.Ready = true machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ TemplateSource: infrav1alpha1.TemplateSource{ - SourceNode: firstNode, + SourceNode: "node1", TemplateID: ptr.To[int32](123), }, } - proxmoxClient.EXPECT().GetVM(context.Background(), firstNode, int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() proxmoxClient.EXPECT().CloudInitStatus(context.Background(), vm).Return(true, nil).Once() proxmoxClient.EXPECT().QemuAgentStatus(context.Background(), vm).Return(nil).Once() diff 
--git a/internal/webhook/proxmoxmachine_webhook.go b/internal/webhook/proxmoxmachine_webhook.go index d560ed91..df1cb08e 100644 --- a/internal/webhook/proxmoxmachine_webhook.go +++ b/internal/webhook/proxmoxmachine_webhook.go @@ -53,6 +53,12 @@ func (p *ProxmoxMachine) ValidateCreate(_ context.Context, obj runtime.Object) ( return warnings, apierrors.NewBadRequest(fmt.Sprintf("expected a ProxmoxMachine but got %T", obj)) } + err = validateLocalStorage(machine) + if err != nil { + warnings = append(warnings, "localStorage is mutually exclusive with templateID/sourceNode") + return warnings, apierrors.NewBadRequest("localStorage is mutually exclusive with templateID/sourceNode") + } + err = validateNetworks(machine) if err != nil { warnings = append(warnings, fmt.Sprintf("cannot create proxmox machine %s", machine.GetName())) @@ -78,6 +84,12 @@ func (p *ProxmoxMachine) ValidateUpdate(_ context.Context, old, newObj runtime.O return warnings, apierrors.NewBadRequest("tags are immutable") } + err = validateLocalStorage(newMachine) + if err != nil { + warnings = append(warnings, "localStorage is mutually exclusive with templateID/sourceNode") + return warnings, apierrors.NewBadRequest("localStorage is mutually exclusive with templateID/sourceNode") + } + err = validateNetworks(newMachine) if err != nil { warnings = append(warnings, fmt.Sprintf("cannot update proxmox machine %s", newMachine.GetName())) @@ -92,6 +104,24 @@ func (p *ProxmoxMachine) ValidateDelete(_ context.Context, _ runtime.Object) (wa return nil, nil } +func validateLocalStorage(machine *infrav1.ProxmoxMachine) error { + gk, name := machine.GroupVersionKind().GroupKind(), machine.GetName() + + // if localStorage is set to true, then templateid/sourcenode must be nil + if machine.Spec.LocalStorage != nil && *machine.Spec.LocalStorage { + if machine.Spec.TemplateID != nil || machine.Spec.SourceNode != "" { + return apierrors.NewInvalid( + gk, + name, + field.ErrorList{ + field.Invalid( + field.NewPath("spec", "localStorage"), machine.Spec.LocalStorage, "localStorage is mutually exclusive with templateID/sourceNode"), + }) + } + } + return nil +} + func validateNetworks(machine *infrav1.ProxmoxMachine) error { if machine.Spec.Network == nil { return nil diff --git a/internal/webhook/proxmoxmachine_webhook_test.go b/internal/webhook/proxmoxmachine_webhook_test.go index c39bf0fa..cb282985 100644 --- a/internal/webhook/proxmoxmachine_webhook_test.go +++ b/internal/webhook/proxmoxmachine_webhook_test.go @@ -19,10 +19,9 @@ package webhook import ( "time" - corev1 "k8s.io/api/core/v1" - . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -44,6 +43,12 @@ var _ = Describe("Controller Test", func() { g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("spec.network.default.vlan: Invalid value"))) }) + It("should disallow use templateID/sourceNode if localStorage", func() { + machine := validProxmoxMachine("test-machine") + machine.Spec.LocalStorage = ptr.To(true) + g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("localStorage is mutually exclusive with templateID/sourceNode"))) + }) + It("should disallow invalid network mtu for additional device", func() { machine := validProxmoxMachine("test-machine") machine.Spec.Network.AdditionalDevices[0].MTU = ptr.To(uint16(1000)) diff --git a/internal/webhook/webhook_suite_test.go b/internal/webhook/webhook_suite_test.go index 35998b6c..585790ad 100644 --- a/internal/webhook/webhook_suite_test.go +++ b/internal/webhook/webhook_suite_test.go @@ -24,17 +24,16 @@ import ( "testing" "time" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "github.com/ionos-cloud/cluster-api-provider-proxmox/test/helpers" - //+kubebuilder:scaffold:imports + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/ionos-cloud/cluster-api-provider-proxmox/test/helpers" ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to diff --git a/pkg/proxmox/client.go b/pkg/proxmox/client.go index 63a882b8..012c333c 100644 --- a/pkg/proxmox/client.go +++ b/pkg/proxmox/client.go @@ -39,8 +39,6 @@ type Client interface { DeleteVM(ctx context.Context, nodeName string, vmID int64) (*proxmox.Task, error) - GetAllNodeNames(ctx context.Context) ([]string, error) - GetTask(ctx context.Context, upID string) (*proxmox.Task, error) GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64) (uint64, error) diff --git a/pkg/proxmox/goproxmox/api_client.go b/pkg/proxmox/goproxmox/api_client.go index 5d5a99b1..bfb2c1f4 100644 --- a/pkg/proxmox/goproxmox/api_client.go +++ b/pkg/proxmox/goproxmox/api_client.go @@ -142,10 +142,6 @@ func (c *APIClient) FindVMResource(ctx context.Context, vmID uint64) (*proxmox.C return nil, fmt.Errorf("unable to find VM with ID %d on any of the nodes", vmID) } -func isNotInAllowedNodes(allowedNodes []string, node string) bool { - return !slices.Contains(allowedNodes, node) -} - // FindVMTemplatesByTags finds VM templates by tags across the whole cluster and ensures only one template per node. func (c *APIClient) FindVMTemplatesByTags(ctx context.Context, templateTags []string, allowedNodes []string, localStorage bool) (map[string]int32, error) { templates := make(map[string]int32) @@ -181,7 +177,7 @@ func (c *APIClient) FindVMTemplatesByTags(ctx context.Context, templateTags []st slices.Sort(vmTags) // if localstorage template should be on all allowed nodes - if localStorage && isNotInAllowedNodes(allowedNodes, vm.Node) { + if localStorage && !slices.Contains(allowedNodes, vm.Node) { continue } @@ -250,32 +246,6 @@ func (c *APIClient) DeleteVM(ctx context.Context, nodeName string, vmID int64) ( return task, nil } -// GetAllNodeNames get all nodes in cluster against which we are authenticated. 
-func (c *APIClient) GetAllNodeNames(ctx context.Context) ([]string, error) { - cluster, err := c.Client.Cluster(ctx) - if err != nil { - return nil, fmt.Errorf("cannot get cluster status: %w", err) - } - err = cluster.Status(ctx) - if err != nil { - return nil, fmt.Errorf("cannot get cluster status: %w", err) - } - var nodes []string - for _, node := range cluster.Nodes { - nodes = appendIfMissing(nodes, node.Name) - } - - return nodes, nil -} - -// appendIfMissing make sure we add only uniq items to the slice. -func appendIfMissing(slice []string, item string) []string { - if slices.Contains(slice, item) { - return slice - } - return append(slice, item) -} - // CheckID checks if the vmid is available on the cluster. // Returns true if the vmid is available, false if it is taken. func (c *APIClient) CheckID(ctx context.Context, vmid int64) (bool, error) { diff --git a/pkg/proxmox/goproxmox/errors.go b/pkg/proxmox/goproxmox/errors.go index 69d7424f..d20db09f 100644 --- a/pkg/proxmox/goproxmox/errors.go +++ b/pkg/proxmox/goproxmox/errors.go @@ -12,6 +12,6 @@ var ( // ErrMultipleTemplatesFound is returned when multiple VM templates are found. ErrMultipleTemplatesFound = errors.New("Multiple templates found") - // ErrWrongLocalStorageConfig Wrong combination of local storage configuration. - ErrWrongLocalStorageConfig = errors.New("Localstorage does not allow usage of target/templateid/sourcenode") + // ErrNoNode is returned when no target or alowedNodes is specified. + ErrNoNode = errors.New("No target or allowedNodes specified") ) diff --git a/pkg/proxmox/proxmoxtest/mock_client.go b/pkg/proxmox/proxmoxtest/mock_client.go index 660e2c5b..7bb1a8c5 100644 --- a/pkg/proxmox/proxmoxtest/mock_client.go +++ b/pkg/proxmox/proxmoxtest/mock_client.go @@ -450,64 +450,6 @@ func (_c *MockClient_FindVMTemplatesByTags_Call) RunAndReturn(run func(context.C return _c } -// GetAllNodeNames provides a mock function with given fields: ctx -func (_m *MockClient) GetAllNodeNames(ctx context.Context) ([]string, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GetAllNodeNames") - } - - var r0 []string - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) ([]string, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) []string); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]string) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// MockClient_GetAllNodeNames_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllNodeNames' -type MockClient_GetAllNodeNames_Call struct { - *mock.Call -} - -// GetAllNodeNames is a helper method to define mock.On call -// - ctx context.Context -func (_e *MockClient_Expecter) GetAllNodeNames(ctx interface{}) *MockClient_GetAllNodeNames_Call { - return &MockClient_GetAllNodeNames_Call{Call: _e.mock.On("GetAllNodeNames", ctx)} -} - -func (_c *MockClient_GetAllNodeNames_Call) Run(run func(ctx context.Context)) *MockClient_GetAllNodeNames_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *MockClient_GetAllNodeNames_Call) Return(_a0 []string, _a1 error) *MockClient_GetAllNodeNames_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *MockClient_GetAllNodeNames_Call) RunAndReturn(run func(context.Context) ([]string, error)) *MockClient_GetAllNodeNames_Call { - 
_c.Call.Return(run) - return _c -} - // GetReservableMemoryBytes provides a mock function with given fields: ctx, nodeName, nodeMemoryAdjustment func (_m *MockClient) GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64) (uint64, error) { ret := _m.Called(ctx, nodeName, nodeMemoryAdjustment) diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go index bbb2b5a6..a708f908 100644 --- a/test/helpers/envtest.go +++ b/test/helpers/envtest.go @@ -105,14 +105,13 @@ func NewTestEnvironment(setupWebhook bool, pmClient proxmox.Client) *TestEnviron } apiServer := env.ControlPlane.GetAPIServer() apiServer.Configure().Set("disable-admission-plugins", "NamespaceLifecycle") - if setupWebhook { env.WebhookInstallOptions = envtest.WebhookInstallOptions{ Paths: []string{filepath.Join(root, "..", "..", "config", "webhook")}, } } - if _, err := env.Start(); err != nil { + fmt.Println(err) err = kerrors.NewAggregate([]error{err, env.Stop()}) panic(err) } From ff82d5e67eb63bb0f367a243ef57286a75995a95 Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Wed, 30 Apr 2025 13:13:51 +0300 Subject: [PATCH 05/16] Rework how validations are made --- examples/cluster.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/cluster.yaml b/examples/cluster.yaml index aac2584a..aed92e45 100644 --- a/examples/cluster.yaml +++ b/examples/cluster.yaml @@ -3,7 +3,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: - cluster.x-k8s.io/proxmox-cluster-cni: + cluster.x-k8s.io/proxmox-cluster-cni: name: capmox-cluster spec: topology: From aa6e22c0b070742aadee12e7efbf9d737add2620 Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Wed, 30 Apr 2025 13:18:08 +0300 Subject: [PATCH 06/16] Rework how validations are made --- internal/service/vmservice/vm.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index 2f0d3ab5..f0eb5738 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -392,16 +392,15 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe // get localStorage localStorage := scope.ProxmoxMachine.GetLocalStorage() - // set allowedNodes (we need to use this in scheduler) - // if Target is set ProxmoxNode still can be out of memory) - allowedNodes := setAllowedNodes(scope) - // we first try to find templates, so we can use those during scheduling var templateID int32 // return templateMap if TemplateID and SourceNode used templateMap := scope.ProxmoxMachine.GetTemplateMap() + // set allowedNodes (we need to use this in scheduler and template search) + allowedNodes := setAllowedNodes(scope) + // TemplateSelector should be used if templateMap == nil { // get templateSelectorTags from the spec From 4d643f2334d3c54ae3a458534154dd766a069fec Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Wed, 30 Apr 2025 13:51:04 +0300 Subject: [PATCH 07/16] Rework how validations are made --- internal/service/vmservice/vm.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index f0eb5738..21eff2f4 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -357,7 +357,6 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe } options := proxmox.VMCloneRequest{ - Node: scope.ProxmoxMachine.GetNode(), NewID: int(vmid), Name: scope.ProxmoxMachine.GetName(), } 
@@ -434,20 +433,14 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe // if localStorage use same node for Template as Target if localStorage { options.Node = options.Target - } - - // if there is no options.Node, and templateMap has only one entry, - // we set the options.Node and templateID to the only entry in templateMap - if options.Node == "" { + } else { + // we use first and only template to set options.Node and templateID for i, k := range templateMap { options.Node = i templateID = k break } } - if templateID == 0 { - templateID = templateMap[options.Node] - } // at last clone machine res, err := scope.InfraCluster.ProxmoxClient.CloneVM(ctx, int(templateID), options) From ef1b9496985d6e83dbf596bac22ad84b0c2b9bf9 Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Wed, 30 Apr 2025 13:57:17 +0300 Subject: [PATCH 08/16] Rework how validations are made --- internal/service/vmservice/vm.go | 1 + pkg/proxmox/goproxmox/api_client.go | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index 21eff2f4..1319613d 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -465,6 +465,7 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe return res, scope.InfraCluster.PatchObject() } +// use ProxmoxCluster allowedNodes if ProxmoxMachine does not define them func setAllowedNodes(scope *scope.MachineScope) []string { // defaults to ProxmoxCluster allowedNodes diff --git a/pkg/proxmox/goproxmox/api_client.go b/pkg/proxmox/goproxmox/api_client.go index bfb2c1f4..b0311cf4 100644 --- a/pkg/proxmox/goproxmox/api_client.go +++ b/pkg/proxmox/goproxmox/api_client.go @@ -142,7 +142,7 @@ func (c *APIClient) FindVMResource(ctx context.Context, vmID uint64) (*proxmox.C return nil, fmt.Errorf("unable to find VM with ID %d on any of the nodes", vmID) } -// FindVMTemplatesByTags finds VM templates by tags across the whole cluster and ensures only one template per node. +// FindVMTemplatesByTags finds VM templates by tags across the whole cluster. func (c *APIClient) FindVMTemplatesByTags(ctx context.Context, templateTags []string, allowedNodes []string, localStorage bool) (map[string]int32, error) { templates := make(map[string]int32) @@ -176,7 +176,7 @@ func (c *APIClient) FindVMTemplatesByTags(ctx context.Context, templateTags []st vmTags := strings.Split(vm.Tags, ";") slices.Sort(vmTags) - // if localstorage template should be on all allowed nodes + // if localstorage - template should be on all allowed nodes if localStorage && !slices.Contains(allowedNodes, vm.Node) { continue } From 836077220e4ed2cca3d229748ac5e0b5fcc89545 Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Wed, 30 Apr 2025 13:58:22 +0300 Subject: [PATCH 09/16] Remove unused error --- pkg/proxmox/goproxmox/errors.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/proxmox/goproxmox/errors.go b/pkg/proxmox/goproxmox/errors.go index d20db09f..c6936bd7 100644 --- a/pkg/proxmox/goproxmox/errors.go +++ b/pkg/proxmox/goproxmox/errors.go @@ -11,7 +11,4 @@ var ( // ErrMultipleTemplatesFound is returned when multiple VM templates are found. ErrMultipleTemplatesFound = errors.New("Multiple templates found") - - // ErrNoNode is returned when no target or alowedNodes is specified. 
- ErrNoNode = errors.New("No target or allowedNodes specified") ) From b99fa0c09ae3471bf60d759790bc7a2158d22f63 Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Wed, 30 Apr 2025 13:59:36 +0300 Subject: [PATCH 10/16] Remove debugging info --- test/helpers/envtest.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go index a708f908..b72cee30 100644 --- a/test/helpers/envtest.go +++ b/test/helpers/envtest.go @@ -105,13 +105,13 @@ func NewTestEnvironment(setupWebhook bool, pmClient proxmox.Client) *TestEnviron } apiServer := env.ControlPlane.GetAPIServer() apiServer.Configure().Set("disable-admission-plugins", "NamespaceLifecycle") + if setupWebhook { env.WebhookInstallOptions = envtest.WebhookInstallOptions{ Paths: []string{filepath.Join(root, "..", "..", "config", "webhook")}, } } if _, err := env.Start(); err != nil { - fmt.Println(err) err = kerrors.NewAggregate([]error{err, env.Stop()}) panic(err) } From c14b5ebb8fc1bd99d883a4578a00e87df978f169 Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Wed, 30 Apr 2025 14:10:03 +0300 Subject: [PATCH 11/16] Remove function as it's not needed there --- internal/service/vmservice/vm.go | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index 1319613d..8237a046 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -398,7 +398,13 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe templateMap := scope.ProxmoxMachine.GetTemplateMap() // set allowedNodes (we need to use this in scheduler and template search) - allowedNodes := setAllowedNodes(scope) + // defaults to ProxmoxCluster allowedNodes + allowedNodes := scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes + + // if ProxmoxMachine defines allowedNodes use them instead + if len(scope.ProxmoxMachine.Spec.AllowedNodes) > 0 { + allowedNodes = scope.ProxmoxMachine.Spec.AllowedNodes + } // TemplateSelector should be used if templateMap == nil { @@ -465,20 +471,6 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe return res, scope.InfraCluster.PatchObject() } -// use ProxmoxCluster allowedNodes if ProxmoxMachine does not define them -func setAllowedNodes(scope *scope.MachineScope) []string { - - // defaults to ProxmoxCluster allowedNodes - allowedNodes := scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes - - // if ProxmoxMachine defines allowedNodes use them instead - if len(scope.ProxmoxMachine.Spec.AllowedNodes) > 0 { - allowedNodes = scope.ProxmoxMachine.Spec.AllowedNodes - } - - return allowedNodes -} - func getVMID(ctx context.Context, scope *scope.MachineScope) (int64, error) { if scope.ProxmoxMachine.Spec.VMIDRange != nil { vmIDRangeStart := scope.ProxmoxMachine.Spec.VMIDRange.Start From 19f947e69995d6d8caa062e36a3b974d6ebfb38a Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Thu, 15 May 2025 11:16:00 +0300 Subject: [PATCH 12/16] add .spec.Target back and add deprecation notice --- api/v1alpha1/proxmoxmachine_types.go | 5 +++++ api/v1alpha1/zz_generated.deepcopy.go | 5 +++++ .../infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml | 5 +++++ ...astructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml | 5 +++++ .../infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml | 5 +++++ ...astructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml | 5 +++++ docs/advanced-setups.md | 2 +- internal/controller/proxmoxmachine_controller.go | 6 
++++++ internal/service/vmservice/vm.go | 5 +++++ 9 files changed, 42 insertions(+), 1 deletion(-) diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index bacce702..3d6242b1 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -242,6 +242,11 @@ type VirtualMachineCloneSpec struct { // Storage for full clone. // +optional Storage *string `json:"storage,omitempty"` + + // Target node. Only allowed if the original VM is on shared storage. + // +optional + // Deprecated: Use `AllowedNodes`` instead. + Target *string `json:"target,omitempty"` } // TemplateSelector defines MatchTags for looking up VM templates. diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 8f86061a..1537fd4c 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1090,6 +1090,11 @@ func (in *VirtualMachineCloneSpec) DeepCopyInto(out *VirtualMachineCloneSpec) { *out = new(string) **out = **in } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineCloneSpec. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index 80bbe797..dfd3fad9 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -645,6 +645,11 @@ spec: minItems: 1 type: array x-kubernetes-list-type: set + target: + description: |- + Target node. Only allowed if the original VM is on shared storage. + Deprecated: Use `AllowedNodes`` instead.. + type: string templateID: description: TemplateID the vm_template vmid used for cloning a new VM. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 6c0029f9..c2a4d35a 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -686,6 +686,11 @@ spec: minItems: 1 type: array x-kubernetes-list-type: set + target: + description: |- + Target node. Only allowed if the original VM is on shared storage. + Deprecated: Use `AllowedNodes`` instead.. + type: string templateID: description: TemplateID the vm_template vmid used for cloning a new VM. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index 894b1bad..64d125a5 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -602,6 +602,11 @@ spec: minItems: 1 type: array x-kubernetes-list-type: set + target: + description: |- + Target node. Only allowed if the original VM is on shared storage. + Deprecated: Use `AllowedNodes`` instead.. + type: string templateID: description: TemplateID the vm_template vmid used for cloning a new VM. 
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index 911f2f1f..7b072b0f 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -643,6 +643,11 @@ spec: minItems: 1 type: array x-kubernetes-list-type: set + target: + description: |- + Target node. Only allowed if the original VM is on shared storage. + Deprecated: Use `AllowedNodes`` instead.. + type: string templateID: description: TemplateID the vm_template vmid used for cloning a new VM. diff --git a/docs/advanced-setups.md b/docs/advanced-setups.md index caca1996..5a7e7229 100644 --- a/docs/advanced-setups.md +++ b/docs/advanced-setups.md @@ -187,7 +187,7 @@ For example, you can set the `TEMPLATE_TAGS="tag1,tag2"` environment variable. Y ### localStorage `false` (default) -* Shared storage is assumed. +* Shared storage is assumed for template. * The template must be accessible from any node. * Only one template with the matching tags should exist in the entire cluster. * If multiple templates found, the operation fails. diff --git a/internal/controller/proxmoxmachine_controller.go b/internal/controller/proxmoxmachine_controller.go index 5f3d3850..24b2d2c4 100644 --- a/internal/controller/proxmoxmachine_controller.go +++ b/internal/controller/proxmoxmachine_controller.go @@ -91,6 +91,12 @@ func (r *ProxmoxMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, err } + // Emit deprecation warning if .spec.Target is set + if proxmoxMachine.Spec.Target != nil { + logger.Info("DEPRECATION NOTICE: .spec.Target is deprecated and will be removed in a future release. Use .spec.AllowedNodes instead.", + "ProxmoxMachine", req.NamespacedName) + } + // Fetch the Machine. 
machine, err := util.GetOwnerMachine(ctx, r.Client, proxmoxMachine.ObjectMeta) if err != nil { diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index 8237a046..390fcd67 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -405,6 +405,11 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe if len(scope.ProxmoxMachine.Spec.AllowedNodes) > 0 { allowedNodes = scope.ProxmoxMachine.Spec.AllowedNodes } + // set .spec.Target as allowedNodes (we use this in scheduler) + if scope.ProxmoxMachine.Spec.Target != nil && + (len(scope.ProxmoxMachine.Spec.AllowedNodes) == 0 || len(scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes) == 0) { + allowedNodes = []string{*scope.ProxmoxMachine.Spec.Target} + } // TemplateSelector should be used if templateMap == nil { From e798db8216d74e9872dbff562e2950fd83adacf8 Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Mon, 19 May 2025 17:31:54 +0300 Subject: [PATCH 13/16] Update go.sum and add nolint to test --- go.sum | 1 - internal/service/vmservice/find_test.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/go.sum b/go.sum index ccc5ea75..f38504c3 100644 --- a/go.sum +++ b/go.sum @@ -146,7 +146,6 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= diff --git a/internal/service/vmservice/find_test.go b/internal/service/vmservice/find_test.go index eaf60402..09800326 100644 --- a/internal/service/vmservice/find_test.go +++ b/internal/service/vmservice/find_test.go @@ -34,7 +34,7 @@ func TestFindVM_FindByNodeAndID(t *testing.T) { vm := newRunningVM() machineScope.ProxmoxMachine.Spec.VirtualMachineID = ptr.To(int64(vm.VMID)) // this is used in LocateProxmoxNode function cannot be empty. - machineScope.ProxmoxMachine.Spec.SourceNode = "node1" + machineScope.ProxmoxMachine.Spec.SourceNode = "node1" //nolint:goconst proxmoxClient.EXPECT().GetVM(ctx, "node1", int64(123)).Return(vm, nil).Once() From ce641bfb28ac3791ff8726bedb7e6ba1f4c1bffa Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Tue, 20 May 2025 11:33:39 +0300 Subject: [PATCH 14/16] Update crds --- .../bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml | 2 +- ...infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml | 2 +- .../bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml | 2 +- ...infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index dfd3fad9..0569eb42 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -648,7 +648,7 @@ spec: target: description: |- Target node. Only allowed if the original VM is on shared storage. 
- Deprecated: Use `AllowedNodes`` instead.. + Deprecated: Use `AllowedNodes`` instead. type: string templateID: description: TemplateID the vm_template vmid used for cloning diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index c2a4d35a..32685c93 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -689,7 +689,7 @@ spec: target: description: |- Target node. Only allowed if the original VM is on shared storage. - Deprecated: Use `AllowedNodes`` instead.. + Deprecated: Use `AllowedNodes`` instead. type: string templateID: description: TemplateID the vm_template vmid used diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index 64d125a5..61bec348 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -605,7 +605,7 @@ spec: target: description: |- Target node. Only allowed if the original VM is on shared storage. - Deprecated: Use `AllowedNodes`` instead.. + Deprecated: Use `AllowedNodes`` instead. type: string templateID: description: TemplateID the vm_template vmid used for cloning a new diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index 7b072b0f..a92fbf82 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -646,7 +646,7 @@ spec: target: description: |- Target node. Only allowed if the original VM is on shared storage. - Deprecated: Use `AllowedNodes`` instead.. + Deprecated: Use `AllowedNodes`` instead. type: string templateID: description: TemplateID the vm_template vmid used for cloning From e393e185abe4297cd0feffa90af5f6a8addb261e Mon Sep 17 00:00:00 2001 From: Aivars Sterns Date: Tue, 20 May 2025 12:05:09 +0300 Subject: [PATCH 15/16] Fix typo --- api/v1alpha1/proxmoxmachine_types.go | 2 +- .../bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml | 2 +- ...infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml | 2 +- .../bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml | 2 +- ...infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index 3d6242b1..b167456b 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -245,7 +245,7 @@ type VirtualMachineCloneSpec struct { // Target node. Only allowed if the original VM is on shared storage. // +optional - // Deprecated: Use `AllowedNodes`` instead. + // Deprecated: Use `AllowedNodes` instead. Target *string `json:"target,omitempty"` } diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index 0569eb42..42b6b758 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -648,7 +648,7 @@ spec: target: description: |- Target node. 
Only allowed if the original VM is on shared storage. - Deprecated: Use `AllowedNodes`` instead. + Deprecated: Use `AllowedNodes` instead. type: string templateID: description: TemplateID the vm_template vmid used for cloning diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 32685c93..e8324705 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -689,7 +689,7 @@ spec: target: description: |- Target node. Only allowed if the original VM is on shared storage. - Deprecated: Use `AllowedNodes`` instead. + Deprecated: Use `AllowedNodes` instead. type: string templateID: description: TemplateID the vm_template vmid used diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index 61bec348..8bb5af1b 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -605,7 +605,7 @@ spec: target: description: |- Target node. Only allowed if the original VM is on shared storage. - Deprecated: Use `AllowedNodes`` instead. + Deprecated: Use `AllowedNodes` instead. type: string templateID: description: TemplateID the vm_template vmid used for cloning a new diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index a92fbf82..b1dd86bd 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -646,7 +646,7 @@ spec: target: description: |- Target node. Only allowed if the original VM is on shared storage. - Deprecated: Use `AllowedNodes`` instead. + Deprecated: Use `AllowedNodes` instead. type: string templateID: description: TemplateID the vm_template vmid used for cloning From 8f9ce93ed0de70fde5cd47fabf1f612e855a9e33 Mon Sep 17 00:00:00 2001 From: holmesb <5072156+holmesb@users.noreply.github.com> Date: Thu, 30 Oct 2025 08:29:02 +0000 Subject: [PATCH 16/16] Add multiple node PVE templates (with local storage) capability to cluster classes. --- docs/advanced-setups.md | 109 ++++++++++++++ templates/cluster-class-calico.yaml | 205 +++++++++++++++++++++----- templates/cluster-class-cilium.yaml | 205 +++++++++++++++++++++----- templates/cluster-class.yaml | 213 +++++++++++++++++++++++----- 4 files changed, 618 insertions(+), 114 deletions(-) diff --git a/docs/advanced-setups.md b/docs/advanced-setups.md index 5a7e7229..f190c456 100644 --- a/docs/advanced-setups.md +++ b/docs/advanced-setups.md @@ -197,6 +197,50 @@ For example, you can set the `TEMPLATE_TAGS="tag1,tag2"` environment variable. Y * Each node listed in the `allowedNodes` is expected to have a copy of the template stored locally. * If any of the nodes in `allowedNodes` is missing the template, the operation fails. +### Template lookup and cluster classes +With the ClusterClasses provided in this repo, in your cluster you can choose one of two ways to tell CAPMOX which +Proxmox VM template to clone from. 
1) Selector mode (tag-based) — recommended for multi-node setups: +```yaml +spec: + topology: + variables: + - name: templateSelector + value: + matchTags: ["capmox","ubuntu-24.04","k8s-1.33"] +``` +This enables `templateSelector.matchTags` and removes `sourceNode` and `templateID`. + +Or 2) Explicit mode (sourceNode/templateID) — default if templateSelector is not set. +```yaml +spec: + topology: + variables: + - name: cloneSpec + value: + machineSpec: + controlPlane: + sourceNode: pve1 + templateID: 100 + workerNode: + sourceNode: pve1 + templateID: 100 + loadBalancer: + sourceNode: pve1 + templateID: 100 + ``` + +If using local (not shared) storage on nodes, you can also set the `localStorage` variable to `true`: +```yaml +spec: + topology: + variables: + - name: localStorage + value: true +``` +See the earlier `localStorage` section for details. If using Explicit mode, `localStorage` is not applicable and should not +be set; leave it at the default `false`. In Explicit mode you’re telling CAPMOX exactly which node and template ID to use. +That implicitly pins the clone to the node where that template resides, regardless of whether the backing storage is +local or shared. ## Proxmox RBAC with least privileges @@ -322,6 +366,71 @@ spec: You can set either `ipv4PoolRef` or `ipv6PoolRef` or you can also set them both for dual-stack. It's up for you also to manage the IP Pool, you can choose a `GlobalInClusterIPPool` or an `InClusterIPPool`. +## Additional Volumes +By default, only a boot volume is created in machines. If additional disks are required for data storage, they can be +specified in the ProxmoxMachineTemplates. + +```yaml +kind: ProxmoxMachineTemplate +spec: + template: + spec: + storage: local-lvm # Optional: a default storage to use when a volume doesn't set .storage + disks: + additionalVolumes: + - disk: scsi1 + sizeGb: 200 + - disk: scsi2 # target slot (e.g. scsi1, sata1, virtio1, ide2) + sizeGb: 80 # capacity in gigabytes + # Optional flags: + storage: my-nfs # Optional per-volume storage override. Uses .spec.template.spec.storage if omitted + format: qcow2 # Only specify if using file-backed storage. If omitted, default for disk is used. + discard: true + ioThread: true + ssd: true +``` +In the same way, additionalVolumes can also be specified in ProxmoxClusters, ProxmoxClusterTemplates, +and ProxmoxMachines. The flags format, discard, ioThread, and ssd are supported by this provider. +See Proxmox [docs](https://pve.proxmox.com/pve-docs/qm.1.html#qm_hard_disk) for details about these flags. + +Alternatively, if using cluster-class, define additionalVolumes in your cluster: +```yaml +kind: Cluster +spec: + topology: + class: proxmox-clusterclass-cilium-v0.1.0 + variables: + - name: workerAdditionalVolumes + value: + - { disk: scsi1, sizeGb: 80, storage: my-lvm } + - { disk: ide1, sizeGb: 80, storage: my-zfs } + - name: controlPlaneAdditionalVolumes + value: + - { disk: virtio1, sizeGb: 80, storage: my-zfs } + - name: loadBalancerAdditionalVolumes + value: + - { disk: sata1, sizeGb: 80, storage: my-nfs, format: qcow2 } +``` +To use the same storage for all machines of a given type, you can specify a `Storage` variable and then omit `storage` +from the `workerAdditionalVolumes`.
Eg for workers: +```yaml +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/proxmox-cluster-cni: cilium + name: capmox-cluster +spec: + topology: + class: proxmox-clusterclass-cilium-v0.1.0 + variables: + - name: workerStorage + value: my-lvm + - name: workerAdditionalVolumes + value: + - { disk: scsi1, sizeGb: 80 } + - { disk: scsi2, sizeGb: 80 } +``` + ## Notes * Clusters with IPV6 only is supported. diff --git a/templates/cluster-class-calico.yaml b/templates/cluster-class-calico.yaml index 83e80a70..4c05ac99 100644 --- a/templates/cluster-class-calico.yaml +++ b/templates/cluster-class-calico.yaml @@ -166,6 +166,29 @@ spec: mode: type: string enum: ["ipvs","iptables"] + - name: templateSelector + required: false + schema: + openAPIV3Schema: + description: When set, we use tag-based template selection (and drop sourceNode/templateID). + type: object + properties: + matchTags: + type: array + items: + type: string + minItems: 1 + required: + - matchTags + - name: localStorage + required: false + schema: + openAPIV3Schema: + type: boolean + default: false + description: > + If true, CAPMOX will schedule clones from a template on the same node where the VM is created (works with + templateSelector or explicit sourceNode/templateID). Defaults to false. - name: cloneSpec required: true schema: @@ -187,8 +210,6 @@ spec: properties: controlPlane: &machineSpec type: object - required: - - sourceNode properties: disks: type: object @@ -512,20 +533,162 @@ spec: path: /etc/kubernetes/admin.conf type: FileOrCreate name: kubeconfig + - name: ApplyLocalStorageFlag-ControlPlane + definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 kind: ProxmoxMachineTemplate matchResources: controlPlane: true jsonPatches: - - op: replace + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + + - name: ApplyLocalStorageFlag-Worker + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + + - name: ApplyLocalStorageFlag-LoadBalancer + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + +# (A) Selector mode: if templateSelector is provided, set it and REMOVE sourceNode/templateID + - name: TemplateSelectorMode-ControlPlane + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + - name: TemplateSelectorMode-Worker + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + 
variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + - name: TemplateSelectorMode-LoadBalancer + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + + # (B) Explicit mode: when NO templateSelector, set sourceNode/templateID from cloneSpec + - name: ExplicitMode-ControlPlane + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add path: /spec/template/spec/sourceNode valueFrom: variable: cloneSpec.machineSpec.controlPlane.sourceNode - - op: replace + - op: add path: /spec/template/spec/templateID valueFrom: variable: cloneSpec.machineSpec.controlPlane.templateID + - name: ExplicitMode-Worker + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/sourceNode + valueFrom: + variable: cloneSpec.machineSpec.workerNode.sourceNode + - op: add + path: /spec/template/spec/templateID + valueFrom: + variable: cloneSpec.machineSpec.workerNode.templateID + - name: ExplicitMode-LoadBalancer + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/sourceNode + valueFrom: + variable: cloneSpec.machineSpec.loadBalancer.sourceNode + - op: add + path: /spec/template/spec/templateID + valueFrom: + variable: cloneSpec.machineSpec.loadBalancer.templateID - name: kube-proxy-setup description: "kube-proxy configuration" enabledIf: "{{ if eq .kubeProxy.mode \"ipvs\" }}true{{ end }}" @@ -573,23 +736,6 @@ spec: template: | - name: root sshAuthorizedKeys: {{ .cloneSpec.sshAuthorizedKeys }} - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: ProxmoxMachineTemplate - matchResources: - controlPlane: false - machineDeploymentClass: - names: - - proxmox-worker - jsonPatches: - - op: replace - path: /spec/template/spec/sourceNode - valueFrom: - variable: cloneSpec.machineSpec.workerNode.sourceNode - - op: replace - path: /spec/template/spec/templateID - valueFrom: - variable: cloneSpec.machineSpec.workerNode.templateID - name: LoadBalancerSetup description: "Configure LoadBalancer Node Initialisation" definitions: @@ -607,23 +753,6 @@ spec: template: | - name: root sshAuthorizedKeys: {{ .cloneSpec.sshAuthorizedKeys }} - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: ProxmoxMachineTemplate - matchResources: - controlPlane: false - machineDeploymentClass: - names: - - proxmox-loadbalancer - jsonPatches: - - op: replace - path: /spec/template/spec/sourceNode - valueFrom: - variable: 
cloneSpec.machineSpec.loadBalancer.sourceNode - - op: replace - path: /spec/template/spec/templateID - valueFrom: - variable: cloneSpec.machineSpec.loadBalancer.templateID - name: ControlPlaneNodeSockets description: "Configure Control Plane Sockets" enabledIf: "{{ if .cloneSpec.machineSpecs.controlPlane.numSockets }}true{{ end }}" diff --git a/templates/cluster-class-cilium.yaml b/templates/cluster-class-cilium.yaml index 7b696ae7..e8733f32 100644 --- a/templates/cluster-class-cilium.yaml +++ b/templates/cluster-class-cilium.yaml @@ -166,6 +166,29 @@ spec: mode: type: string enum: ["ipvs","iptables"] + - name: templateSelector + required: false + schema: + openAPIV3Schema: + description: When set, we use tag-based template selection (and drop sourceNode/templateID). + type: object + properties: + matchTags: + type: array + items: + type: string + minItems: 1 + required: + - matchTags + - name: localStorage + required: false + schema: + openAPIV3Schema: + type: boolean + default: false + description: > + If true, CAPMOX will schedule clones from a template on the same node where the VM is created (works with + templateSelector or explicit sourceNode/templateID). Defaults to false. - name: cloneSpec required: true schema: @@ -187,8 +210,6 @@ spec: properties: controlPlane: &machineSpec type: object - required: - - sourceNode properties: disks: type: object @@ -512,20 +533,162 @@ spec: path: /etc/kubernetes/admin.conf type: FileOrCreate name: kubeconfig + - name: ApplyLocalStorageFlag-ControlPlane + definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 kind: ProxmoxMachineTemplate matchResources: controlPlane: true jsonPatches: - - op: replace + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + + - name: ApplyLocalStorageFlag-Worker + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + + - name: ApplyLocalStorageFlag-LoadBalancer + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + +# (A) Selector mode: if templateSelector is provided, set it and REMOVE sourceNode/templateID + - name: TemplateSelectorMode-ControlPlane + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + - name: TemplateSelectorMode-Worker + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: 
/spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + - name: TemplateSelectorMode-LoadBalancer + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + + # (B) Explicit mode: when NO templateSelector, set sourceNode/templateID from cloneSpec + - name: ExplicitMode-ControlPlane + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add path: /spec/template/spec/sourceNode valueFrom: variable: cloneSpec.machineSpec.controlPlane.sourceNode - - op: replace + - op: add path: /spec/template/spec/templateID valueFrom: variable: cloneSpec.machineSpec.controlPlane.templateID + - name: ExplicitMode-Worker + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/sourceNode + valueFrom: + variable: cloneSpec.machineSpec.workerNode.sourceNode + - op: add + path: /spec/template/spec/templateID + valueFrom: + variable: cloneSpec.machineSpec.workerNode.templateID + - name: ExplicitMode-LoadBalancer + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/sourceNode + valueFrom: + variable: cloneSpec.machineSpec.loadBalancer.sourceNode + - op: add + path: /spec/template/spec/templateID + valueFrom: + variable: cloneSpec.machineSpec.loadBalancer.templateID - name: kube-proxy-setup description: "kube-proxy configuration" enabledIf: "{{ if eq .kubeProxy.mode \"ipvs\" }}true{{ end }}" @@ -573,23 +736,6 @@ spec: template: | - name: root sshAuthorizedKeys: {{ .cloneSpec.sshAuthorizedKeys }} - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: ProxmoxMachineTemplate - matchResources: - controlPlane: false - machineDeploymentClass: - names: - - proxmox-worker - jsonPatches: - - op: replace - path: /spec/template/spec/sourceNode - valueFrom: - variable: cloneSpec.machineSpec.workerNode.sourceNode - - op: replace - path: /spec/template/spec/templateID - valueFrom: - variable: cloneSpec.machineSpec.workerNode.templateID - name: LoadBalancerSetup description: "Configure LoadBalancer Node Initialisation" definitions: @@ -607,23 +753,6 @@ spec: template: | - name: root sshAuthorizedKeys: {{ .cloneSpec.sshAuthorizedKeys }} - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: ProxmoxMachineTemplate - matchResources: - controlPlane: false - machineDeploymentClass: - names: - - proxmox-loadbalancer - jsonPatches: - - op: replace - path: /spec/template/spec/sourceNode - valueFrom: - variable: cloneSpec.machineSpec.loadBalancer.sourceNode - - op: replace - path: 
/spec/template/spec/templateID - valueFrom: - variable: cloneSpec.machineSpec.loadBalancer.templateID - name: ControlPlaneNodeSockets description: "Configure Control Plane Sockets" enabledIf: "{{ if .cloneSpec.machineSpecs.controlPlane.numSockets }}true{{ end }}" diff --git a/templates/cluster-class.yaml b/templates/cluster-class.yaml index cecdd33d..10b21e27 100644 --- a/templates/cluster-class.yaml +++ b/templates/cluster-class.yaml @@ -136,6 +136,29 @@ spec: mode: type: string enum: ["ipvs","iptables"] + - name: templateSelector + required: false + schema: + openAPIV3Schema: + description: When set, we use tag-based template selection (and drop sourceNode/templateID). + type: object + properties: + matchTags: + type: array + items: + type: string + minItems: 1 + required: + - matchTags + - name: localStorage + required: false + schema: + openAPIV3Schema: + type: boolean + default: false + description: > + If true, CAPMOX will schedule clones from a template on the same node where the VM is created (works with + templateSelector or explicit sourceNode/templateID). Defaults to false. - name: cloneSpec required: true schema: @@ -157,8 +180,6 @@ spec: properties: controlPlane: &machineSpec type: object - required: - - sourceNode properties: disks: type: object @@ -482,20 +503,162 @@ spec: path: /etc/kubernetes/admin.conf type: FileOrCreate name: kubeconfig + - name: ApplyLocalStorageFlag-ControlPlane + definitions: - selector: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 kind: ProxmoxMachineTemplate matchResources: controlPlane: true jsonPatches: - - op: replace + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + + - name: ApplyLocalStorageFlag-Worker + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + + - name: ApplyLocalStorageFlag-LoadBalancer + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/localStorage + valueFrom: + template: '{{ .localStorage | default false }}' + +# (A) Selector mode: if templateSelector is provided, set it and REMOVE sourceNode/templateID + - name: TemplateSelectorMode-ControlPlane + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + - name: TemplateSelectorMode-Worker + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + - name: 
TemplateSelectorMode-LoadBalancer + enabledIf: '{{ if .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/templateSelector + valueFrom: + variable: templateSelector + - op: remove + path: /spec/template/spec/sourceNode + - op: remove + path: /spec/template/spec/templateID + + # (B) Explicit mode: when NO templateSelector, set sourceNode/templateID from cloneSpec + - name: ExplicitMode-ControlPlane + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add path: /spec/template/spec/sourceNode valueFrom: variable: cloneSpec.machineSpec.controlPlane.sourceNode - - op: replace + - op: add path: /spec/template/spec/templateID valueFrom: variable: cloneSpec.machineSpec.controlPlane.templateID + - name: ExplicitMode-Worker + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-worker + jsonPatches: + - op: add + path: /spec/template/spec/sourceNode + valueFrom: + variable: cloneSpec.machineSpec.workerNode.sourceNode + - op: add + path: /spec/template/spec/templateID + valueFrom: + variable: cloneSpec.machineSpec.workerNode.templateID + - name: ExplicitMode-LoadBalancer + enabledIf: '{{ if not .templateSelector }}true{{ end }}' + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + matchResources: + machineDeploymentClass: + names: + - proxmox-loadbalancer + jsonPatches: + - op: add + path: /spec/template/spec/sourceNode + valueFrom: + variable: cloneSpec.machineSpec.loadBalancer.sourceNode + - op: add + path: /spec/template/spec/templateID + valueFrom: + variable: cloneSpec.machineSpec.loadBalancer.templateID - name: kube-proxy-setup description: "kube-proxy configuration" enabledIf: "{{ if eq .kubeProxy.mode \"ipvs\" }}true{{ end }}" @@ -543,23 +706,6 @@ spec: template: | - name: root sshAuthorizedKeys: {{ .cloneSpec.sshAuthorizedKeys }} - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: ProxmoxMachineTemplate - matchResources: - controlPlane: false - machineDeploymentClass: - names: - - proxmox-worker - jsonPatches: - - op: replace - path: /spec/template/spec/sourceNode - valueFrom: - variable: cloneSpec.machineSpec.workerNode.sourceNode - - op: replace - path: /spec/template/spec/templateID - valueFrom: - variable: cloneSpec.machineSpec.workerNode.templateID - name: LoadBalancerSetup description: "Configure LoadBalancer Node Initialisation" definitions: @@ -577,23 +723,6 @@ spec: template: | - name: root sshAuthorizedKeys: {{ .cloneSpec.sshAuthorizedKeys }} - - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: ProxmoxMachineTemplate - matchResources: - controlPlane: false - machineDeploymentClass: - names: - - proxmox-loadbalancer - jsonPatches: - - op: replace - path: /spec/template/spec/sourceNode - valueFrom: - variable: cloneSpec.machineSpec.loadBalancer.sourceNode - - op: replace - path: /spec/template/spec/templateID - valueFrom: - variable: cloneSpec.machineSpec.loadBalancer.templateID - name: 
ControlPlaneNodeSockets description: "Configure Control Plane Sockets" enabledIf: "{{ if .cloneSpec.machineSpecs.controlPlane.numSockets }}true{{ end }}" @@ -948,6 +1077,10 @@ spec: templateID: 100 format: qcow2 full: true + network: + default: + bridge: ${BRIDGE} + model: virtio --- kind: ProxmoxMachineTemplate apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 @@ -960,6 +1093,10 @@ spec: templateID: 100 format: qcow2 full: true + network: + default: + bridge: ${BRIDGE} + model: virtio --- kind: KubeadmConfigTemplate apiVersion: bootstrap.cluster.x-k8s.io/v1beta1