diff --git a/Makefile b/Makefile index db3b3b8..b893523 100644 --- a/Makefile +++ b/Makefile @@ -184,7 +184,7 @@ KIND ?= kind KUSTOMIZE ?= $(LOCALBIN)/kustomize CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen ENVTEST ?= $(LOCALBIN)/setup-envtest -GOLANGCI_LINT = $(LOCALBIN)/golangci-lint +GOLANGCI_LINT ?= $(LOCALBIN)/golangci-lint ## Tool Versions KUSTOMIZE_VERSION ?= v5.8.1 diff --git a/api/v1alpha1/valkeycluster_types.go b/api/v1alpha1/valkeycluster_types.go index 36e0dcc..d77f8fa 100644 --- a/api/v1alpha1/valkeycluster_types.go +++ b/api/v1alpha1/valkeycluster_types.go @@ -38,6 +38,15 @@ const ( ClusterStateFailed ClusterState = "Failed" ) +// This list defines specific Valkey server configuration parameters that cannot +// be overridden by a user-supplied configuration within the CR. Doing so would +// potentially break the operator's behavior, which could result in data loss, or +// a non-functioning cluster +var NonUserOverrideConfigParameters = []string{ + "cluster-enabled", + "aclfile", +} + // ValkeyClusterSpec defines the desired state of ValkeyCluster. type ValkeyClusterSpec struct { @@ -88,6 +97,10 @@ type ValkeyClusterSpec struct { // Additional containers or overrides for existing containers, applied using strategic merge patch // +optional Containers []corev1.Container `json:"containers,omitempty"` + + // Additional Valkey configuration parameters + // +optional + Config map[string]string `json:"config,omitempty"` } type ExporterSpec struct { diff --git a/api/v1alpha1/valkeynode_types.go b/api/v1alpha1/valkeynode_types.go index ef2f397..76cfe75 100644 --- a/api/v1alpha1/valkeynode_types.go +++ b/api/v1alpha1/valkeynode_types.go @@ -65,9 +65,10 @@ type ValkeyNodeSpec struct { // +optional Exporter ExporterSpec `json:"exporter,omitempty"` - // ScriptsConfigMapName specifies the name of the ConfigMap that contains the scripts for the ValkeyNode. 
+ // UsersConfigMapName specifies the name of the ConfigMap that contains the + // scripts, and Valkey config for the ValkeyNode. // +optional - ScriptsConfigMapName string `json:"scriptsConfigMapName,omitempty"` + UsersConfigMapName string `json:"usersConfigMapName,omitempty"` // UsersACLSecretName is the name of the Secret containing the ACL user // file. When set, mounts a users-acl volume from this Secret so the diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index cf5c6ba..46a9466 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -253,6 +253,13 @@ func (in *ValkeyClusterSpec) DeepCopyInto(out *ValkeyClusterSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValkeyClusterSpec. 
diff --git a/config/crd/bases/valkey.io_valkeyclusters.yaml b/config/crd/bases/valkey.io_valkeyclusters.yaml index 5475fc1..575b7f2 100644 --- a/config/crd/bases/valkey.io_valkeyclusters.yaml +++ b/config/crd/bases/valkey.io_valkeyclusters.yaml @@ -971,6 +971,11 @@ spec: x-kubernetes-list-type: atomic type: object type: object + config: + additionalProperties: + type: string + description: Additional Valkey configuration parameters + type: object containers: description: Additional containers or overrides for existing containers, applied using strategic merge patch diff --git a/config/crd/bases/valkey.io_valkeynodes.yaml b/config/crd/bases/valkey.io_valkeynodes.yaml index c2adfa1..5ba737a 100644 --- a/config/crd/bases/valkey.io_valkeynodes.yaml +++ b/config/crd/bases/valkey.io_valkeynodes.yaml @@ -2639,10 +2639,6 @@ spec: More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object - scriptsConfigMapName: - description: ScriptsConfigMapName specifies the name of the ConfigMap - that contains the scripts for the ValkeyNode. - type: string tolerations: description: Tolerations defines the pod tolerations. items: @@ -2689,6 +2685,11 @@ spec: file. When set, mounts a users-acl volume from this Secret so the container can load aclfile /config/users/users.acl. type: string + usersConfigMapName: + description: |- + UsersConfigMapName specifies the name of the ConfigMap that contains the + scripts, and Valkey config for the ValkeyNode. 
+ type: string workloadType: default: StatefulSet description: |- diff --git a/config/samples/v1alpha1_valkeycluster.yaml b/config/samples/v1alpha1_valkeycluster.yaml index 6fa0e24..dd4292f 100644 --- a/config/samples/v1alpha1_valkeycluster.yaml +++ b/config/samples/v1alpha1_valkeycluster.yaml @@ -5,6 +5,9 @@ metadata: spec: shards: 3 replicas: 1 + config: + maxmemory: 50mb + maxmemory-policy: allkeys-lfu resources: requests: memory: "256Mi" diff --git a/go.mod b/go.mod index 1975003..1b0c09b 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( k8s.io/apimachinery v0.35.0 k8s.io/client-go v0.35.0 sigs.k8s.io/controller-runtime v0.23.3 + sigs.k8s.io/yaml v1.6.0 ) require ( @@ -98,5 +99,4 @@ require ( sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect - sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/internal/controller/config.go b/internal/controller/config.go new file mode 100644 index 0000000..1003283 --- /dev/null +++ b/internal/controller/config.go @@ -0,0 +1,224 @@ +/* +Copyright 2025 Valkey Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "crypto/sha256" + "embed" + "fmt" + "maps" + "slices" + "strings" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + logf "sigs.k8s.io/controller-runtime/pkg/log" + valkeyiov1alpha1 "valkey.io/valkey-operator/api/v1alpha1" +) + +const ( + scriptsHashKey = "valkey.io/script-hash" + configHashKey = "valkey.io/config-hash" + configFileKey = "valkey.conf" + + readinessScriptKey = "readiness-check.sh" + livenessScriptKey = "liveness-check.sh" + + // This hash should be updated whenever the contents of either script changes, which would + // coincide with operator version bump. + // $ cat internal/controller/scripts/{liveness-check.sh,readiness-check.sh} | sha256sum + scriptsHash = "8531132f52ac311772dfcb45c107c34ab05e719a0df644cc332512277b564346" + + // Average-ish length of Valkey parameter + value + averageParameterLength = 20 +) + +//go:embed scripts/* +var scripts embed.FS + +func getConfigMapName(clusterName string) string { + return clusterName + "-config" +} + +// Return a base config of parameters that users shouldn't be able to override +func getBaseConfig() string { + return `# Base operator config +cluster-enabled yes +protected-mode no +cluster-node-timeout 2000 +aclfile /config/users/users.acl +` +} + +func getUserConfig(ctx context.Context, cluster *valkeyiov1alpha1.ValkeyCluster) string { + + specConfig := cluster.Spec.Config + + // Exit early if nothing + if len(specConfig) == 0 { + return "" + } + + log := logf.FromContext(ctx) + + // Build the config + var configBuilder strings.Builder + configBuilder.Grow(len(specConfig) * averageParameterLength) + writeConfigLine(&configBuilder, "#", "Extra Config") + + // Sort the config keys to keep consistent processing order + sortedKeys := slices.Sorted(maps.Keys(specConfig)) + + for 
_, param := range sortedKeys { + + if slices.Contains(valkeyiov1alpha1.NonUserOverrideConfigParameters, param) { + log.Error(nil, "Prohibited valkey server config", "parameter", param) + continue + } + + writeConfigLine(&configBuilder, param, specConfig[param]) + } + + return configBuilder.String() +} + +// Create or update a default valkey.conf +// If additional config is provided, append to the default map +func (r *ValkeyClusterReconciler) upsertConfigMap(ctx context.Context, cluster *valkeyiov1alpha1.ValkeyCluster) error { + + log := logf.FromContext(ctx) + + // Embed readiness check script + readiness, err := scripts.ReadFile("scripts/readiness-check.sh") + if err != nil { + return fmt.Errorf("reading embedded readiness-check.sh: %w", err) + } + + // Embed liveness check script + liveness, err := scripts.ReadFile("scripts/liveness-check.sh") + if err != nil { + return fmt.Errorf("reading embedded liveness-check.sh: %w", err) + } + + // Get base config + var newConfigBuilder strings.Builder + newConfigBuilder.WriteString(getBaseConfig()) + + // User-provided config from spec + newConfigBuilder.WriteString(getUserConfig(ctx, cluster)) + + // Final string version of the config + newServerConfig := newConfigBuilder.String() + + // Calculate hash of constructed configMap contents (ie: updated scripts, changed/added parameters) + newServerConfigHash := fmt.Sprintf("%x", sha256.Sum256([]byte(newServerConfig))) + + // Look for, and fetch existing configMap for this cluster + serverConfigMapName := getConfigMapName(cluster.Name) + serverConfigMap := &corev1.ConfigMap{} + if err := r.Get(ctx, types.NamespacedName{ + Name: serverConfigMapName, + Namespace: cluster.Namespace, + }, serverConfigMap); err != nil { + if !apierrors.IsNotFound(err) { + log.Error(err, "failed to fetch server configmap") + return err + } + + // ConfigMap not found; This happens on cluster init + log.V(2).Info("creating server configMap", "name", serverConfigMapName) + + // Create configMap object 
with contents + serverConfigMap.ObjectMeta = metav1.ObjectMeta{ + Name: serverConfigMapName, + Namespace: cluster.Namespace, + Labels: labels(cluster), + Annotations: map[string]string{ + configHashKey: newServerConfigHash, + scriptsHashKey: scriptsHash, + }, + } + serverConfigMap.Data = map[string]string{ + readinessScriptKey: string(readiness), + livenessScriptKey: string(liveness), + configFileKey: newServerConfig, + } + + // Register ownership of the configMap + if err := controllerutil.SetControllerReference(cluster, serverConfigMap, r.Scheme); err != nil { + log.Error(err, "Failed to grab ownership of server configMap") + r.Recorder.Eventf(cluster, nil, corev1.EventTypeWarning, "ConfigMapCreationFailed", "UpsertConfigMap", "Failed to grab ownership of server configMap: %v", err) + return err + } + + // Create the configMap + if err := r.Create(ctx, serverConfigMap); err != nil { + log.Error(err, "Failed to create server configMap") + r.Recorder.Eventf(cluster, nil, corev1.EventTypeWarning, "ConfigMapCreationFailed", "UpsertConfigMap", "Failed to create server configMap: %v", err) + return err + } + + r.Recorder.Eventf(cluster, nil, corev1.EventTypeNormal, "ConfigMapCreated", "UpsertConfigMap", "Created server configMap") + + // All good; new configMap with contents created + return nil + } + + // ConfigMap exists + + // Compare scripts hash in existing configMap to const value in operator; update scripts contents if different + updatedScripts := upsertAnnotation(serverConfigMap, scriptsHashKey, scriptsHash) + if updatedScripts { + log.V(1).Info("updated readiness, and liveness scripts") + serverConfigMap.Data[readinessScriptKey] = string(readiness) + serverConfigMap.Data[livenessScriptKey] = string(liveness) + } + + // If the generated config contents hash (from above) matches the hash of the current + // config contents, and we did not update the scripts contents, exit early. The annotation upsert runs first so the config-hash annotation is refreshed even when the scripts changed + if !upsertAnnotation(serverConfigMap, configHashKey, 
newServerConfigHash) && !updatedScripts { + log.V(1).Info("server config unchanged") + return nil + } + + // Update the configMap with the generated config contents + serverConfigMap.Data[configFileKey] = newServerConfig + + // Update + if err := r.Update(ctx, serverConfigMap); err != nil { + log.Error(err, "Failed to update server configMap") + r.Recorder.Eventf(cluster, nil, corev1.EventTypeWarning, "ConfigMapUpdateFailed", "UpsertConfigMap", "Failed to update server configMap: %v", err) + return err + } + + r.Recorder.Eventf(cluster, nil, corev1.EventTypeNormal, "ConfigMapUpdated", "UpsertConfigMap", "Synchronized server configMap") + + // All is good. configMap was updated with new contents. + return nil +} + +// Helper function to write a config line in the form of "parameter value\n" to a strings.Builder +func writeConfigLine(builder *strings.Builder, name, value string) { + builder.WriteString(name) + builder.WriteString(" ") + builder.WriteString(value) + builder.WriteString("\n") +} diff --git a/internal/controller/config_test.go b/internal/controller/config_test.go new file mode 100644 index 0000000..83c6b1e --- /dev/null +++ b/internal/controller/config_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2025 Valkey Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "os" + "strings" + "testing" + + "sigs.k8s.io/yaml" + valkeyiov1alpha1 "valkey.io/valkey-operator/api/v1alpha1" +) + +// getSampleCluster parses the v1alpha1_valkeycluster.yaml sample file +// and returns the ValkeyCluster object. +func getSampleCluster(t *testing.T) *valkeyiov1alpha1.ValkeyCluster { + valkeyClusterCR := "../../config/samples/v1alpha1_valkeycluster.yaml" + + data, err := os.ReadFile(valkeyClusterCR) + if err != nil { + t.Fatalf("failed to read sample YAML file: %v", err) + } + + cluster := &valkeyiov1alpha1.ValkeyCluster{} + if err := yaml.Unmarshal(data, cluster); err != nil { + t.Fatalf("failed to unmarshal sample YAML: %v", err) + } + + return cluster +} + +func TestGetUserConfig(t *testing.T) { + + ctx := t.Context() + cluster := getSampleCluster(t) + + baseConfig := getBaseConfig() + baseConfigLen := len(baseConfig) + if baseConfigLen != 119 { + t.Fatalf("unexpected base config length: got %d, want 119", baseConfigLen) + } + + userConfig := getUserConfig(ctx, cluster) + if !strings.Contains(userConfig, "maxmemory-policy") { + t.Fatalf("unexpected user config: missing 'maxmemory-policy'") + } +} diff --git a/internal/controller/utils.go b/internal/controller/utils.go index 5903cc8..97a1580 100644 --- a/internal/controller/utils.go +++ b/internal/controller/utils.go @@ -106,7 +106,7 @@ func annotations(cluster *valkeyv1.ValkeyCluster) map[string]string { } // This function takes a K8S object reference (eg: pod, secret, configmap, etc), -// and a map of annotations to add to, or replace existing, within the object. +// and a key and value to add as a new annotation, or to replace an existing one, within the object. 
// Returns true if the annotation was added, or updated func upsertAnnotation(o metav1.Object, key string, val string) bool { diff --git a/internal/controller/utils_test.go b/internal/controller/utils_test.go index cd78667..a9e02d0 100644 --- a/internal/controller/utils_test.go +++ b/internal/controller/utils_test.go @@ -72,6 +72,14 @@ func TestAnnotations(t *testing.T) { } } +func TestConfigMapName(t *testing.T) { + testMapName := "test-resource-config" + result := getConfigMapName("test-resource") + if result != testMapName { + t.Errorf("Expected '%v', got '%v'", testMapName, result) + } +} + func TestNodeRoleAndShard(t *testing.T) { nodes := &valkeyv1.ValkeyNodeList{ Items: []valkeyv1.ValkeyNode{ diff --git a/internal/controller/valkeycluster_controller.go b/internal/controller/valkeycluster_controller.go index 4458234..736c8fb 100644 --- a/internal/controller/valkeycluster_controller.go +++ b/internal/controller/valkeycluster_controller.go @@ -18,7 +18,6 @@ package controller import ( "context" - "embed" "errors" "fmt" "slices" @@ -64,9 +63,6 @@ type ValkeyClusterReconciler struct { Recorder events.EventRecorder } -//go:embed scripts/* -var scripts embed.FS - // +kubebuilder:rbac:groups=valkey.io,resources=valkeyclusters,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=valkey.io,resources=valkeyclusters/status,verbs=get;update;patch // +kubebuilder:rbac:groups=valkey.io,resources=valkeyclusters/finalizers,verbs=update @@ -362,46 +358,6 @@ func (r *ValkeyClusterReconciler) upsertService(ctx context.Context, cluster *va return nil } -// Create or update a basic valkey.conf -func (r *ValkeyClusterReconciler) upsertConfigMap(ctx context.Context, cluster *valkeyiov1alpha1.ValkeyCluster) error { - readiness, err := scripts.ReadFile("scripts/readiness-check.sh") - if err != nil { - return err - } - liveness, err := scripts.ReadFile("scripts/liveness-check.sh") - if err != nil { - return err - } - - cm := &corev1.ConfigMap{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: cluster.Name, - Namespace: cluster.Namespace, - }, - } - result, err := controllerutil.CreateOrUpdate(ctx, r.Client, cm, func() error { - cm.Labels = labels(cluster) - cm.Data = map[string]string{ - "readiness-check.sh": string(readiness), - "liveness-check.sh": string(liveness), - "valkey.conf": ` -cluster-enabled yes -protected-mode no -cluster-node-timeout 2000 -aclfile /config/users/users.acl`, - } - return controllerutil.SetControllerReference(cluster, cm, r.Scheme) - }) - if err != nil { - r.Recorder.Eventf(cluster, cm, corev1.EventTypeWarning, "ConfigMapUpdateFailed", "UpdateConfigMap", "Failed to upsert ConfigMap: %v", err) - return err - } - if result == controllerutil.OperationResultCreated { - r.Recorder.Eventf(cluster, cm, corev1.EventTypeNormal, "ConfigMapCreated", "CreateConfigMap", "Created ConfigMap with configuration") - } - return nil -} - // reconcileValkeyNodes ensures every (shard, nodeIndex) pair has a ValkeyNode CR. // Each ValkeyNode manages exactly one Pod (Replicas=1) and is named // deterministically: @@ -524,16 +480,16 @@ func buildClusterValkeyNode(cluster *valkeyiov1alpha1.ValkeyCluster, shardIndex Labels: l, }, Spec: valkeyiov1alpha1.ValkeyNodeSpec{ - Image: cluster.Spec.Image, - WorkloadType: cluster.Spec.WorkloadType, - Resources: cluster.Spec.Resources, - NodeSelector: cluster.Spec.NodeSelector, - Affinity: cluster.Spec.Affinity, - Tolerations: cluster.Spec.Tolerations, - Exporter: cluster.Spec.Exporter, - Containers: cluster.Spec.Containers, - ScriptsConfigMapName: cluster.Name, - UsersACLSecretName: getInternalSecretName(cluster.Name), + Image: cluster.Spec.Image, + WorkloadType: cluster.Spec.WorkloadType, + Resources: cluster.Spec.Resources, + NodeSelector: cluster.Spec.NodeSelector, + Affinity: cluster.Spec.Affinity, + Tolerations: cluster.Spec.Tolerations, + Exporter: cluster.Spec.Exporter, + Containers: cluster.Spec.Containers, + UsersConfigMapName: getConfigMapName(cluster.Name), + 
UsersACLSecretName: getInternalSecretName(cluster.Name), }, } } diff --git a/internal/controller/valkeycluster_controller_test.go b/internal/controller/valkeycluster_controller_test.go index d7a7612..770e6c5 100644 --- a/internal/controller/valkeycluster_controller_test.go +++ b/internal/controller/valkeycluster_controller_test.go @@ -325,7 +325,6 @@ var _ = Describe("EventRecorder", func() { events := collectEvents(fakeRecorder) Expect(events).To(ContainElement(ContainSubstring("ConfigMapCreated"))) Expect(events).To(ContainElement(ContainSubstring("Normal"))) - Expect(events).To(ContainElement(ContainSubstring("Created ConfigMap with configuration"))) }) }) diff --git a/internal/controller/valkeynode_controller.go b/internal/controller/valkeynode_controller.go index 7d26c11..9092e59 100644 --- a/internal/controller/valkeynode_controller.go +++ b/internal/controller/valkeynode_controller.go @@ -153,11 +153,11 @@ func (r *ValkeyNodeReconciler) ensureDeployment(ctx context.Context, node *valke } // ensureConfigMap creates or updates the ConfigMap for the ValkeyNode. -// If ScriptsConfigMapName is set, the ConfigMap is assumed to +// If UsersConfigMapName is set, the ConfigMap is assumed to // be managed externally and this step is skipped. func (r *ValkeyNodeReconciler) ensureConfigMap(ctx context.Context, node *valkeyiov1alpha1.ValkeyNode) error { log := logf.FromContext(ctx) - if node.Spec.ScriptsConfigMapName != "" { + if node.Spec.UsersConfigMapName != "" { // ConfigMap is provided externally (e.g. by ValkeyCluster), skip creation. 
return nil } diff --git a/internal/controller/valkeynode_controller_test.go b/internal/controller/valkeynode_controller_test.go index 6a271f5..92dab7a 100644 --- a/internal/controller/valkeynode_controller_test.go +++ b/internal/controller/valkeynode_controller_test.go @@ -45,10 +45,14 @@ var _ = Describe("ValkeyNode Controller", func() { Name: resourceName, Namespace: "default", } - childName := types.NamespacedName{ + statefulSetName := types.NamespacedName{ Name: "valkey-" + resourceName, Namespace: "default", } + configName := types.NamespacedName{ + Name: getConfigMapName(resourceName), + Namespace: "default", + } BeforeEach(func() { By("creating the custom resource for the Kind ValkeyNode") @@ -71,11 +75,11 @@ var _ = Describe("ValkeyNode Controller", func() { AfterEach(func() { // Delete owned resources explicitly — envtest does not run garbage collection. cm := &corev1.ConfigMap{} - if err := k8sClient.Get(ctx, childName, cm); err == nil { + if err := k8sClient.Get(ctx, configName, cm); err == nil { Expect(k8sClient.Delete(ctx, cm)).To(Succeed()) } sts := &appsv1.StatefulSet{} - if err := k8sClient.Get(ctx, childName, sts); err == nil { + if err := k8sClient.Get(ctx, statefulSetName, sts); err == nil { Expect(k8sClient.Delete(ctx, sts)).To(Succeed()) } @@ -97,14 +101,14 @@ var _ = Describe("ValkeyNode Controller", func() { By("verifying the ConfigMap was created with probe scripts") cm := &corev1.ConfigMap{} - Expect(k8sClient.Get(ctx, childName, cm)).To(Succeed()) + Expect(k8sClient.Get(ctx, configName, cm)).To(Succeed()) Expect(cm.Data).To(HaveKey("valkey.conf")) Expect(cm.Data).To(HaveKey("liveness-check.sh")) Expect(cm.Data).To(HaveKey("readiness-check.sh")) By("verifying the StatefulSet was created with correct labels") sts := &appsv1.StatefulSet{} - Expect(k8sClient.Get(ctx, childName, sts)).To(Succeed()) + Expect(k8sClient.Get(ctx, statefulSetName, sts)).To(Succeed()) Expect(sts.Spec.Template.Labels).To(HaveKeyWithValue("app.kubernetes.io/instance", 
resourceName)) Expect(sts.Spec.Template.Labels).To(HaveKeyWithValue("app.kubernetes.io/component", "valkey-node")) }) @@ -158,7 +162,7 @@ var _ = Describe("ValkeyNode Controller", func() { By("capturing ResourceVersion after first reconcile") sts := &appsv1.StatefulSet{} - Expect(k8sClient.Get(ctx, childName, sts)).To(Succeed()) + Expect(k8sClient.Get(ctx, statefulSetName, sts)).To(Succeed()) rvAfterFirst := sts.ResourceVersion By("second reconcile with no changes") @@ -167,7 +171,7 @@ var _ = Describe("ValkeyNode Controller", func() { By("verifying ResourceVersion is unchanged") sts2 := &appsv1.StatefulSet{} - Expect(k8sClient.Get(ctx, childName, sts2)).To(Succeed()) + Expect(k8sClient.Get(ctx, statefulSetName, sts2)).To(Succeed()) Expect(sts2.ResourceVersion).To(Equal(rvAfterFirst), "StatefulSet should not be updated when nothing changed") }) }) @@ -177,8 +181,18 @@ var _ = Describe("ValkeyNode Controller", func() { ctx := context.Background() - typeNamespacedName := types.NamespacedName{Name: resourceName, Namespace: "default"} - childName := types.NamespacedName{Name: "valkey-" + resourceName, Namespace: "default"} + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + childName := types.NamespacedName{ + Name: "valkey-" + resourceName, + Namespace: "default", + } + configName := types.NamespacedName{ + Name: getConfigMapName(resourceName), + Namespace: "default", + } BeforeEach(func() { node := &valkeyiov1alpha1.ValkeyNode{} @@ -195,7 +209,7 @@ var _ = Describe("ValkeyNode Controller", func() { AfterEach(func() { cm := &corev1.ConfigMap{} - if err := k8sClient.Get(ctx, childName, cm); err == nil { + if err := k8sClient.Get(ctx, configName, cm); err == nil { Expect(k8sClient.Delete(ctx, cm)).To(Succeed()) } deploy := &appsv1.Deployment{} diff --git a/internal/controller/valkeynode_resources.go b/internal/controller/valkeynode_resources.go index d8b5731..cdd0760 100644 --- a/internal/controller/valkeynode_resources.go 
+++ b/internal/controller/valkeynode_resources.go @@ -51,7 +51,7 @@ func valkeyNodeLabels(node *valkeyiov1alpha1.ValkeyNode) map[string]string { // buildValkeyNodeConfigMap builds a ConfigMap containing the embedded liveness // and readiness probe scripts, plus an empty valkey.conf. -// The ConfigMap is named after valkeyNodeResourceName(node). +// The ConfigMap is named via config.go:getConfigMapName(node). func buildValkeyNodeConfigMap(node *valkeyiov1alpha1.ValkeyNode) (*corev1.ConfigMap, error) { liveness, err := scripts.ReadFile("scripts/liveness-check.sh") if err != nil { @@ -64,7 +64,7 @@ func buildValkeyNodeConfigMap(node *valkeyiov1alpha1.ValkeyNode) (*corev1.Config return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: valkeyNodeResourceName(node), + Name: getConfigMapName(node.Name), Namespace: node.Namespace, Labels: valkeyNodeLabels(node), }, @@ -232,9 +232,9 @@ func buildValkeyNodePodTemplateSpec(node *valkeyiov1alpha1.ValkeyNode, labels ma // Use the explicitly provided ConfigMap name, or fall back to the default // resource name (which the controller creates automatically). 
- configMapName := node.Spec.ScriptsConfigMapName + configMapName := node.Spec.UsersConfigMapName if configMapName == "" { - configMapName = valkeyNodeResourceName(node) + configMapName = getConfigMapName(node.Name) } podSpec := corev1.PodSpec{ diff --git a/internal/controller/valkeynode_resources_test.go b/internal/controller/valkeynode_resources_test.go index 2595015..7319f02 100644 --- a/internal/controller/valkeynode_resources_test.go +++ b/internal/controller/valkeynode_resources_test.go @@ -37,8 +37,8 @@ func newTestValkeyNode(name, namespace string) *valkeyv1.ValkeyNode { Namespace: namespace, }, Spec: valkeyv1.ValkeyNodeSpec{ - Image: "valkey/valkey:9.0.0", - ScriptsConfigMapName: "valkey-scripts", + Image: "valkey/valkey:9.0.0", + UsersConfigMapName: "valkey-config", }, } } @@ -119,10 +119,10 @@ func TestBuildValkeyNodePodTemplateSpec(t *testing.T) { // Volumes require.Len(t, pts.Spec.Volumes, 2) assert.Equal(t, "scripts", pts.Spec.Volumes[0].Name) - assert.Equal(t, "valkey-scripts", pts.Spec.Volumes[0].ConfigMap.Name) + assert.Equal(t, getConfigMapName("valkey"), pts.Spec.Volumes[0].ConfigMap.Name) assert.Equal(t, int32(0755), *pts.Spec.Volumes[0].ConfigMap.DefaultMode) assert.Equal(t, "valkey-conf", pts.Spec.Volumes[1].Name) - assert.Equal(t, "valkey-scripts", pts.Spec.Volumes[1].ConfigMap.Name) + assert.Equal(t, getConfigMapName("valkey"), pts.Spec.Volumes[1].ConfigMap.Name) } func TestBuildValkeyNodeDeployment(t *testing.T) { @@ -282,7 +282,7 @@ func TestBuildValkeyNodeConfigMap(t *testing.T) { cm, err := buildValkeyNodeConfigMap(node) require.NoError(t, err) - assert.Equal(t, "valkey-mynode", cm.Name) + assert.Equal(t, getConfigMapName("mynode"), cm.Name) assert.Equal(t, "test-ns", cm.Namespace) assert.Equal(t, valkeyNodeLabels(node), cm.Labels) @@ -293,21 +293,21 @@ func TestBuildValkeyNodeConfigMap(t *testing.T) { } func TestBuildValkeyNodePodTemplateSpec_ConfigMapNameFallback(t *testing.T) { - t.Run("uses ScriptsConfigMapName when set", func(t 
*testing.T) { - node := newTestValkeyNode("mynode", "test-ns") // ScriptsConfigMapName = "valkey-scripts" + t.Run("uses UsersConfigMapName when set", func(t *testing.T) { + node := newTestValkeyNode("mynode", "test-ns") // UsersConfigMapName = "valkey-config" pts, err := buildValkeyNodePodTemplateSpec(node, valkeyNodeLabels(node)) require.NoError(t, err) - assert.Equal(t, "valkey-scripts", pts.Spec.Volumes[0].ConfigMap.Name) - assert.Equal(t, "valkey-scripts", pts.Spec.Volumes[1].ConfigMap.Name) + assert.Equal(t, "valkey-config", pts.Spec.Volumes[0].ConfigMap.Name) + assert.Equal(t, "valkey-config", pts.Spec.Volumes[1].ConfigMap.Name) }) - t.Run("falls back to resource name when ScriptsConfigMapName is empty", func(t *testing.T) { + t.Run("falls back to resource name when UsersConfigMapName is empty", func(t *testing.T) { node := newTestValkeyNode("mynode", "test-ns") - node.Spec.ScriptsConfigMapName = "" + node.Spec.UsersConfigMapName = "" pts, err := buildValkeyNodePodTemplateSpec(node, valkeyNodeLabels(node)) require.NoError(t, err) - assert.Equal(t, "valkey-mynode", pts.Spec.Volumes[0].ConfigMap.Name) - assert.Equal(t, "valkey-mynode", pts.Spec.Volumes[1].ConfigMap.Name) + assert.Equal(t, "mynode-config", pts.Spec.Volumes[0].ConfigMap.Name) + assert.Equal(t, "mynode-config", pts.Spec.Volumes[1].ConfigMap.Name) }) } @@ -600,7 +600,7 @@ func TestBuildClusterValkeyNode_PropagatesSpecFields(t *testing.T) { assert.Equal(t, cluster.Spec.Tolerations, node.Spec.Tolerations, "Tolerations must be propagated") assert.Equal(t, cluster.Spec.Exporter, node.Spec.Exporter, "Exporter must be propagated") assert.Equal(t, cluster.Spec.Containers, node.Spec.Containers, "Containers must be propagated") - assert.Equal(t, cluster.Name, node.Spec.ScriptsConfigMapName, "ScriptsConfigMapName must be the cluster name") + assert.Equal(t, getConfigMapName(cluster.Name), node.Spec.UsersConfigMapName, "UsersConfigMapName must match configmap name") assert.Equal(t, 
getInternalSecretName(cluster.Name), node.Spec.UsersACLSecretName, "UsersACLSecretName must match internal secret name") } diff --git a/test/e2e/valkeycluster_test.go b/test/e2e/valkeycluster_test.go index 2365559..fc49460 100644 --- a/test/e2e/valkeycluster_test.go +++ b/test/e2e/valkeycluster_test.go @@ -76,7 +76,7 @@ var _ = Describe("ValkeyCluster", Ordered, func() { By("validating the ConfigMap") verifyConfigMapExists := func(g Gomega) { - cmd := exec.Command("kubectl", "get", "configmap", valkeyClusterName) + cmd := exec.Command("kubectl", "get", "configmap", valkeyClusterName+"-config") _, err := utils.Run(cmd) g.Expect(err).NotTo(HaveOccurred()) } @@ -240,14 +240,15 @@ var _ = Describe("ValkeyCluster", Ordered, func() { } Eventually(verifyDescribeEvents).Should(Succeed()) - By("validating cluster access") - verifyClusterAccess := func(g Gomega) { - // Start a Valkey client pod to access the cluster and get its status. + By("validating client commands") + verifyClusterAccess := func(g Gomega, expected string, command ...string) { + // Start a Valkey client pod to access the cluster and execute commands clusterFqdn := fmt.Sprintf("%s.default.svc.cluster.local", valkeyClusterName) - cmd := exec.Command("kubectl", "run", "client", + // Append the client command to the overall kubectl run command + cmd := exec.Command("kubectl", append([]string{"run", "client", fmt.Sprintf("--image=%s", valkeyClientImage), "--restart=Never", "--", - "valkey-cli", "-c", "-h", clusterFqdn, "CLUSTER", "INFO") + "valkey-cli", "-c", "-h", clusterFqdn}, command...)...) _, err := utils.Run(cmd) g.Expect(err).NotTo(HaveOccurred()) @@ -266,10 +267,15 @@ var _ = Describe("ValkeyCluster", Ordered, func() { g.Expect(err).NotTo(HaveOccurred()) // The cluster should be ok. - g.Expect(output).To(ContainSubstring("cluster_state:ok")) + g.Expect(output).To(ContainSubstring(expected)) } - Eventually(verifyClusterAccess).Should(Succeed()) + Eventually(verifyClusterAccess). 
+ WithArguments("cluster_state:ok", "CLUSTER", "INFO"). + Should(Succeed(), "Failed CLUSTER INFO") + Eventually(verifyClusterAccess). + WithArguments("52428800", "CONFIG", "GET", "maxmemory"). + Should(Succeed(), "Failed CONFIG GET maxmemory") }) It("creates a cluster with custom users", func() { diff --git a/test/e2e/valkeynode_test.go b/test/e2e/valkeynode_test.go index 2e8856b..67cc5dd 100644 --- a/test/e2e/valkeynode_test.go +++ b/test/e2e/valkeynode_test.go @@ -80,13 +80,13 @@ spec: By("waiting for the ConfigMap to be created") Eventually(func(g Gomega) { - cmd := exec.Command("kubectl", "get", "configmap", "valkey-"+nodeName) + cmd := exec.Command("kubectl", "get", "configmap", nodeName+"-config") _, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred(), "ConfigMap valkey-%s should exist", nodeName) + g.Expect(err).NotTo(HaveOccurred(), "ConfigMap %s-config should exist", nodeName) }).Should(Succeed()) By("verifying the ConfigMap contains the required script keys") - cmd := exec.Command("kubectl", "get", "configmap", "valkey-"+nodeName, + cmd := exec.Command("kubectl", "get", "configmap", nodeName+"-config", "-o", "jsonpath={.data}") output, err := utils.Run(cmd) Expect(err).NotTo(HaveOccurred()) @@ -285,7 +285,7 @@ kind: ValkeyNode metadata: name: %s spec: - scriptsConfigMapName: %s + usersConfigMapName: %s `, nodeName, cmName) cmd = exec.Command("kubectl", "apply", "-f", "-") @@ -299,10 +299,10 @@ spec: By("verifying the controller did NOT create an owned ConfigMap") Consistently(func(g Gomega) { - cmd := exec.Command("kubectl", "get", "configmap", "valkey-"+nodeName) + cmd := exec.Command("kubectl", "get", "configmap", nodeName+"-config") _, err := utils.Run(cmd) g.Expect(err).To(HaveOccurred(), - "controller should not create a ConfigMap when scriptsConfigMapName is set") + "controller should not create a ConfigMap when UsersConfigMapName is set") }, 10*time.Second, 2*time.Second).Should(Succeed()) By("waiting for the ValkeyNode to become ready using 
the external ConfigMap")