From 29963167307a564351729cbdce0a3611a05550c3 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Thu, 4 Dec 2025 11:54:16 +0200 Subject: [PATCH 01/12] Add logic for ignoring external roles --- controllers/om/deployment.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/controllers/om/deployment.go b/controllers/om/deployment.go index a58e8b6d8..29a150b9a 100644 --- a/controllers/om/deployment.go +++ b/controllers/om/deployment.go @@ -616,6 +616,28 @@ func (d Deployment) GetNumberOfExcessProcesses(resourceName string) int { return excessProcesses } +func (d Deployment) EnsureRoles(roles []mdbv1.MongoDBRole, deletedRoles []string) { + roleMap := make(map[string]struct{}) + for _, r := range roles { + roleMap[r.Role+"@"+r.Db] = struct{}{} + } + + for _, r := range deletedRoles { + roleMap[r] = struct{}{} + } + + var mergedRoles []mdbv1.MongoDBRole + for _, r := range d.GetRoles() { + key := r.Role + "@" + r.Db + if _, ok := roleMap[key]; !ok { + mergedRoles = append(mergedRoles, r) + } + } + + mergedRoles = append(mergedRoles, roles...) 
+ d.SetRoles(mergedRoles) +} + func (d Deployment) SetRoles(roles []mdbv1.MongoDBRole) { d["roles"] = roles } From 9cf55e794c61c21c9ac123256b9ebe78ee38c540 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Thu, 4 Dec 2025 11:54:53 +0200 Subject: [PATCH 02/12] Shared code --- api/v1/mdb/mongodb_types.go | 14 ++++++ controllers/operator/common_controller.go | 58 +++++++++++++++++------ pkg/util/constants.go | 1 + 3 files changed, 59 insertions(+), 14 deletions(-) diff --git a/api/v1/mdb/mongodb_types.go b/api/v1/mdb/mongodb_types.go index 42bab17a2..70584a745 100644 --- a/api/v1/mdb/mongodb_types.go +++ b/api/v1/mdb/mongodb_types.go @@ -1194,6 +1194,20 @@ func (m *MongoDB) GetLastSpec() (*MongoDbSpec, error) { return &lastSpec, nil } +func (m *MongoDB) GetLastConfiguredRoles() ([]string, error) { + lastRolesStr := annotations.GetAnnotation(m, util.LastConfiguredRoles) + if lastRolesStr == "" { + return nil, nil + } + + lastRoles := []string{} + if err := json.Unmarshal([]byte(lastRolesStr), &lastRoles); err != nil { + return nil, err + } + + return lastRoles, nil +} + func (m *MongoDB) ServiceName() string { if m.Spec.StatefulSetConfiguration != nil { svc := m.Spec.StatefulSetConfiguration.SpecWrapper.Spec.ServiceName diff --git a/controllers/operator/common_controller.go b/controllers/operator/common_controller.go index 356b7a592..1c35d4bde 100644 --- a/controllers/operator/common_controller.go +++ b/controllers/operator/common_controller.go @@ -6,7 +6,6 @@ import ( "encoding/pem" "fmt" "path/filepath" - "reflect" "github.com/blang/semver" "github.com/hashicorp/go-multierror" @@ -100,39 +99,70 @@ func NewReconcileCommonController(ctx context.Context, client client.Client) *Re } } -// ensureRoles will first check if both roles and roleRefs are populated. If both are, it will return an error, which is inline with the webhook validation rules. -// Otherwise, if roles is populated, then it will extract the list of roles and check if they are already set in Ops Manager. 
If they are not, it will update the roles in Ops Manager. -// If roleRefs is populated, it will extract the list of roles from the referenced resources and check if they are already set in Ops Manager. If they are not, it will update the roles in Ops Manager. -func (r *ReconcileCommonController) ensureRoles(ctx context.Context, db mdbv1.DbCommonSpec, enableClusterMongoDBRoles bool, conn om.Connection, mongodbResourceNsName types.NamespacedName, log *zap.SugaredLogger) workflow.Status { +func (r *ReconcileCommonController) getRoleAnnotation(ctx context.Context, db mdbv1.DbCommonSpec, enableClusterMongoDBRoles bool, mongodbResourceNsName types.NamespacedName) (map[string]string, []string, error) { + previousRoles, err := r.getRoleStrings(ctx, db, enableClusterMongoDBRoles, mongodbResourceNsName) + if err != nil { + return nil, nil, xerrors.Errorf("Error retrieving configured roles: %w", err) + } + + annotationToAdd := make(map[string]string) + if len(previousRoles) > 0 { + rolesBytes, err := json.Marshal(previousRoles) + if err != nil { + return nil, nil, err + } + annotationToAdd[util.LastConfiguredRoles] = string(rolesBytes) + } + return annotationToAdd, previousRoles, nil +} + +func (r *ReconcileCommonController) getRoleStrings(ctx context.Context, db mdbv1.DbCommonSpec, enableClusterMongoDBRoles bool, mongodbResourceNsName types.NamespacedName) ([]string, error) { + roles, err := r.getRoles(ctx, db, enableClusterMongoDBRoles, mongodbResourceNsName) + if err != nil { + return nil, err + } + + roleStrings := make([]string, len(roles)) + for i, r := range roles { + roleStrings[i] = fmt.Sprintf("%s@%s", r.Role, r.Db) + } + + return roleStrings, nil +} + +func (r *ReconcileCommonController) getRoles(ctx context.Context, db mdbv1.DbCommonSpec, enableClusterMongoDBRoles bool, mongodbResourceNsName types.NamespacedName) ([]mdbv1.MongoDBRole, error) { localRoles := db.GetSecurity().Roles roleRefs := db.GetSecurity().RoleRefs if len(localRoles) > 0 && len(roleRefs) > 0 { - 
return workflow.Failed(xerrors.Errorf("At most one one of roles or roleRefs can be non-empty.")) + return nil, xerrors.Errorf("At most one of roles or roleRefs can be non-empty.") } var roles []mdbv1.MongoDBRole if len(roleRefs) > 0 { if !enableClusterMongoDBRoles { - return workflow.Failed(xerrors.Errorf("RoleRefs are not supported when ClusterMongoDBRoles are disabled. Please enable ClusterMongoDBRoles in the operator configuration. This can be done by setting the operator.enableClusterMongoDBRoles to true in the helm values file, which will automatically installed the necessary RBAC. Alternatively, it can be enabled by adding -watch-resource=clustermongodbroles flag to the operator deployment, and manually creating the necessary RBAC.")) + return nil, xerrors.Errorf("RoleRefs are not supported when ClusterMongoDBRoles are disabled. Please enable ClusterMongoDBRoles in the operator configuration. This can be done by setting the operator.enableClusterMongoDBRoles to true in the helm values file, which will automatically installed the necessary RBAC. Alternatively, it can be enabled by adding -watch-resource=clustermongodbroles flag to the operator deployment, and manually creating the necessary RBAC.") } var err error roles, err = r.getRoleRefs(ctx, roleRefs, mongodbResourceNsName, db.Version) if err != nil { - return workflow.Failed(err) + return nil, err } } else { roles = localRoles } - d, err := conn.ReadDeployment() + return roles, nil +} + +// ensureRoles will first check if both roles and roleRefs are populated. If both are, it will return an error, which is inline with the webhook validation rules. +// Otherwise, if roles is populated, then it will extract the list of roles and check if they are already set in Ops Manager. If they are not, it will update the roles in Ops Manager. +// If roleRefs is populated, it will extract the list of roles from the referenced resources and check if they are already set in Ops Manager. 
If they are not, it will update the roles in Ops Manager. +func (r *ReconcileCommonController) ensureRoles(ctx context.Context, db mdbv1.DbCommonSpec, enableClusterMongoDBRoles bool, conn om.Connection, mongodbResourceNsName types.NamespacedName, previousRoles []string, log *zap.SugaredLogger) workflow.Status { + roles, err := r.getRoles(ctx, db, enableClusterMongoDBRoles, mongodbResourceNsName) if err != nil { return workflow.Failed(err) } - dRoles := d.GetRoles() - if reflect.DeepEqual(dRoles, roles) { - return workflow.OK() - } // clone roles list to avoid mutating the spec in normalizePrivilegeResource newRoles := make([]mdbv1.MongoDBRole, len(roles)) @@ -157,7 +187,7 @@ func (r *ReconcileCommonController) ensureRoles(ctx context.Context, db mdbv1.Db log.Infof("Roles have been changed. Updating deployment in Ops Manager.") err = conn.ReadUpdateDeployment( func(d om.Deployment) error { - d.SetRoles(roles) + d.EnsureRoles(roles, previousRoles) return nil }, log, diff --git a/pkg/util/constants.go b/pkg/util/constants.go index 0efc7d639..5444dff50 100644 --- a/pkg/util/constants.go +++ b/pkg/util/constants.go @@ -285,6 +285,7 @@ const ( // Annotation keys used by the operator LastAchievedSpec = "mongodb.com/v1.lastSuccessfulConfiguration" LastAchievedRsMemberIds = "mongodb.com/v1.lastAchievedRsMemberIds" + LastConfiguredRoles = "mongodb.com/v1.lastConfiguredRoles" // SecretVolumeName is the name of the volume resource. 
SecretVolumeName = "secret-certs" From 8f31098e5c228ebc74c223d07a2913f970c79fe5 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Thu, 4 Dec 2025 11:55:14 +0200 Subject: [PATCH 03/12] Replicaset support --- .../operator/mongodbreplicaset_controller.go | 23 +++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/controllers/operator/mongodbreplicaset_controller.go b/controllers/operator/mongodbreplicaset_controller.go index 7fb290d7d..baba9e04a 100644 --- a/controllers/operator/mongodbreplicaset_controller.go +++ b/controllers/operator/mongodbreplicaset_controller.go @@ -84,6 +84,7 @@ type ReconcileMongoDbReplicaSet struct { type replicaSetDeploymentState struct { LastAchievedSpec *mdbv1.MongoDbSpec LastReconcileMemberCount int + LastConfiguredRoles []string } var _ reconcile.Reconciler = &ReconcileMongoDbReplicaSet{} @@ -127,9 +128,15 @@ func (r *ReplicaSetReconcilerHelper) readState() (*replicaSetDeploymentState, er // reconciliation and prepares for eventually storing this in ConfigMap state instead of ephemeral status. lastReconcileMemberCount := r.resource.Status.Members + lastConfiguredRoles, err := r.resource.GetLastConfiguredRoles() + if err != nil { + return nil, err + } + return &replicaSetDeploymentState{ LastAchievedSpec: lastAchievedSpec, LastReconcileMemberCount: lastReconcileMemberCount, + LastConfiguredRoles: lastConfiguredRoles, }, nil } @@ -278,7 +285,7 @@ func (r *ReplicaSetReconcilerHelper) Reconcile(ctx context.Context) (reconcile.R if recovery.ShouldTriggerRecovery(rs.Status.Phase != mdbstatus.PhaseRunning, rs.Status.LastTransition) { log.Warnf("Triggering Automatic Recovery. 
The MongoDB resource %s/%s is in %s state since %s", rs.Namespace, rs.Name, rs.Status.Phase, rs.Status.LastTransition) automationConfigStatus := r.updateOmDeploymentRs(ctx, conn, r.deploymentState.LastReconcileMemberCount, tlsCertPath, internalClusterCertPath, deploymentOpts, shouldMirrorKeyfileForMongot, true).OnErrorPrepend("failed to create/update (Ops Manager reconciliation phase):") - reconcileStatus := r.reconcileMemberResources(ctx, conn, projectConfig, deploymentOpts) + reconcileStatus := r.reconcileMemberResources(ctx, conn, projectConfig, deploymentOpts, r.deploymentState.LastConfiguredRoles) if !reconcileStatus.IsOK() { log.Errorf("Recovery failed because of reconcile errors, %v", reconcileStatus) } @@ -294,7 +301,7 @@ func (r *ReplicaSetReconcilerHelper) Reconcile(ctx context.Context) (reconcile.R return r.updateOmDeploymentRs(ctx, conn, r.deploymentState.LastReconcileMemberCount, tlsCertPath, internalClusterCertPath, deploymentOpts, shouldMirrorKeyfileForMongot, false).OnErrorPrepend("failed to create/update (Ops Manager reconciliation phase):") }, func() workflow.Status { - return r.reconcileMemberResources(ctx, conn, projectConfig, deploymentOpts) + return r.reconcileMemberResources(ctx, conn, projectConfig, deploymentOpts, r.deploymentState.LastConfiguredRoles) }) if !status.IsOK() { @@ -319,6 +326,14 @@ func (r *ReplicaSetReconcilerHelper) Reconcile(ctx context.Context) (reconcile.R annotationsToAdd[k] = val } + roleAnnotation, _, err := r.reconciler.getRoleAnnotation(ctx, r.resource.Spec.DbCommonSpec, r.reconciler.enableClusterMongoDBRoles, kube.ObjectKeyFromApiObject(r.resource)) + if err != nil { + return r.updateStatus(ctx, workflow.Failed(err)) + } + for k, val := range roleAnnotation { + annotationsToAdd[k] = val + } + if err := annotations.SetAnnotations(ctx, r.resource, annotationsToAdd, r.reconciler.client); err != nil { return r.updateStatus(ctx, workflow.Failed(xerrors.Errorf("could not update resource annotations: %w", err))) } @@ 
-478,7 +493,7 @@ func (r *ReplicaSetReconcilerHelper) reconcileHostnameOverrideConfigMap(ctx cont // reconcileMemberResources handles the synchronization of kubernetes resources, which can be statefulsets, services etc. // All the resources required in the k8s cluster (as opposed to the automation config) for creating the replicaset // should be reconciled in this method. -func (r *ReplicaSetReconcilerHelper) reconcileMemberResources(ctx context.Context, conn om.Connection, projectConfig mdbv1.ProjectConfig, deploymentOptions deploymentOptionsRS) workflow.Status { +func (r *ReplicaSetReconcilerHelper) reconcileMemberResources(ctx context.Context, conn om.Connection, projectConfig mdbv1.ProjectConfig, deploymentOptions deploymentOptionsRS, lastConfiguredRoles []string) workflow.Status { rs := r.resource reconciler := r.reconciler log := r.log @@ -489,7 +504,7 @@ func (r *ReplicaSetReconcilerHelper) reconcileMemberResources(ctx context.Contex } // Ensure roles are properly configured - if status := reconciler.ensureRoles(ctx, rs.Spec.DbCommonSpec, reconciler.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(rs), log); !status.IsOK() { + if status := reconciler.ensureRoles(ctx, rs.Spec.DbCommonSpec, reconciler.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(rs), lastConfiguredRoles, log); !status.IsOK() { return status } From ba26d11d6d38eeba6717562fe4e823e3441ff29b Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Thu, 4 Dec 2025 12:19:16 +0200 Subject: [PATCH 04/12] Multicluster replicaset support --- .../mongodbmultireplicaset_controller.go | 28 ++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/controllers/operator/mongodbmultireplicaset_controller.go b/controllers/operator/mongodbmultireplicaset_controller.go index e0820d819..6aeff2cbd 100644 --- a/controllers/operator/mongodbmultireplicaset_controller.go +++ b/controllers/operator/mongodbmultireplicaset_controller.go @@ -386,8 +386,14 @@ func (r 
*ReconcileMongoDbMultiReplicaSet) reconcileMemberResources(ctx context.C return workflow.Failed(err) } } + + previousRoles, err := getPreviousRolesFromAnnotation(*mrs) + if err != nil { + return workflow.Failed(err) + } + // Ensure custom roles are created in OM - if status := r.ensureRoles(ctx, mrs.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(mrs), log); !status.IsOK() { + if status := r.ensureRoles(ctx, mrs.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(mrs), previousRoles, log); !status.IsOK() { return status } @@ -694,6 +700,15 @@ func (r *ReconcileMongoDbMultiReplicaSet) saveLastAchievedSpec(ctx context.Conte } } + // Set annotation and state for previously configured roles + roleAnnotation, _, err := r.getRoleAnnotation(ctx, mrs.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, kube.ObjectKeyFromApiObject(&mrs)) + if err != nil { + return err + } + for k, val := range roleAnnotation { + annotationsToAdd[k] = val + } + return annotations.SetAnnotations(ctx, &mrs, annotationsToAdd, r.client) } @@ -828,6 +843,17 @@ func getReplicaSetProcessIdsFromAnnotation(mrs mdbmultiv1.MongoDBMultiCluster) ( return make(map[string]int), nil } +func getPreviousRolesFromAnnotation(mrs mdbmultiv1.MongoDBMultiCluster) ([]string, error) { + if rolesString, ok := mrs.Annotations[util.LastConfiguredRoles]; ok { + var roles []string + if err := json.Unmarshal([]byte(rolesString), &roles); err != nil { + return nil, err + } + return roles, nil + } + return nil, nil +} + func getSRVService(mrs *mdbmultiv1.MongoDBMultiCluster) corev1.Service { additionalConfig := mrs.Spec.GetAdditionalMongodConfig() port := additionalConfig.GetPortOrDefault() From 34c9301b60f9d7ab489010a5aa6f014b5636dfce Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Thu, 4 Dec 2025 12:19:26 +0200 Subject: [PATCH 05/12] Standalone support --- controllers/operator/mongodbstandalone_controller.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 
deletion(-) diff --git a/controllers/operator/mongodbstandalone_controller.go b/controllers/operator/mongodbstandalone_controller.go index 56f84fc97..619b7ba07 100644 --- a/controllers/operator/mongodbstandalone_controller.go +++ b/controllers/operator/mongodbstandalone_controller.go @@ -220,7 +220,7 @@ func (r *ReconcileMongoDbStandalone) Reconcile(ctx context.Context, request reco return r.updateStatus(ctx, s, status, log) } - if status := r.ensureRoles(ctx, s.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(s), log); !status.IsOK() { + if status := r.ensureRoles(ctx, s.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(s), nil, log); !status.IsOK() { return r.updateStatus(ctx, s, status, log) } @@ -317,6 +317,15 @@ func (r *ReconcileMongoDbStandalone) Reconcile(ctx context.Context, request reco annotationsToAdd[k] = val } } + + roleAnnotation, _, err := r.getRoleAnnotation(ctx, s.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, kube.ObjectKeyFromApiObject(s)) + if err != nil { + return r.updateStatus(ctx, s, workflow.Failed(err), log) + } + for k, val := range roleAnnotation { + annotationsToAdd[k] = val + } + if err := annotations.SetAnnotations(ctx, s, annotationsToAdd, r.client); err != nil { return r.updateStatus(ctx, s, workflow.Failed(err), log) } From 67759c5b4ea92e6ac3c8663b7a084e76f4e1a2cb Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Thu, 4 Dec 2025 12:19:31 +0200 Subject: [PATCH 06/12] Sharded support --- .../operator/mongodbshardedcluster_controller.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/controllers/operator/mongodbshardedcluster_controller.go b/controllers/operator/mongodbshardedcluster_controller.go index c7298bc37..2c5cb301f 100644 --- a/controllers/operator/mongodbshardedcluster_controller.go +++ b/controllers/operator/mongodbshardedcluster_controller.go @@ -109,6 +109,7 @@ func newShardedClusterReconciler(ctx context.Context, 
kubeClient client.Client, type ShardedClusterDeploymentState struct { CommonDeploymentState `json:",inline"` LastAchievedSpec *mdbv1.MongoDbSpec `json:"lastAchievedSpec"` + LastConfiguredRoles []string `json:"lastConfiguredRoles"` Status *mdbv1.MongoDbStatus `json:"status"` } @@ -131,6 +132,7 @@ func NewShardedClusterDeploymentState() *ShardedClusterDeploymentState { return &ShardedClusterDeploymentState{ CommonDeploymentState: CommonDeploymentState{ClusterMapping: map[string]int{}}, LastAchievedSpec: &mdbv1.MongoDbSpec{}, + LastConfiguredRoles: []string{}, Status: &mdbv1.MongoDbStatus{}, } } @@ -954,6 +956,17 @@ func (r *ShardedClusterReconcileHelper) Reconcile(ctx context.Context, log *zap. annotationsToAdd[k] = val } } + + // Set annotation and state for previously configured roles + roleAnnotation, roleStrings, err := r.commonController.getRoleAnnotation(ctx, r.sc.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, kube.ObjectKeyFromApiObject(r.sc)) + if err != nil { + return r.updateStatus(ctx, sc, workflow.Failed(err), log) + } + for k, val := range roleAnnotation { + annotationsToAdd[k] = val + } + r.deploymentState.LastConfiguredRoles = roleStrings + // Set annotations that should be saved at the end of the reconciliation, e.g lastAchievedSpec if err := annotations.SetAnnotations(ctx, sc, annotationsToAdd, r.commonController.client); err != nil { return r.updateStatus(ctx, sc, workflow.Failed(err), log) @@ -1059,7 +1072,7 @@ func (r *ShardedClusterReconcileHelper) doShardedClusterProcessing(ctx context.C } } - if workflowStatus := r.commonController.ensureRoles(ctx, sc.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(sc), log); !workflowStatus.IsOK() { + if workflowStatus := r.commonController.ensureRoles(ctx, sc.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(sc), r.deploymentState.LastConfiguredRoles, log); !workflowStatus.IsOK() { return workflowStatus } From 
7ee01a730a9fffb2cab0dac3dbfa9bead4fc309c Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Thu, 4 Dec 2025 16:31:54 +0200 Subject: [PATCH 07/12] Move merging logic to common controller --- controllers/om/deployment.go | 22 --------- controllers/operator/common_controller.go | 60 ++++++++++++++++++----- 2 files changed, 49 insertions(+), 33 deletions(-) diff --git a/controllers/om/deployment.go b/controllers/om/deployment.go index 29a150b9a..a58e8b6d8 100644 --- a/controllers/om/deployment.go +++ b/controllers/om/deployment.go @@ -616,28 +616,6 @@ func (d Deployment) GetNumberOfExcessProcesses(resourceName string) int { return excessProcesses } -func (d Deployment) EnsureRoles(roles []mdbv1.MongoDBRole, deletedRoles []string) { - roleMap := make(map[string]struct{}) - for _, r := range roles { - roleMap[r.Role+"@"+r.Db] = struct{}{} - } - - for _, r := range deletedRoles { - roleMap[r] = struct{}{} - } - - var mergedRoles []mdbv1.MongoDBRole - for _, r := range d.GetRoles() { - key := r.Role + "@" + r.Db - if _, ok := roleMap[key]; !ok { - mergedRoles = append(mergedRoles, r) - } - } - - mergedRoles = append(mergedRoles, roles...) 
- d.SetRoles(mergedRoles) -} - func (d Deployment) SetRoles(roles []mdbv1.MongoDBRole) { d["roles"] = roles } diff --git a/controllers/operator/common_controller.go b/controllers/operator/common_controller.go index 1c35d4bde..4e6651214 100644 --- a/controllers/operator/common_controller.go +++ b/controllers/operator/common_controller.go @@ -6,6 +6,7 @@ import ( "encoding/pem" "fmt" "path/filepath" + "reflect" "github.com/blang/semver" "github.com/hashicorp/go-multierror" @@ -106,20 +107,19 @@ func (r *ReconcileCommonController) getRoleAnnotation(ctx context.Context, db md } annotationToAdd := make(map[string]string) - if len(previousRoles) > 0 { - rolesBytes, err := json.Marshal(previousRoles) - if err != nil { - return nil, nil, err - } - annotationToAdd[util.LastConfiguredRoles] = string(rolesBytes) + rolesBytes, err := json.Marshal(previousRoles) + if err != nil { + return nil, nil, err } + annotationToAdd[util.LastConfiguredRoles] = string(rolesBytes) + return annotationToAdd, previousRoles, nil } func (r *ReconcileCommonController) getRoleStrings(ctx context.Context, db mdbv1.DbCommonSpec, enableClusterMongoDBRoles bool, mongodbResourceNsName types.NamespacedName) ([]string, error) { roles, err := r.getRoles(ctx, db, enableClusterMongoDBRoles, mongodbResourceNsName) if err != nil { - return nil, err + return []string{}, err } roleStrings := make([]string, len(roles)) @@ -164,10 +164,21 @@ func (r *ReconcileCommonController) ensureRoles(ctx context.Context, db mdbv1.Db return workflow.Failed(err) } + d, err := conn.ReadDeployment() + if err != nil { + return workflow.Failed(err) + } + dRoles := d.GetRoles() + mergedRoles := mergeRoles(dRoles, roles, previousRoles) + + if reflect.DeepEqual(dRoles, mergedRoles) { + return workflow.OK() + } + // clone roles list to avoid mutating the spec in normalizePrivilegeResource - newRoles := make([]mdbv1.MongoDBRole, len(roles)) - for i := range roles { - newRoles[i] = *roles[i].DeepCopy() + newRoles := 
make([]mdbv1.MongoDBRole, len(mergedRoles)) + for i := range mergedRoles { + newRoles[i] = *mergedRoles[i].DeepCopy() } roles = newRoles @@ -187,7 +198,7 @@ func (r *ReconcileCommonController) ensureRoles(ctx context.Context, db mdbv1.Db log.Infof("Roles have been changed. Updating deployment in Ops Manager.") err = conn.ReadUpdateDeployment( func(d om.Deployment) error { - d.EnsureRoles(roles, previousRoles) + d.SetRoles(roles) return nil }, log, @@ -219,6 +230,33 @@ func normalizePrivilegeResource(resource mdbv1.Resource) mdbv1.Resource { return resource } +// mergeRoles merges the deployed roles with the current roles and previously configured roles. +// It ensures that roles configured outside the operator are not removed. +// This is achieved by removing currently configured roles from the deployed roles. +// To ensure that roles removed from the spec are also removed from OM, we also remove the previously configured roles. +// Finally, we add back the currently configured roles. +func mergeRoles(deployed []mdbv1.MongoDBRole, current []mdbv1.MongoDBRole, previous []string) []mdbv1.MongoDBRole { + roleMap := make(map[string]struct{}) + for _, r := range current { + roleMap[r.Role+"@"+r.Db] = struct{}{} + } + + for _, r := range previous { + roleMap[r] = struct{}{} + } + + mergedRoles := make([]mdbv1.MongoDBRole, 0) + for _, r := range deployed { + key := r.Role + "@" + r.Db + if _, ok := roleMap[key]; !ok { + mergedRoles = append(mergedRoles, r) + } + } + + mergedRoles = append(mergedRoles, current...) + return mergedRoles +} + // getRoleRefs retrieves the roles from the referenced resources. It will return an error if any of the referenced resources are not found. // It will also add the referenced resources to the resource watcher, so that they are watched for changes. // The referenced resources are expected to be of kind ClusterMongoDBRole. 
From 65049a2b820f279a76ca7d6659d320a74169128c Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Thu, 4 Dec 2025 16:32:25 +0200 Subject: [PATCH 08/12] Unit tests --- api/v1/mdbmulti/mongodbmultibuilder.go | 8 +++ controllers/om/mockedomclient.go | 10 +++ .../operator/common_controller_test.go | 62 +++++++++++++++++-- .../mongodbmultireplicaset_controller_test.go | 37 +++++++++++ .../mongodbreplicaset_controller_test.go | 38 ++++++++++++ ...odbshardedcluster_controller_multi_test.go | 6 +- .../mongodbshardedcluster_controller_test.go | 39 ++++++++++++ .../operator/mongodbstandalone_controller.go | 7 ++- .../mongodbstandalone_controller_test.go | 47 ++++++++++++++ pkg/test/sharded_cluster_builder.go | 8 +++ 10 files changed, 253 insertions(+), 9 deletions(-) diff --git a/api/v1/mdbmulti/mongodbmultibuilder.go b/api/v1/mdbmulti/mongodbmultibuilder.go index 619598712..1c7d7491c 100644 --- a/api/v1/mdbmulti/mongodbmultibuilder.go +++ b/api/v1/mdbmulti/mongodbmultibuilder.go @@ -73,6 +73,14 @@ func (m *MultiReplicaSetBuilder) SetSecurity(s *mdbv1.Security) *MultiReplicaSet return m } +func (m *MultiReplicaSetBuilder) SetRoles(roles []mdbv1.MongoDBRole) *MultiReplicaSetBuilder { + if m.Spec.Security == nil { + m.Spec.Security = &mdbv1.Security{} + } + m.Spec.Security.Roles = roles + return m +} + func (m *MultiReplicaSetBuilder) SetRoleRefs(roleRefs []mdbv1.MongoDBRoleRef) *MultiReplicaSetBuilder { if m.Spec.Security == nil { m.Spec.Security = &mdbv1.Security{} diff --git a/controllers/om/mockedomclient.go b/controllers/om/mockedomclient.go index 51c3f844d..2257ed0fc 100644 --- a/controllers/om/mockedomclient.go +++ b/controllers/om/mockedomclient.go @@ -933,6 +933,16 @@ func (oc *MockedOmConnection) AddPreferredHostname(agentApiKey string, value str return nil } +func (oc *MockedOmConnection) AddRole(role mdbv1.MongoDBRole) { + roles := oc.deployment.GetRoles() + roles = append(roles, role) + oc.deployment.SetRoles(roles) +} + +func (oc *MockedOmConnection) GetRoles() 
[]mdbv1.MongoDBRole { + return oc.deployment.GetRoles() +} + // updateAutoAuthMechanism simulates the changes made by Ops Manager and the agents in deciding which // mechanism will be specified as the "autoAuthMechanisms" func updateAutoAuthMechanism(ac *AutomationConfig) { diff --git a/controllers/operator/common_controller_test.go b/controllers/operator/common_controller_test.go index 608a80bb9..976e8f5b0 100644 --- a/controllers/operator/common_controller_test.go +++ b/controllers/operator/common_controller_test.go @@ -290,7 +290,7 @@ func TestFailWhenRoleAndRoleRefsAreConfigured(t *testing.T) { controller := NewReconcileCommonController(ctx, kubeClient) mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) - result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) assert.False(t, result.IsOK()) assert.Equal(t, status.PhaseFailed, result.Phase()) @@ -318,7 +318,7 @@ func TestRoleRefsAreAdded(t *testing.T) { _ = kubeClient.Create(ctx, roleResource) - controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) ac, err := mockOm.ReadAutomationConfig() assert.NoError(t, err) @@ -345,7 +345,7 @@ func TestErrorWhenRoleRefIsWrong(t *testing.T) { _ = kubeClient.Create(ctx, roleResource) - result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) assert.False(t, result.IsOK()) assert.Equal(t, status.PhaseFailed, result.Phase()) @@ -371,7 +371,7 @@ func TestErrorWhenRoleDoesNotExist(t *testing.T) { controller := 
NewReconcileCommonController(ctx, kubeClient) mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) - result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) assert.False(t, result.IsOK()) assert.Equal(t, status.PhaseFailed, result.Phase()) @@ -398,7 +398,7 @@ func TestDontSendNilPrivileges(t *testing.T) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient() controller := NewReconcileCommonController(ctx, kubeClient) mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) - controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) ac, err := mockOm.ReadAutomationConfig() assert.NoError(t, err) roles, ok := ac.Deployment["roles"].([]mdbv1.MongoDBRole) @@ -498,7 +498,7 @@ func TestCheckEmptyStringsInPrivilegesEquivalentToNotPassingFields(t *testing.T) controller := NewReconcileCommonController(ctx, kubeClient) mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) - controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) ac, err := mockOm.ReadAutomationConfig() assert.NoError(t, err) @@ -528,6 +528,56 @@ func TestCheckEmptyStringsInPrivilegesEquivalentToNotPassingFields(t *testing.T) } } +func TestExternalRoleIsNotRemoved(t *testing.T) { + ctx := context.Background() + + role := mdbv1.MongoDBRole{ + Role: "embedded-role", + Db: "admin", + Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "read", + }}, + } + + rs := 
DefaultReplicaSetBuilder().SetRoles([]mdbv1.MongoDBRole{role}).Build() + kubeClient, omConnectionFactory := mock.NewDefaultFakeClient() + controller := NewReconcileCommonController(ctx, kubeClient) + mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) + + // Create deployment with one embedded role + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) + + roles := mockOm.GetRoles() + require.Len(t, roles, 1) + + // Add external role directly to OM (via UI/API) + externalRole := mdbv1.MongoDBRole{ + Role: "external-role", + Db: "admin", + Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "read", + }}, + } + mockOm.AddRole(externalRole) + + // Reconcile again - role created from the UI should still be there + roleStrings, _ := controller.getRoleStrings(ctx, rs.Spec.DbCommonSpec, true, kube.ObjectKeyFromApiObject(rs)) + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), roleStrings, zap.S()) + + roles = mockOm.GetRoles() + require.Len(t, roles, 2) + + // Delete embedded role, only the external should remain + rs = DefaultReplicaSetBuilder().Build() + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), roleStrings, zap.S()) + + roles = mockOm.GetRoles() + require.Len(t, roles, 1) + assert.Equal(t, roles[0].Role, "external-role") +} + func TestSecretWatcherWithAllResources(t *testing.T) { ctx := context.Background() caName := "custom-ca" diff --git a/controllers/operator/mongodbmultireplicaset_controller_test.go b/controllers/operator/mongodbmultireplicaset_controller_test.go index 5c27d8606..0f98b1522 100644 --- a/controllers/operator/mongodbmultireplicaset_controller_test.go +++ b/controllers/operator/mongodbmultireplicaset_controller_test.go @@ -1425,6 +1425,43 @@ func TestValidationsRunOnReconcile(t *testing.T) { assert.Equal(t, fmt.Sprintf("Multiple clusters with the same name 
(%s) are not allowed", duplicateName), mrs.Status.Message) } +func TestRoleAnnotationIsSet(t *testing.T) { + ctx := context.Background() + + role := mdb.MongoDBRole{ + Role: "embedded-role", + Db: "admin", + Roles: []mdb.InheritedRole{{ + Db: "admin", + Role: "read", + }}, + } + + mrs := mdbmulti.DefaultMultiReplicaSetBuilder().SetClusterSpecList(clusters).SetRoles([]mdb.MongoDBRole{role}).Build() + reconciler, client, _, omConnectionFactory := defaultMultiReplicaSetReconciler(ctx, nil, "", "", mrs) + checkMultiReconcileSuccessful(ctx, t, reconciler, mrs, client, false) + + roleString, _ := json.Marshal([]string{"embedded-role@admin"}) + + // Assert that the roles are saved in the annotation + assert.Equal(t, mrs.GetAnnotations()[util.LastConfiguredRoles], string(roleString)) + + roles := omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles() + assert.Len(t, roles, 1) + + mrs.GetSecurity().Roles = []mdb.MongoDBRole{} + err := client.Update(ctx, mrs) + assert.NoError(t, err) + + checkMultiReconcileSuccessful(ctx, t, reconciler, mrs, client, false) + + // Assert that the roles annotation is updated and role is removed + assert.Equal(t, mrs.GetAnnotations()[util.LastConfiguredRoles], "[]") + + roles = omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles() + assert.Len(t, roles, 0) +} + func assertClusterpresent(t *testing.T, m map[string]int, specs mdb.ClusterSpecList, arr []int) { tmp := make([]int, 0) for _, s := range specs { diff --git a/controllers/operator/mongodbreplicaset_controller_test.go b/controllers/operator/mongodbreplicaset_controller_test.go index a7794f07a..21c8e9c87 100644 --- a/controllers/operator/mongodbreplicaset_controller_test.go +++ b/controllers/operator/mongodbreplicaset_controller_test.go @@ -983,6 +983,44 @@ func TestVaultAnnotations_NotWrittenWhenDisabled(t *testing.T) { } } +func TestReplicasetRoleAnnotationIsSet(t *testing.T) { + ctx := context.Background() + + role := mdbv1.MongoDBRole{ + Role: 
"embedded-role", + Db: "admin", + Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "read", + }}, + } + + rs := DefaultReplicaSetBuilder().SetRoles([]mdbv1.MongoDBRole{role}).Build() + reconciler, client, omConnectionFactory := defaultReplicaSetReconciler(ctx, nil, "", "", rs) + + checkReconcileSuccessful(ctx, t, reconciler, rs, client) + + roleString, _ := json.Marshal([]string{"embedded-role@admin"}) + + // Assert that the roles are saved in the annotation + assert.Equal(t, rs.GetAnnotations()[util.LastConfiguredRoles], string(roleString)) + + roles := omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles() + assert.Len(t, roles, 1) + + rs.GetSecurity().Roles = []mdbv1.MongoDBRole{} + err := client.Update(ctx, rs) + assert.NoError(t, err) + + checkReconcileSuccessful(ctx, t, reconciler, rs, client) + + // Assert that the roles annotation is updated and role is removed + assert.Equal(t, rs.GetAnnotations()[util.LastConfiguredRoles], "[]") + + roles = omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles() + assert.Len(t, roles, 0) +} + func testPVCFinishedResizing(t *testing.T, ctx context.Context, memberClient kubernetesClient.Client, p *corev1.PersistentVolumeClaim, reconciledResource *mdbv1.MongoDB, statefulSet *appsv1.StatefulSet, logger *zap.SugaredLogger) { // Simulate that the PVC has finished resizing setPVCWithUpdatedResource(ctx, t, memberClient, p) diff --git a/controllers/operator/mongodbshardedcluster_controller_multi_test.go b/controllers/operator/mongodbshardedcluster_controller_multi_test.go index 616d57bfa..3d8591764 100644 --- a/controllers/operator/mongodbshardedcluster_controller_multi_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_multi_test.go @@ -3863,12 +3863,14 @@ func getMultiClusterFQDN(stsName string, namespace string, clusterIdx int, podId func generateExpectedDeploymentState(t *testing.T, sc *mdbv1.MongoDB) string { lastSpec, _ := sc.GetLastSpec() + lastConfiguredRoles, _ 
:= sc.GetLastConfiguredRoles() expectedState := ShardedClusterDeploymentState{ CommonDeploymentState: CommonDeploymentState{ ClusterMapping: map[string]int{}, }, - LastAchievedSpec: lastSpec, - Status: &sc.Status, + LastAchievedSpec: lastSpec, + LastConfiguredRoles: lastConfiguredRoles, + Status: &sc.Status, } lastSpecBytes, err := json.Marshal(expectedState) require.NoError(t, err) diff --git a/controllers/operator/mongodbshardedcluster_controller_test.go b/controllers/operator/mongodbshardedcluster_controller_test.go index ee5d87894..d994af783 100644 --- a/controllers/operator/mongodbshardedcluster_controller_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_test.go @@ -2,6 +2,7 @@ package operator import ( "context" + "encoding/json" "fmt" "reflect" "strings" @@ -1874,6 +1875,44 @@ func TestSingleClusterShardedScalingWithOverrides(t *testing.T) { } } +func TestSharderClusterRoleAnnotationIsSet(t *testing.T) { + ctx := context.Background() + + role := mdbv1.MongoDBRole{ + Role: "embedded-role", + Db: "admin", + Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "read", + }}, + } + + sc := test.DefaultClusterBuilder().SetRoles([]mdbv1.MongoDBRole{role}).Build() + reconciler, _, cl, omConnectionFactory, err := defaultShardedClusterReconciler(ctx, nil, "", "", sc, nil) + require.NoError(t, err) + checkReconcileSuccessful(ctx, t, reconciler, sc, cl) + + roleString, _ := json.Marshal([]string{"embedded-role@admin"}) + + // Assert that the member ids are saved in the annotation + assert.Equal(t, sc.GetAnnotations()[util.LastConfiguredRoles], string(roleString)) + + roles := omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles() + assert.Len(t, roles, 1) + + sc.GetSecurity().Roles = []mdbv1.MongoDBRole{} + err = cl.Update(ctx, sc) + assert.NoError(t, err) + + checkReconcileSuccessful(ctx, t, reconciler, sc, cl) + + // Assert that the roles annotation is updated and role is removed + assert.Equal(t, 
sc.GetAnnotations()[util.LastConfiguredRoles], "[]") + + roles = omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles() + assert.Len(t, roles, 0) +} + func generateHostsWithDistributionSingleCluster(stsName string, namespace string, memberCount int, clusterDomain string, externalClusterDomain string) ([]string, []string) { var hosts []string var podNames []string diff --git a/controllers/operator/mongodbstandalone_controller.go b/controllers/operator/mongodbstandalone_controller.go index 619b7ba07..56b4531ca 100644 --- a/controllers/operator/mongodbstandalone_controller.go +++ b/controllers/operator/mongodbstandalone_controller.go @@ -220,7 +220,12 @@ func (r *ReconcileMongoDbStandalone) Reconcile(ctx context.Context, request reco return r.updateStatus(ctx, s, status, log) } - if status := r.ensureRoles(ctx, s.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(s), nil, log); !status.IsOK() { + previousRoles, err := s.GetLastConfiguredRoles() + if err != nil { + return r.updateStatus(ctx, s, workflow.Failed(xerrors.Errorf("Failed to get last configured roles: %w", err)), log) + } + + if status := r.ensureRoles(ctx, s.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(s), previousRoles, log); !status.IsOK() { return r.updateStatus(ctx, s, status, log) } diff --git a/controllers/operator/mongodbstandalone_controller_test.go b/controllers/operator/mongodbstandalone_controller_test.go index cfa032870..134561f90 100644 --- a/controllers/operator/mongodbstandalone_controller_test.go +++ b/controllers/operator/mongodbstandalone_controller_test.go @@ -2,6 +2,7 @@ package operator import ( "context" + "encoding/json" "fmt" "reflect" "testing" @@ -323,6 +324,44 @@ func TestStandaloneAgentVersionMapping(t *testing.T) { agentVersionMappingTest(ctx, t, defaultResources, overriddenResources) } +func TestStandaloneRoleAnnotationIsSet(t *testing.T) { + ctx := context.Background() + + role := 
mdbv1.MongoDBRole{ + Role: "embedded-role", + Db: "admin", + Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "read", + }}, + } + + st := DefaultStandaloneBuilder().SetRoles([]mdbv1.MongoDBRole{role}).Build() + reconciler, client, omConnectionFactory := defaultStandaloneReconciler(ctx, nil, "", "", om.NewEmptyMockedOmConnection, st) + + checkReconcileSuccessful(ctx, t, reconciler, st, client) + + roleString, _ := json.Marshal([]string{"embedded-role@admin"}) + + // Assert that the roles are saved in the annotation + assert.Equal(t, st.GetAnnotations()[util.LastConfiguredRoles], string(roleString)) + + roles := omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles() + assert.Len(t, roles, 1) + + st.GetSecurity().Roles = []mdbv1.MongoDBRole{} + err := client.Update(ctx, st) + assert.NoError(t, err) + + checkReconcileSuccessful(ctx, t, reconciler, st, client) + + // Assert that the roles annotation is updated and role is removed + assert.Equal(t, st.GetAnnotations()[util.LastConfiguredRoles], "[]") + + roles = omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles() + assert.Len(t, roles, 0) +} + // defaultStandaloneReconciler is the standalone reconciler used in unit test.
It "adds" necessary // additional K8s objects (st, connection config map and secrets) necessary for reconciliation, // so it's possible to call 'reconcileAppDB()' on it right away @@ -385,6 +424,14 @@ func (b *StandaloneBuilder) SetService(s string) *StandaloneBuilder { return b } +func (b *StandaloneBuilder) SetRoles(roles []mdbv1.MongoDBRole) *StandaloneBuilder { + if b.Spec.Security == nil { + b.Spec.Security = &mdbv1.Security{} + } + b.Spec.Security.Roles = roles + return b +} + func (b *StandaloneBuilder) SetPodSpecTemplate(spec corev1.PodTemplateSpec) *StandaloneBuilder { if b.Spec.PodSpec == nil { b.Spec.PodSpec = &mdbv1.MongoDbPodSpec{} diff --git a/pkg/test/sharded_cluster_builder.go b/pkg/test/sharded_cluster_builder.go index 253126056..4e8afbb9f 100644 --- a/pkg/test/sharded_cluster_builder.go +++ b/pkg/test/sharded_cluster_builder.go @@ -126,6 +126,14 @@ func (b *ClusterBuilder) SetSecurity(security mdb.Security) *ClusterBuilder { return b } +func (b *ClusterBuilder) SetRoles(roles []mdb.MongoDBRole) *ClusterBuilder { + if b.Spec.Security == nil { + b.Spec.Security = &mdb.Security{} + } + b.Spec.Security.Roles = roles + return b +} + func (b *ClusterBuilder) EnableTLS() *ClusterBuilder { if b.Spec.Security == nil || b.Spec.Security.TLSConfig == nil { return b.SetSecurity(mdb.Security{TLSConfig: &mdb.TLSConfig{Enabled: true}}) From 2750631b1f7b89c847fd744e74ae4b75f2f4fee2 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Thu, 4 Dec 2025 18:49:10 +0200 Subject: [PATCH 09/12] Changelog --- ...ix_roles_configured_via_ops_manager_ui_or_api_will_no.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 changelog/20251204_fix_roles_configured_via_ops_manager_ui_or_api_will_no.md diff --git a/changelog/20251204_fix_roles_configured_via_ops_manager_ui_or_api_will_no.md b/changelog/20251204_fix_roles_configured_via_ops_manager_ui_or_api_will_no.md new file mode 100644 index 000000000..63e13df96 --- /dev/null +++ 
b/changelog/20251204_fix_roles_configured_via_ops_manager_ui_or_api_will_no.md @@ -0,0 +1,6 @@ +--- +kind: fix +date: 2025-12-04 +--- + +* Roles configured via Ops Manager UI or API will no longer be removed by the operator From 8f32c9aa0da0d9351801ae1bc2ef3683aa769892 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Wed, 10 Dec 2025 14:14:36 +0100 Subject: [PATCH 10/12] PR feedback --- .../operator/common_controller_test.go | 95 ++++++++++++++++++- .../mongodbshardedcluster_controller.go | 5 +- 2 files changed, 95 insertions(+), 5 deletions(-) diff --git a/controllers/operator/common_controller_test.go b/controllers/operator/common_controller_test.go index 976e8f5b0..4f7c9f8de 100644 --- a/controllers/operator/common_controller_test.go +++ b/controllers/operator/common_controller_test.go @@ -528,6 +528,95 @@ func TestCheckEmptyStringsInPrivilegesEquivalentToNotPassingFields(t *testing.T) } } +func TestMergeRoles(t *testing.T) { + externalRole := mdbv1.MongoDBRole{ + Role: "ext_role", + Db: "admin", + Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "read", + }}, + } + role1 := mdbv1.MongoDBRole{ + Role: "role1", + Db: "admin", + Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "readWrite", + }}, + } + + role2 := mdbv1.MongoDBRole{ + Role: "role2", + Db: "admin", + Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "readWrite", + }}, + } + + tests := []struct { + name string + deployedRoles []mdbv1.MongoDBRole + currentRoles []mdbv1.MongoDBRole + previousRoles []string + expectedRoles []mdbv1.MongoDBRole + }{ + // externalRole was added via UI + // role1 and role2 were defined in the CR + // role2 was removed from the CR + { + name: "Removing role from resource", + deployedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + currentRoles: []mdbv1.MongoDBRole{role1}, + previousRoles: []string{"role1@admin", "role2@admin"}, + expectedRoles: []mdbv1.MongoDBRole{externalRole, role1}, + }, + // externalRole was added via UI + // role1 was defined in 
the CR + // role2 was added in the CR + { + name: "Adding role in resource", + deployedRoles: []mdbv1.MongoDBRole{externalRole, role1}, + currentRoles: []mdbv1.MongoDBRole{role1, role2}, + previousRoles: []string{"role1@admin"}, + expectedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + }, + { + name: "Idempotency", + deployedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + currentRoles: []mdbv1.MongoDBRole{role1, role2}, + previousRoles: []string{"role1@admin", "role2@admin"}, + expectedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + }, + { + name: "Nil previous roles - adding all defined roles", + deployedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + currentRoles: []mdbv1.MongoDBRole{role1, role2}, + previousRoles: nil, + expectedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + }, + { + name: "Nil current roles - removing all defined roles", + deployedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + currentRoles: nil, + previousRoles: []string{"role1@admin", "role2@admin"}, + expectedRoles: []mdbv1.MongoDBRole{externalRole}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mergedRoles := mergeRoles(tc.deployedRoles, tc.currentRoles, tc.previousRoles) + + require.Len(t, mergedRoles, len(tc.expectedRoles)) + for _, r := range tc.expectedRoles { + assert.Contains(t, mergedRoles, r) + } + }) + } +} + func TestExternalRoleIsNotRemoved(t *testing.T) { ctx := context.Background() @@ -562,6 +651,10 @@ func TestExternalRoleIsNotRemoved(t *testing.T) { } mockOm.AddRole(externalRole) + // Ensure external role is added + roles = mockOm.GetRoles() + require.Len(t, roles, 2) + // Reconcile again - role created from the UI should still be there roleStrings, _ := controller.getRoleStrings(ctx, rs.Spec.DbCommonSpec, true, kube.ObjectKeyFromApiObject(rs)) controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), roleStrings, zap.S()) @@ -570,7 +663,7 @@ 
func TestExternalRoleIsNotRemoved(t *testing.T) { require.Len(t, roles, 2) // Delete embedded role, only the external should remain - rs = DefaultReplicaSetBuilder().Build() + rs.Spec.Security.Roles = nil controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), roleStrings, zap.S()) roles = mockOm.GetRoles() diff --git a/controllers/operator/mongodbshardedcluster_controller.go b/controllers/operator/mongodbshardedcluster_controller.go index 2c5cb301f..904398a6e 100644 --- a/controllers/operator/mongodbshardedcluster_controller.go +++ b/controllers/operator/mongodbshardedcluster_controller.go @@ -958,13 +958,10 @@ func (r *ShardedClusterReconcileHelper) Reconcile(ctx context.Context, log *zap. } // Set annotation and state for previously configured roles - roleAnnotation, roleStrings, err := r.commonController.getRoleAnnotation(ctx, r.sc.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, kube.ObjectKeyFromApiObject(r.sc)) + _, roleStrings, err := r.commonController.getRoleAnnotation(ctx, r.sc.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, kube.ObjectKeyFromApiObject(r.sc)) if err != nil { return r.updateStatus(ctx, sc, workflow.Failed(err), log) } - for k, val := range roleAnnotation { - annotationsToAdd[k] = val - } r.deploymentState.LastConfiguredRoles = roleStrings // Set annotations that should be saved at the end of the reconciliation, e.g lastAchievedSpec From 7ca350220841e4f1ad9a2ad488c1b21dd0a4eb71 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Wed, 10 Dec 2025 16:05:33 +0100 Subject: [PATCH 11/12] Fix unit tests --- ...odbshardedcluster_controller_multi_test.go | 7 +++--- .../mongodbshardedcluster_controller_test.go | 23 +++++++++++++++---- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/controllers/operator/mongodbshardedcluster_controller_multi_test.go b/controllers/operator/mongodbshardedcluster_controller_multi_test.go index 3d8591764..f53015d86 100644 --- 
a/controllers/operator/mongodbshardedcluster_controller_multi_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_multi_test.go @@ -12,6 +12,7 @@ import ( "time" "github.com/ghodss/yaml" + "github.com/mongodb/mongodb-kubernetes/pkg/kube" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/yudai/gojsondiff" @@ -1362,7 +1363,7 @@ func TestMigrateToNewDeploymentState(t *testing.T) { err = kubeClient.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: sc.Namespace}, stateConfigMap) require.NoError(t, err) - expectedDeploymentState := generateExpectedDeploymentState(t, sc) + expectedDeploymentState := generateExpectedDeploymentState(ctx, t, sc, reconciler.ReconcileCommonController) require.Contains(t, stateConfigMap.Data, stateKey) require.JSONEq(t, expectedDeploymentState, stateConfigMap.Data[stateKey]) @@ -3861,9 +3862,9 @@ func getMultiClusterFQDN(stsName string, namespace string, clusterIdx int, podId return fmt.Sprintf("%s-svc.%s.svc.%s", getPodName(stsName, clusterIdx, podIdx), namespace, clusterDomain) } -func generateExpectedDeploymentState(t *testing.T, sc *mdbv1.MongoDB) string { +func generateExpectedDeploymentState(ctx context.Context, t *testing.T, sc *mdbv1.MongoDB, r *ReconcileCommonController) string { lastSpec, _ := sc.GetLastSpec() - lastConfiguredRoles, _ := sc.GetLastConfiguredRoles() + lastConfiguredRoles, _ := r.getRoleStrings(ctx, sc.Spec.DbCommonSpec, true, kube.ObjectKeyFromApiObject(sc)) expectedState := ShardedClusterDeploymentState{ CommonDeploymentState: CommonDeploymentState{ ClusterMapping: map[string]int{}, diff --git a/controllers/operator/mongodbshardedcluster_controller_test.go b/controllers/operator/mongodbshardedcluster_controller_test.go index d994af783..9dc384f41 100644 --- a/controllers/operator/mongodbshardedcluster_controller_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_test.go @@ -1892,10 +1892,16 @@ func TestSharderClusterRoleAnnotationIsSet(t 
*testing.T) { require.NoError(t, err) checkReconcileSuccessful(ctx, t, reconciler, sc, cl) - roleString, _ := json.Marshal([]string{"embedded-role@admin"}) + // Assert that the roles are saved in the state configmap + configMapName := fmt.Sprintf("%s-state", sc.Name) + stateConfigMap := &corev1.ConfigMap{} + err = cl.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: sc.Namespace}, stateConfigMap) + require.NoError(t, err) - // Assert that the member ids are saved in the annotation - assert.Equal(t, sc.GetAnnotations()[util.LastConfiguredRoles], string(roleString)) + deploymentState := ShardedClusterDeploymentState{} + err = json.Unmarshal([]byte(stateConfigMap.Data[stateKey]), &deploymentState) + require.NoError(t, err) + assert.Equal(t, []string{"embedded-role@admin"}, deploymentState.LastConfiguredRoles) roles := omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles() assert.Len(t, roles, 1) @@ -1906,8 +1912,15 @@ func TestSharderClusterRoleAnnotationIsSet(t *testing.T) { checkReconcileSuccessful(ctx, t, reconciler, sc, cl) - // Assert that the roles annotation is updated and role is removed - assert.Equal(t, sc.GetAnnotations()[util.LastConfiguredRoles], "[]") + // Assert that the state configmap is updated and role is removed + stateConfigMap = &corev1.ConfigMap{} + err = cl.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: sc.Namespace}, stateConfigMap) + require.NoError(t, err) + + deploymentState = ShardedClusterDeploymentState{} + err = json.Unmarshal([]byte(stateConfigMap.Data[stateKey]), &deploymentState) + require.NoError(t, err) + assert.Equal(t, []string{}, deploymentState.LastConfiguredRoles) roles = omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles() assert.Len(t, roles, 0) From 71ceec3e04302679c2fe980ae8d6b00193182ef3 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Wed, 10 Dec 2025 16:23:11 +0100 Subject: [PATCH 12/12] Lint --- .../operator/mongodbshardedcluster_controller_multi_test.go | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/operator/mongodbshardedcluster_controller_multi_test.go b/controllers/operator/mongodbshardedcluster_controller_multi_test.go index f53015d86..20e5eeee9 100644 --- a/controllers/operator/mongodbshardedcluster_controller_multi_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_multi_test.go @@ -12,7 +12,6 @@ import ( "time" "github.com/ghodss/yaml" - "github.com/mongodb/mongodb-kubernetes/pkg/kube" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/yudai/gojsondiff" @@ -38,6 +37,7 @@ import ( "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/api/v1/common" kubernetesClient "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/kube/client" "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/kube/configmap" + "github.com/mongodb/mongodb-kubernetes/pkg/kube" "github.com/mongodb/mongodb-kubernetes/pkg/multicluster" "github.com/mongodb/mongodb-kubernetes/pkg/test" "github.com/mongodb/mongodb-kubernetes/pkg/util"