diff --git a/api/v1/mdb/mongodb_types.go b/api/v1/mdb/mongodb_types.go index 42bab17a2..70584a745 100644 --- a/api/v1/mdb/mongodb_types.go +++ b/api/v1/mdb/mongodb_types.go @@ -1194,6 +1194,20 @@ func (m *MongoDB) GetLastSpec() (*MongoDbSpec, error) { return &lastSpec, nil } +func (m *MongoDB) GetLastConfiguredRoles() ([]string, error) { + lastRolesStr := annotations.GetAnnotation(m, util.LastConfiguredRoles) + if lastRolesStr == "" { + return nil, nil + } + + lastRoles := []string{} + if err := json.Unmarshal([]byte(lastRolesStr), &lastRoles); err != nil { + return nil, err + } + + return lastRoles, nil +} + func (m *MongoDB) ServiceName() string { if m.Spec.StatefulSetConfiguration != nil { svc := m.Spec.StatefulSetConfiguration.SpecWrapper.Spec.ServiceName diff --git a/api/v1/mdbmulti/mongodbmultibuilder.go b/api/v1/mdbmulti/mongodbmultibuilder.go index 619598712..1c7d7491c 100644 --- a/api/v1/mdbmulti/mongodbmultibuilder.go +++ b/api/v1/mdbmulti/mongodbmultibuilder.go @@ -73,6 +73,14 @@ func (m *MultiReplicaSetBuilder) SetSecurity(s *mdbv1.Security) *MultiReplicaSet return m } +func (m *MultiReplicaSetBuilder) SetRoles(roles []mdbv1.MongoDBRole) *MultiReplicaSetBuilder { + if m.Spec.Security == nil { + m.Spec.Security = &mdbv1.Security{} + } + m.Spec.Security.Roles = roles + return m +} + func (m *MultiReplicaSetBuilder) SetRoleRefs(roleRefs []mdbv1.MongoDBRoleRef) *MultiReplicaSetBuilder { if m.Spec.Security == nil { m.Spec.Security = &mdbv1.Security{} diff --git a/changelog/20251204_fix_roles_configured_via_ops_manager_ui_or_api_will_no.md b/changelog/20251204_fix_roles_configured_via_ops_manager_ui_or_api_will_no.md new file mode 100644 index 000000000..63e13df96 --- /dev/null +++ b/changelog/20251204_fix_roles_configured_via_ops_manager_ui_or_api_will_no.md @@ -0,0 +1,6 @@ +--- +kind: fix +date: 2025-12-04 +--- + +* Roles configured via Ops Manager UI or API will no longer be removed by the operator diff --git a/controllers/om/mockedomclient.go b/controllers/om/mockedomclient.go index 51c3f844d..2257ed0fc 100644 --- a/controllers/om/mockedomclient.go +++ b/controllers/om/mockedomclient.go @@ -933,6 +933,16 @@ func (oc *MockedOmConnection) AddPreferredHostname(agentApiKey string, value str return nil } +func (oc *MockedOmConnection) AddRole(role mdbv1.MongoDBRole) { + roles := oc.deployment.GetRoles() + roles = append(roles, role) + oc.deployment.SetRoles(roles) +} + +func (oc *MockedOmConnection) GetRoles() []mdbv1.MongoDBRole { + return oc.deployment.GetRoles() +} + // updateAutoAuthMechanism simulates the changes made by Ops Manager and the agents in deciding which // mechanism will be specified as the "autoAuthMechanisms" func updateAutoAuthMechanism(ac *AutomationConfig) { diff --git a/controllers/operator/common_controller.go b/controllers/operator/common_controller.go index 356b7a592..4e6651214 100644 --- a/controllers/operator/common_controller.go +++ b/controllers/operator/common_controller.go @@ -100,44 +100,85 @@ func NewReconcileCommonController(ctx context.Context, client client.Client) *Re } } -// ensureRoles will first check if both roles and roleRefs are populated. If both are, it will return an error, which is inline with the webhook validation rules. -// Otherwise, if roles is populated, then it will extract the list of roles and check if they are already set in Ops Manager. If they are not, it will update the roles in Ops Manager. 
-// If roleRefs is populated, it will extract the list of roles from the referenced resources and check if they are already set in Ops Manager. If they are not, it will update the roles in Ops Manager.
-func (r *ReconcileCommonController) ensureRoles(ctx context.Context, db mdbv1.DbCommonSpec, enableClusterMongoDBRoles bool, conn om.Connection, mongodbResourceNsName types.NamespacedName, log *zap.SugaredLogger) workflow.Status {
+func (r *ReconcileCommonController) getRoleAnnotation(ctx context.Context, db mdbv1.DbCommonSpec, enableClusterMongoDBRoles bool, mongodbResourceNsName types.NamespacedName) (map[string]string, []string, error) {
+	previousRoles, err := r.getRoleStrings(ctx, db, enableClusterMongoDBRoles, mongodbResourceNsName)
+	if err != nil {
+		return nil, nil, xerrors.Errorf("Error retrieving configured roles: %w", err)
+	}
+
+	annotationToAdd := make(map[string]string)
+	rolesBytes, err := json.Marshal(previousRoles)
+	if err != nil {
+		return nil, nil, err
+	}
+	annotationToAdd[util.LastConfiguredRoles] = string(rolesBytes)
+
+	return annotationToAdd, previousRoles, nil
+}
+
+func (r *ReconcileCommonController) getRoleStrings(ctx context.Context, db mdbv1.DbCommonSpec, enableClusterMongoDBRoles bool, mongodbResourceNsName types.NamespacedName) ([]string, error) {
+	roles, err := r.getRoles(ctx, db, enableClusterMongoDBRoles, mongodbResourceNsName)
+	if err != nil {
+		return []string{}, err
+	}
+
+	roleStrings := make([]string, len(roles))
+	for i, r := range roles {
+		roleStrings[i] = fmt.Sprintf("%s@%s", r.Role, r.Db)
+	}
+
+	return roleStrings, nil
+}
+
+func (r *ReconcileCommonController) getRoles(ctx context.Context, db mdbv1.DbCommonSpec, enableClusterMongoDBRoles bool, mongodbResourceNsName types.NamespacedName) ([]mdbv1.MongoDBRole, error) {
 	localRoles := db.GetSecurity().Roles
 	roleRefs := db.GetSecurity().RoleRefs
 
 	if len(localRoles) > 0 && len(roleRefs) > 0 {
-		return workflow.Failed(xerrors.Errorf("At most one one of roles or roleRefs can be non-empty."))
+		return nil, xerrors.Errorf("At most one of roles or roleRefs can be non-empty.")
 	}
 
 	var roles []mdbv1.MongoDBRole
 	if len(roleRefs) > 0 {
 		if !enableClusterMongoDBRoles {
-			return workflow.Failed(xerrors.Errorf("RoleRefs are not supported when ClusterMongoDBRoles are disabled. Please enable ClusterMongoDBRoles in the operator configuration. This can be done by setting the operator.enableClusterMongoDBRoles to true in the helm values file, which will automatically installed the necessary RBAC. Alternatively, it can be enabled by adding -watch-resource=clustermongodbroles flag to the operator deployment, and manually creating the necessary RBAC."))
+			return nil, xerrors.Errorf("RoleRefs are not supported when ClusterMongoDBRoles are disabled. Please enable ClusterMongoDBRoles in the operator configuration. This can be done by setting operator.enableClusterMongoDBRoles to true in the helm values file, which will automatically install the necessary RBAC. Alternatively, it can be enabled by adding the -watch-resource=clustermongodbroles flag to the operator deployment and manually creating the necessary RBAC.")
 		}
 		var err error
 		roles, err = r.getRoleRefs(ctx, roleRefs, mongodbResourceNsName, db.Version)
 		if err != nil {
-			return workflow.Failed(err)
+			return nil, err
 		}
 	} else {
 		roles = localRoles
 	}
 
+	return roles, nil
+}
+
+// ensureRoles will first check if both roles and roleRefs are populated. If both are, it will return an error, which is in line with the webhook validation rules. 
+// Otherwise, if roles is populated, then it will extract the list of roles and check if they are already set in Ops Manager. If they are not, it will update the roles in Ops Manager. +// If roleRefs is populated, it will extract the list of roles from the referenced resources and check if they are already set in Ops Manager. If they are not, it will update the roles in Ops Manager. +func (r *ReconcileCommonController) ensureRoles(ctx context.Context, db mdbv1.DbCommonSpec, enableClusterMongoDBRoles bool, conn om.Connection, mongodbResourceNsName types.NamespacedName, previousRoles []string, log *zap.SugaredLogger) workflow.Status { + roles, err := r.getRoles(ctx, db, enableClusterMongoDBRoles, mongodbResourceNsName) + if err != nil { + return workflow.Failed(err) + } + d, err := conn.ReadDeployment() if err != nil { return workflow.Failed(err) } dRoles := d.GetRoles() - if reflect.DeepEqual(dRoles, roles) { + mergedRoles := mergeRoles(dRoles, roles, previousRoles) + + if reflect.DeepEqual(dRoles, mergedRoles) { return workflow.OK() } // clone roles list to avoid mutating the spec in normalizePrivilegeResource - newRoles := make([]mdbv1.MongoDBRole, len(roles)) - for i := range roles { - newRoles[i] = *roles[i].DeepCopy() + newRoles := make([]mdbv1.MongoDBRole, len(mergedRoles)) + for i := range mergedRoles { + newRoles[i] = *mergedRoles[i].DeepCopy() } roles = newRoles @@ -189,6 +230,33 @@ func normalizePrivilegeResource(resource mdbv1.Resource) mdbv1.Resource { return resource } +// mergeRoles merges the deployed roles with the current roles and previously configured roles. +// It ensures that roles configured outside the operator are not removed. +// This is achieved by removing currently configured roles from the deployed roles. +// To ensure that roles removed from the spec are also removed from OM, we also remove the previously configured roles. +// Finally, we add back the currently configured roles. +func mergeRoles(deployed []mdbv1.MongoDBRole, current []mdbv1.MongoDBRole, previous []string) []mdbv1.MongoDBRole { + roleMap := make(map[string]struct{}) + for _, r := range current { + roleMap[r.Role+"@"+r.Db] = struct{}{} + } + + for _, r := range previous { + roleMap[r] = struct{}{} + } + + mergedRoles := make([]mdbv1.MongoDBRole, 0) + for _, r := range deployed { + key := r.Role + "@" + r.Db + if _, ok := roleMap[key]; !ok { + mergedRoles = append(mergedRoles, r) + } + } + + mergedRoles = append(mergedRoles, current...) + return mergedRoles +} + // getRoleRefs retrieves the roles from the referenced resources. It will return an error if any of the referenced resources are not found. // It will also add the referenced resources to the resource watcher, so that they are watched for changes. // The referenced resources are expected to be of kind ClusterMongoDBRole. 
diff --git a/controllers/operator/common_controller_test.go b/controllers/operator/common_controller_test.go index 608a80bb9..4f7c9f8de 100644 --- a/controllers/operator/common_controller_test.go +++ b/controllers/operator/common_controller_test.go @@ -290,7 +290,7 @@ func TestFailWhenRoleAndRoleRefsAreConfigured(t *testing.T) { controller := NewReconcileCommonController(ctx, kubeClient) mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) - result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) assert.False(t, result.IsOK()) assert.Equal(t, status.PhaseFailed, result.Phase()) @@ -318,7 +318,7 @@ func TestRoleRefsAreAdded(t *testing.T) { _ = kubeClient.Create(ctx, roleResource) - controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) ac, err := mockOm.ReadAutomationConfig() assert.NoError(t, err) @@ -345,7 +345,7 @@ func TestErrorWhenRoleRefIsWrong(t *testing.T) { _ = kubeClient.Create(ctx, roleResource) - result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) assert.False(t, result.IsOK()) assert.Equal(t, status.PhaseFailed, result.Phase()) @@ -371,7 +371,7 @@ func TestErrorWhenRoleDoesNotExist(t *testing.T) { controller := NewReconcileCommonController(ctx, kubeClient) mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) - result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) assert.False(t, result.IsOK()) assert.Equal(t, status.PhaseFailed, result.Phase()) @@ -398,7 +398,7 @@ func TestDontSendNilPrivileges(t *testing.T) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient() controller := NewReconcileCommonController(ctx, kubeClient) mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) - controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) ac, err := mockOm.ReadAutomationConfig() assert.NoError(t, err) roles, ok := ac.Deployment["roles"].([]mdbv1.MongoDBRole) @@ -498,7 +498,7 @@ func TestCheckEmptyStringsInPrivilegesEquivalentToNotPassingFields(t *testing.T) controller := NewReconcileCommonController(ctx, kubeClient) mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) - controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) ac, err := mockOm.ReadAutomationConfig() assert.NoError(t, err) @@ -528,6 +528,149 @@ func TestCheckEmptyStringsInPrivilegesEquivalentToNotPassingFields(t *testing.T) } } +func TestMergeRoles(t *testing.T) { + externalRole := mdbv1.MongoDBRole{ + Role: "ext_role", + Db: "admin", + 
Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "read", + }}, + } + role1 := mdbv1.MongoDBRole{ + Role: "role1", + Db: "admin", + Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "readWrite", + }}, + } + + role2 := mdbv1.MongoDBRole{ + Role: "role2", + Db: "admin", + Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "readWrite", + }}, + } + + tests := []struct { + name string + deployedRoles []mdbv1.MongoDBRole + currentRoles []mdbv1.MongoDBRole + previousRoles []string + expectedRoles []mdbv1.MongoDBRole + }{ + // externalRole was added via UI + // role1 and role2 were defined in the CR + // role2 was removed from the CR + { + name: "Removing role from resource", + deployedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + currentRoles: []mdbv1.MongoDBRole{role1}, + previousRoles: []string{"role1@admin", "role2@admin"}, + expectedRoles: []mdbv1.MongoDBRole{externalRole, role1}, + }, + // externalRole was added via UI + // role1 was defined in the CR + // role2 was added in the CR + { + name: "Adding role in resource", + deployedRoles: []mdbv1.MongoDBRole{externalRole, role1}, + currentRoles: []mdbv1.MongoDBRole{role1, role2}, + previousRoles: []string{"role1@admin"}, + expectedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + }, + { + name: "Idempotency", + deployedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + currentRoles: []mdbv1.MongoDBRole{role1, role2}, + previousRoles: []string{"role1@admin", "role2@admin"}, + expectedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + }, + { + name: "Nil previous roles - adding all defined roles", + deployedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + currentRoles: []mdbv1.MongoDBRole{role1, role2}, + previousRoles: nil, + expectedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + }, + { + name: "Nil current roles - removing all defined roles", + deployedRoles: []mdbv1.MongoDBRole{externalRole, role1, role2}, + currentRoles: nil, + previousRoles: []string{"role1@admin", "role2@admin"}, + expectedRoles: []mdbv1.MongoDBRole{externalRole}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mergedRoles := mergeRoles(tc.deployedRoles, tc.currentRoles, tc.previousRoles) + + require.Len(t, mergedRoles, len(tc.expectedRoles)) + for _, r := range tc.expectedRoles { + assert.Contains(t, mergedRoles, r) + } + }) + } +} + +func TestExternalRoleIsNotRemoved(t *testing.T) { + ctx := context.Background() + + role := mdbv1.MongoDBRole{ + Role: "embedded-role", + Db: "admin", + Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "read", + }}, + } + + rs := DefaultReplicaSetBuilder().SetRoles([]mdbv1.MongoDBRole{role}).Build() + kubeClient, omConnectionFactory := mock.NewDefaultFakeClient() + controller := NewReconcileCommonController(ctx, kubeClient) + mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) + + // Create deployment with one embedded role + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), nil, zap.S()) + + roles := mockOm.GetRoles() + require.Len(t, roles, 1) + + // Add external role directly to OM (via UI/API) + externalRole := mdbv1.MongoDBRole{ + Role: "external-role", + Db: "admin", + Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "read", + }}, + } + mockOm.AddRole(externalRole) + + // Ensure external role is added + roles = mockOm.GetRoles() + require.Len(t, roles, 2) + + // Reconcile again - role created from the UI should still be there + roleStrings, _ := 
controller.getRoleStrings(ctx, rs.Spec.DbCommonSpec, true, kube.ObjectKeyFromApiObject(rs)) + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), roleStrings, zap.S()) + + roles = mockOm.GetRoles() + require.Len(t, roles, 2) + + // Delete embedded role, only the external should remain + rs.Spec.Security.Roles = nil + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), roleStrings, zap.S()) + + roles = mockOm.GetRoles() + require.Len(t, roles, 1) + assert.Equal(t, roles[0].Role, "external-role") +} + func TestSecretWatcherWithAllResources(t *testing.T) { ctx := context.Background() caName := "custom-ca" diff --git a/controllers/operator/mongodbmultireplicaset_controller.go b/controllers/operator/mongodbmultireplicaset_controller.go index e0820d819..6aeff2cbd 100644 --- a/controllers/operator/mongodbmultireplicaset_controller.go +++ b/controllers/operator/mongodbmultireplicaset_controller.go @@ -386,8 +386,14 @@ func (r *ReconcileMongoDbMultiReplicaSet) reconcileMemberResources(ctx context.C return workflow.Failed(err) } } + + previousRoles, err := getPreviousRolesFromAnnotation(*mrs) + if err != nil { + return workflow.Failed(err) + } + // Ensure custom roles are created in OM - if status := r.ensureRoles(ctx, mrs.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(mrs), log); !status.IsOK() { + if status := r.ensureRoles(ctx, mrs.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(mrs), previousRoles, log); !status.IsOK() { return status } @@ -694,6 +700,15 @@ func (r *ReconcileMongoDbMultiReplicaSet) saveLastAchievedSpec(ctx context.Conte } } + // Set annotation and state for previously configured roles + roleAnnotation, _, err := r.getRoleAnnotation(ctx, mrs.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, kube.ObjectKeyFromApiObject(&mrs)) + if err != nil { + return err + } + for k, val := range roleAnnotation { + annotationsToAdd[k] = val + } + return annotations.SetAnnotations(ctx, &mrs, annotationsToAdd, r.client) } @@ -828,6 +843,17 @@ func getReplicaSetProcessIdsFromAnnotation(mrs mdbmultiv1.MongoDBMultiCluster) ( return make(map[string]int), nil } +func getPreviousRolesFromAnnotation(mrs mdbmultiv1.MongoDBMultiCluster) ([]string, error) { + if rolesString, ok := mrs.Annotations[util.LastConfiguredRoles]; ok { + var roles []string + if err := json.Unmarshal([]byte(rolesString), &roles); err != nil { + return nil, err + } + return roles, nil + } + return nil, nil +} + func getSRVService(mrs *mdbmultiv1.MongoDBMultiCluster) corev1.Service { additionalConfig := mrs.Spec.GetAdditionalMongodConfig() port := additionalConfig.GetPortOrDefault() diff --git a/controllers/operator/mongodbmultireplicaset_controller_test.go b/controllers/operator/mongodbmultireplicaset_controller_test.go index 5c27d8606..0f98b1522 100644 --- a/controllers/operator/mongodbmultireplicaset_controller_test.go +++ b/controllers/operator/mongodbmultireplicaset_controller_test.go @@ -1425,6 +1425,43 @@ func TestValidationsRunOnReconcile(t *testing.T) { assert.Equal(t, fmt.Sprintf("Multiple clusters with the same name (%s) are not allowed", duplicateName), mrs.Status.Message) } +func TestRoleAnnotationIsSet(t *testing.T) { + ctx := context.Background() + + role := mdb.MongoDBRole{ + Role: "embedded-role", + Db: "admin", + Roles: []mdb.InheritedRole{{ + Db: "admin", + Role: "read", + }}, + } + + mrs := 
mdbmulti.DefaultMultiReplicaSetBuilder().SetClusterSpecList(clusters).SetRoles([]mdb.MongoDBRole{role}).Build()
+	reconciler, client, _, omConnectionFactory := defaultMultiReplicaSetReconciler(ctx, nil, "", "", mrs)
+	checkMultiReconcileSuccessful(ctx, t, reconciler, mrs, client, false)
+
+	roleString, _ := json.Marshal([]string{"embedded-role@admin"})
+
+	// Assert that the roles are saved in the annotation
+	assert.Equal(t, mrs.GetAnnotations()[util.LastConfiguredRoles], string(roleString))
+
+	roles := omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles()
+	assert.Len(t, roles, 1)
+
+	mrs.GetSecurity().Roles = []mdb.MongoDBRole{}
+	err := client.Update(ctx, mrs)
+	assert.NoError(t, err)
+
+	checkMultiReconcileSuccessful(ctx, t, reconciler, mrs, client, false)
+
+	// Assert that the roles annotation is updated and the role is removed
+	assert.Equal(t, mrs.GetAnnotations()[util.LastConfiguredRoles], "[]")
+
+	roles = omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles()
+	assert.Len(t, roles, 0)
+}
+
 func assertClusterpresent(t *testing.T, m map[string]int, specs mdb.ClusterSpecList, arr []int) {
 	tmp := make([]int, 0)
 	for _, s := range specs {
diff --git a/controllers/operator/mongodbreplicaset_controller.go b/controllers/operator/mongodbreplicaset_controller.go
index 7fb290d7d..baba9e04a 100644
--- a/controllers/operator/mongodbreplicaset_controller.go
+++ b/controllers/operator/mongodbreplicaset_controller.go
@@ -84,6 +84,7 @@ type ReconcileMongoDbReplicaSet struct {
 type replicaSetDeploymentState struct {
 	LastAchievedSpec         *mdbv1.MongoDbSpec
 	LastReconcileMemberCount int
+	LastConfiguredRoles      []string
 }
 
 var _ reconcile.Reconciler = &ReconcileMongoDbReplicaSet{}
@@ -127,9 +128,15 @@ func (r *ReplicaSetReconcilerHelper) readState() (*replicaSetDeploymentState, er
 	// reconciliation and prepares for eventually storing this in ConfigMap state instead of ephemeral status.
 	lastReconcileMemberCount := r.resource.Status.Members
 
+	lastConfiguredRoles, err := r.resource.GetLastConfiguredRoles()
+	if err != nil {
+		return nil, err
+	}
+
 	return &replicaSetDeploymentState{
 		LastAchievedSpec:         lastAchievedSpec,
 		LastReconcileMemberCount: lastReconcileMemberCount,
+		LastConfiguredRoles:      lastConfiguredRoles,
 	}, nil
 }
 
@@ -278,7 +285,7 @@ func (r *ReplicaSetReconcilerHelper) Reconcile(ctx context.Context) (reconcile.R
 	if recovery.ShouldTriggerRecovery(rs.Status.Phase != mdbstatus.PhaseRunning, rs.Status.LastTransition) {
 		log.Warnf("Triggering Automatic Recovery. 
The MongoDB resource %s/%s is in %s state since %s", rs.Namespace, rs.Name, rs.Status.Phase, rs.Status.LastTransition) automationConfigStatus := r.updateOmDeploymentRs(ctx, conn, r.deploymentState.LastReconcileMemberCount, tlsCertPath, internalClusterCertPath, deploymentOpts, shouldMirrorKeyfileForMongot, true).OnErrorPrepend("failed to create/update (Ops Manager reconciliation phase):") - reconcileStatus := r.reconcileMemberResources(ctx, conn, projectConfig, deploymentOpts) + reconcileStatus := r.reconcileMemberResources(ctx, conn, projectConfig, deploymentOpts, r.deploymentState.LastConfiguredRoles) if !reconcileStatus.IsOK() { log.Errorf("Recovery failed because of reconcile errors, %v", reconcileStatus) } @@ -294,7 +301,7 @@ func (r *ReplicaSetReconcilerHelper) Reconcile(ctx context.Context) (reconcile.R return r.updateOmDeploymentRs(ctx, conn, r.deploymentState.LastReconcileMemberCount, tlsCertPath, internalClusterCertPath, deploymentOpts, shouldMirrorKeyfileForMongot, false).OnErrorPrepend("failed to create/update (Ops Manager reconciliation phase):") }, func() workflow.Status { - return r.reconcileMemberResources(ctx, conn, projectConfig, deploymentOpts) + return r.reconcileMemberResources(ctx, conn, projectConfig, deploymentOpts, r.deploymentState.LastConfiguredRoles) }) if !status.IsOK() { @@ -319,6 +326,14 @@ func (r *ReplicaSetReconcilerHelper) Reconcile(ctx context.Context) (reconcile.R annotationsToAdd[k] = val } + roleAnnotation, _, err := r.reconciler.getRoleAnnotation(ctx, r.resource.Spec.DbCommonSpec, r.reconciler.enableClusterMongoDBRoles, kube.ObjectKeyFromApiObject(r.resource)) + if err != nil { + return r.updateStatus(ctx, workflow.Failed(err)) + } + for k, val := range roleAnnotation { + annotationsToAdd[k] = val + } + if err := annotations.SetAnnotations(ctx, r.resource, annotationsToAdd, r.reconciler.client); err != nil { return r.updateStatus(ctx, workflow.Failed(xerrors.Errorf("could not update resource annotations: %w", err))) } @@ -478,7 +493,7 @@ func (r *ReplicaSetReconcilerHelper) reconcileHostnameOverrideConfigMap(ctx cont // reconcileMemberResources handles the synchronization of kubernetes resources, which can be statefulsets, services etc. // All the resources required in the k8s cluster (as opposed to the automation config) for creating the replicaset // should be reconciled in this method. 
-func (r *ReplicaSetReconcilerHelper) reconcileMemberResources(ctx context.Context, conn om.Connection, projectConfig mdbv1.ProjectConfig, deploymentOptions deploymentOptionsRS) workflow.Status {
+func (r *ReplicaSetReconcilerHelper) reconcileMemberResources(ctx context.Context, conn om.Connection, projectConfig mdbv1.ProjectConfig, deploymentOptions deploymentOptionsRS, lastConfiguredRoles []string) workflow.Status {
 	rs := r.resource
 	reconciler := r.reconciler
 	log := r.log
@@ -489,7 +504,7 @@ func (r *ReplicaSetReconcilerHelper) reconcileMemberResources(ctx context.Contex
 	}
 
 	// Ensure roles are properly configured
-	if status := reconciler.ensureRoles(ctx, rs.Spec.DbCommonSpec, reconciler.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(rs), log); !status.IsOK() {
+	if status := reconciler.ensureRoles(ctx, rs.Spec.DbCommonSpec, reconciler.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(rs), lastConfiguredRoles, log); !status.IsOK() {
 		return status
 	}
diff --git a/controllers/operator/mongodbreplicaset_controller_test.go b/controllers/operator/mongodbreplicaset_controller_test.go
index a7794f07a..21c8e9c87 100644
--- a/controllers/operator/mongodbreplicaset_controller_test.go
+++ b/controllers/operator/mongodbreplicaset_controller_test.go
@@ -983,6 +983,44 @@ func TestVaultAnnotations_NotWrittenWhenDisabled(t *testing.T) {
 	}
 }
 
+func TestReplicaSetRoleAnnotationIsSet(t *testing.T) {
+	ctx := context.Background()
+
+	role := mdbv1.MongoDBRole{
+		Role: "embedded-role",
+		Db:   "admin",
+		Roles: []mdbv1.InheritedRole{{
+			Db:   "admin",
+			Role: "read",
+		}},
+	}
+
+	rs := DefaultReplicaSetBuilder().SetRoles([]mdbv1.MongoDBRole{role}).Build()
+	reconciler, client, omConnectionFactory := defaultReplicaSetReconciler(ctx, nil, "", "", rs)
+
+	checkReconcileSuccessful(ctx, t, reconciler, rs, client)
+
+	roleString, _ := json.Marshal([]string{"embedded-role@admin"})
+
+	// Assert that the roles are saved in the annotation
+	assert.Equal(t, rs.GetAnnotations()[util.LastConfiguredRoles], string(roleString))
+
+	roles := omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles()
+	assert.Len(t, roles, 1)
+
+	rs.GetSecurity().Roles = []mdbv1.MongoDBRole{}
+	err := client.Update(ctx, rs)
+	assert.NoError(t, err)
+
+	checkReconcileSuccessful(ctx, t, reconciler, rs, client)
+
+	// Assert that the roles annotation is updated and the role is removed
+	assert.Equal(t, rs.GetAnnotations()[util.LastConfiguredRoles], "[]")
+
+	roles = omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles()
+	assert.Len(t, roles, 0)
+}
+
 func testPVCFinishedResizing(t *testing.T, ctx context.Context, memberClient kubernetesClient.Client, p *corev1.PersistentVolumeClaim, reconciledResource *mdbv1.MongoDB, statefulSet *appsv1.StatefulSet, logger *zap.SugaredLogger) {
 	// Simulate that the PVC has finished resizing
 	setPVCWithUpdatedResource(ctx, t, memberClient, p)
diff --git a/controllers/operator/mongodbshardedcluster_controller.go b/controllers/operator/mongodbshardedcluster_controller.go
index c7298bc37..904398a6e 100644
--- a/controllers/operator/mongodbshardedcluster_controller.go
+++ b/controllers/operator/mongodbshardedcluster_controller.go
@@ -109,6 +109,7 @@ func newShardedClusterReconciler(ctx context.Context, kubeClient client.Client,
 type ShardedClusterDeploymentState struct {
 	CommonDeploymentState `json:",inline"`
 	LastAchievedSpec      *mdbv1.MongoDbSpec `json:"lastAchievedSpec"`
+	LastConfiguredRoles   []string           `json:"lastConfiguredRoles"`
 	Status                *mdbv1.MongoDbStatus 
`json:"status"` } @@ -131,6 +132,7 @@ func NewShardedClusterDeploymentState() *ShardedClusterDeploymentState { return &ShardedClusterDeploymentState{ CommonDeploymentState: CommonDeploymentState{ClusterMapping: map[string]int{}}, LastAchievedSpec: &mdbv1.MongoDbSpec{}, + LastConfiguredRoles: []string{}, Status: &mdbv1.MongoDbStatus{}, } } @@ -954,6 +956,14 @@ func (r *ShardedClusterReconcileHelper) Reconcile(ctx context.Context, log *zap. annotationsToAdd[k] = val } } + + // Set annotation and state for previously configured roles + _, roleStrings, err := r.commonController.getRoleAnnotation(ctx, r.sc.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, kube.ObjectKeyFromApiObject(r.sc)) + if err != nil { + return r.updateStatus(ctx, sc, workflow.Failed(err), log) + } + r.deploymentState.LastConfiguredRoles = roleStrings + // Set annotations that should be saved at the end of the reconciliation, e.g lastAchievedSpec if err := annotations.SetAnnotations(ctx, sc, annotationsToAdd, r.commonController.client); err != nil { return r.updateStatus(ctx, sc, workflow.Failed(err), log) @@ -1059,7 +1069,7 @@ func (r *ShardedClusterReconcileHelper) doShardedClusterProcessing(ctx context.C } } - if workflowStatus := r.commonController.ensureRoles(ctx, sc.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(sc), log); !workflowStatus.IsOK() { + if workflowStatus := r.commonController.ensureRoles(ctx, sc.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(sc), r.deploymentState.LastConfiguredRoles, log); !workflowStatus.IsOK() { return workflowStatus } diff --git a/controllers/operator/mongodbshardedcluster_controller_multi_test.go b/controllers/operator/mongodbshardedcluster_controller_multi_test.go index 616d57bfa..20e5eeee9 100644 --- a/controllers/operator/mongodbshardedcluster_controller_multi_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_multi_test.go @@ -37,6 +37,7 @@ import ( "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/api/v1/common" kubernetesClient "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/kube/client" "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/kube/configmap" + "github.com/mongodb/mongodb-kubernetes/pkg/kube" "github.com/mongodb/mongodb-kubernetes/pkg/multicluster" "github.com/mongodb/mongodb-kubernetes/pkg/test" "github.com/mongodb/mongodb-kubernetes/pkg/util" @@ -1362,7 +1363,7 @@ func TestMigrateToNewDeploymentState(t *testing.T) { err = kubeClient.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: sc.Namespace}, stateConfigMap) require.NoError(t, err) - expectedDeploymentState := generateExpectedDeploymentState(t, sc) + expectedDeploymentState := generateExpectedDeploymentState(ctx, t, sc, reconciler.ReconcileCommonController) require.Contains(t, stateConfigMap.Data, stateKey) require.JSONEq(t, expectedDeploymentState, stateConfigMap.Data[stateKey]) @@ -3861,14 +3862,16 @@ func getMultiClusterFQDN(stsName string, namespace string, clusterIdx int, podId return fmt.Sprintf("%s-svc.%s.svc.%s", getPodName(stsName, clusterIdx, podIdx), namespace, clusterDomain) } -func generateExpectedDeploymentState(t *testing.T, sc *mdbv1.MongoDB) string { +func generateExpectedDeploymentState(ctx context.Context, t *testing.T, sc *mdbv1.MongoDB, r *ReconcileCommonController) string { lastSpec, _ := sc.GetLastSpec() + lastConfiguredRoles, _ := r.getRoleStrings(ctx, sc.Spec.DbCommonSpec, true, kube.ObjectKeyFromApiObject(sc)) expectedState := 
ShardedClusterDeploymentState{
 		CommonDeploymentState: CommonDeploymentState{
 			ClusterMapping: map[string]int{},
 		},
-		LastAchievedSpec: lastSpec,
-		Status:           &sc.Status,
+		LastAchievedSpec:    lastSpec,
+		LastConfiguredRoles: lastConfiguredRoles,
+		Status:              &sc.Status,
 	}
 	lastSpecBytes, err := json.Marshal(expectedState)
 	require.NoError(t, err)
diff --git a/controllers/operator/mongodbshardedcluster_controller_test.go b/controllers/operator/mongodbshardedcluster_controller_test.go
index ee5d87894..9dc384f41 100644
--- a/controllers/operator/mongodbshardedcluster_controller_test.go
+++ b/controllers/operator/mongodbshardedcluster_controller_test.go
@@ -2,6 +2,7 @@ package operator
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"reflect"
 	"strings"
@@ -1874,6 +1875,57 @@ func TestSingleClusterShardedScalingWithOverrides(t *testing.T) {
 	}
 }
 
+func TestShardedClusterRoleAnnotationIsSet(t *testing.T) {
+	ctx := context.Background()
+
+	role := mdbv1.MongoDBRole{
+		Role: "embedded-role",
+		Db:   "admin",
+		Roles: []mdbv1.InheritedRole{{
+			Db:   "admin",
+			Role: "read",
+		}},
+	}
+
+	sc := test.DefaultClusterBuilder().SetRoles([]mdbv1.MongoDBRole{role}).Build()
+	reconciler, _, cl, omConnectionFactory, err := defaultShardedClusterReconciler(ctx, nil, "", "", sc, nil)
+	require.NoError(t, err)
+	checkReconcileSuccessful(ctx, t, reconciler, sc, cl)
+
+	// Assert that the roles are saved in the state configmap
+	configMapName := fmt.Sprintf("%s-state", sc.Name)
+	stateConfigMap := &corev1.ConfigMap{}
+	err = cl.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: sc.Namespace}, stateConfigMap)
+	require.NoError(t, err)
+
+	deploymentState := ShardedClusterDeploymentState{}
+	err = json.Unmarshal([]byte(stateConfigMap.Data[stateKey]), &deploymentState)
+	require.NoError(t, err)
+	assert.Equal(t, []string{"embedded-role@admin"}, deploymentState.LastConfiguredRoles)
+
+	roles := omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles()
+	assert.Len(t, roles, 1)
+
+	sc.GetSecurity().Roles = []mdbv1.MongoDBRole{}
+	err = cl.Update(ctx, sc)
+	assert.NoError(t, err)
+
+	checkReconcileSuccessful(ctx, t, reconciler, sc, cl)
+
+	// Assert that the state configmap is updated and the role is removed
+	stateConfigMap = &corev1.ConfigMap{}
+	err = cl.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: sc.Namespace}, stateConfigMap)
+	require.NoError(t, err)
+
+	deploymentState = ShardedClusterDeploymentState{}
+	err = json.Unmarshal([]byte(stateConfigMap.Data[stateKey]), &deploymentState)
+	require.NoError(t, err)
+	assert.Equal(t, []string{}, deploymentState.LastConfiguredRoles)
+
+	roles = omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles()
+	assert.Len(t, roles, 0)
+}
+
 func generateHostsWithDistributionSingleCluster(stsName string, namespace string, memberCount int, clusterDomain string, externalClusterDomain string) ([]string, []string) {
 	var hosts []string
 	var podNames []string
diff --git a/controllers/operator/mongodbstandalone_controller.go b/controllers/operator/mongodbstandalone_controller.go
index 56f84fc97..56b4531ca 100644
--- a/controllers/operator/mongodbstandalone_controller.go
+++ b/controllers/operator/mongodbstandalone_controller.go
@@ -220,7 +220,12 @@ func (r *ReconcileMongoDbStandalone) Reconcile(ctx context.Context, request reco
 		return r.updateStatus(ctx, s, status, log)
 	}
 
-	if status := r.ensureRoles(ctx, s.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(s), log); !status.IsOK() {
+	previousRoles, err := s.GetLastConfiguredRoles()
+	if err != nil {
+		return r.updateStatus(ctx, s, workflow.Failed(xerrors.Errorf("Failed to get last configured roles: %w", err)), log)
+	}
+
+	if status := r.ensureRoles(ctx, s.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(s), previousRoles, log); !status.IsOK() {
 		return r.updateStatus(ctx, s, status, log)
 	}
@@ -317,6 +322,15 @@ func (r *ReconcileMongoDbStandalone) Reconcile(ctx context.Context, request reco
 			annotationsToAdd[k] = val
 		}
 	}
+
+	roleAnnotation, _, err := r.getRoleAnnotation(ctx, s.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, kube.ObjectKeyFromApiObject(s))
+	if err != nil {
+		return r.updateStatus(ctx, s, workflow.Failed(err), log)
+	}
+	for k, val := range roleAnnotation {
+		annotationsToAdd[k] = val
+	}
+
 	if err := annotations.SetAnnotations(ctx, s, annotationsToAdd, r.client); err != nil {
 		return r.updateStatus(ctx, s, workflow.Failed(err), log)
 	}
diff --git a/controllers/operator/mongodbstandalone_controller_test.go b/controllers/operator/mongodbstandalone_controller_test.go
index cfa032870..134561f90 100644
--- a/controllers/operator/mongodbstandalone_controller_test.go
+++ b/controllers/operator/mongodbstandalone_controller_test.go
@@ -2,6 +2,7 @@ package operator
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"reflect"
 	"testing"
@@ -323,6 +324,44 @@ func TestStandaloneAgentVersionMapping(t *testing.T) {
 	agentVersionMappingTest(ctx, t, defaultResources, overriddenResources)
 }
 
+func TestStandaloneRoleAnnotationIsSet(t *testing.T) {
+	ctx := context.Background()
+
+	role := mdbv1.MongoDBRole{
+		Role: "embedded-role",
+		Db:   "admin",
+		Roles: []mdbv1.InheritedRole{{
+			Db:   "admin",
+			Role: "read",
+		}},
+	}
+
+	st := DefaultStandaloneBuilder().SetRoles([]mdbv1.MongoDBRole{role}).Build()
+	reconciler, client, omConnectionFactory := defaultStandaloneReconciler(ctx, nil, "", "", om.NewEmptyMockedOmConnection, st)
+
+	checkReconcileSuccessful(ctx, t, reconciler, st, client)
+
+	roleString, _ := json.Marshal([]string{"embedded-role@admin"})
+
+	// Assert that the roles are saved in the annotation
+	assert.Equal(t, st.GetAnnotations()[util.LastConfiguredRoles], string(roleString))
+
+	roles := omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles()
+	assert.Len(t, roles, 1)
+
+	st.GetSecurity().Roles = []mdbv1.MongoDBRole{}
+	err := client.Update(ctx, st)
+	assert.NoError(t, err)
+
+	checkReconcileSuccessful(ctx, t, reconciler, st, client)
+
+	// Assert that the roles annotation is updated and the role is removed
+	assert.Equal(t, st.GetAnnotations()[util.LastConfiguredRoles], "[]")
+
+	roles = omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetRoles()
+	assert.Len(t, roles, 0)
+}
+
 // defaultStandaloneReconciler is the standalone reconciler used in unit test. 
It "adds" necessary // additional K8s objects (st, connection config map and secrets) necessary for reconciliation, // so it's possible to call 'reconcileAppDB()' on it right away @@ -385,6 +424,14 @@ func (b *StandaloneBuilder) SetService(s string) *StandaloneBuilder { return b } +func (b *StandaloneBuilder) SetRoles(roles []mdbv1.MongoDBRole) *StandaloneBuilder { + if b.Spec.Security == nil { + b.Spec.Security = &mdbv1.Security{} + } + b.Spec.Security.Roles = roles + return b +} + func (b *StandaloneBuilder) SetPodSpecTemplate(spec corev1.PodTemplateSpec) *StandaloneBuilder { if b.Spec.PodSpec == nil { b.Spec.PodSpec = &mdbv1.MongoDbPodSpec{} diff --git a/pkg/test/sharded_cluster_builder.go b/pkg/test/sharded_cluster_builder.go index 253126056..4e8afbb9f 100644 --- a/pkg/test/sharded_cluster_builder.go +++ b/pkg/test/sharded_cluster_builder.go @@ -126,6 +126,14 @@ func (b *ClusterBuilder) SetSecurity(security mdb.Security) *ClusterBuilder { return b } +func (b *ClusterBuilder) SetRoles(roles []mdb.MongoDBRole) *ClusterBuilder { + if b.Spec.Security == nil { + b.Spec.Security = &mdb.Security{} + } + b.Spec.Security.Roles = roles + return b +} + func (b *ClusterBuilder) EnableTLS() *ClusterBuilder { if b.Spec.Security == nil || b.Spec.Security.TLSConfig == nil { return b.SetSecurity(mdb.Security{TLSConfig: &mdb.TLSConfig{Enabled: true}}) diff --git a/pkg/util/constants.go b/pkg/util/constants.go index 0efc7d639..5444dff50 100644 --- a/pkg/util/constants.go +++ b/pkg/util/constants.go @@ -285,6 +285,7 @@ const ( // Annotation keys used by the operator LastAchievedSpec = "mongodb.com/v1.lastSuccessfulConfiguration" LastAchievedRsMemberIds = "mongodb.com/v1.lastAchievedRsMemberIds" + LastConfiguredRoles = "mongodb.com/v1.lastConfiguredRoles" // SecretVolumeName is the name of the volume resource. SecretVolumeName = "secret-certs"