diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index 5ef570cbe..b3bf0b6f7 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -1169,11 +1169,11 @@ func (r *Reconciler) reconcileInstance( ) if err == nil { - instanceConfigMap, err = r.reconcileInstanceConfigMap(ctx, cluster, spec, instance, otelConfig) + instanceConfigMap, err = r.reconcileInstanceConfigMap(ctx, cluster, spec, instance, otelConfig, backupsSpecFound) } if err == nil { instanceCertificates, err = r.reconcileInstanceCertificates( - ctx, cluster, spec, instance, rootCA) + ctx, cluster, spec, instance, rootCA, backupsSpecFound) } if err == nil { postgresDataVolume, err = r.reconcilePostgresDataVolume(ctx, cluster, spec, instance, clusterVolumes, nil) @@ -1398,10 +1398,8 @@ func addPGBackRestToInstancePodSpec( ctx context.Context, cluster *v1beta1.PostgresCluster, instanceCertificates *corev1.Secret, instancePod *corev1.PodSpec, ) { - if pgbackrest.RepoHostVolumeDefined(cluster) { - pgbackrest.AddServerToInstancePod(ctx, cluster, instancePod, - instanceCertificates.Name) - } + pgbackrest.AddServerToInstancePod(ctx, cluster, instancePod, + instanceCertificates.Name) pgbackrest.AddConfigToInstancePod(cluster, instancePod) } @@ -1412,7 +1410,7 @@ func addPGBackRestToInstancePodSpec( // files (etc) that apply to instance of cluster. func (r *Reconciler) reconcileInstanceConfigMap( ctx context.Context, cluster *v1beta1.PostgresCluster, spec *v1beta1.PostgresInstanceSetSpec, - instance *appsv1.StatefulSet, otelConfig *collector.Config, + instance *appsv1.StatefulSet, otelConfig *collector.Config, backupsSpecFound bool, ) (*corev1.ConfigMap, error) { instanceConfigMap := &corev1.ConfigMap{ObjectMeta: naming.InstanceConfigMap(instance)} instanceConfigMap.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) @@ -1439,11 +1437,9 @@ func (r *Reconciler) reconcileInstanceConfigMap( err = collector.AddToConfigMap(ctx, otelConfig, instanceConfigMap) // Add pgbackrest logrotate if OpenTelemetryLogs is enabled and - // local volumes are available + // backups are enabled if err == nil && - feature.Enabled(ctx, feature.OpenTelemetryLogs) && - pgbackrest.RepoHostVolumeDefined(cluster) && - cluster.Spec.Instrumentation != nil { + collector.OpenTelemetryLogsEnabled(ctx, cluster) && backupsSpecFound { collector.AddLogrotateConfigs(ctx, cluster.Spec.Instrumentation, instanceConfigMap, @@ -1470,7 +1466,7 @@ func (r *Reconciler) reconcileInstanceConfigMap( func (r *Reconciler) reconcileInstanceCertificates( ctx context.Context, cluster *v1beta1.PostgresCluster, spec *v1beta1.PostgresInstanceSetSpec, instance *appsv1.StatefulSet, - root *pki.RootCertificateAuthority, + root *pki.RootCertificateAuthority, backupsSpecFound bool, ) (*corev1.Secret, error) { existing := &corev1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} err := errors.WithStack(client.IgnoreNotFound( @@ -1513,7 +1509,7 @@ func (r *Reconciler) reconcileInstanceCertificates( root.Certificate, leafCert.Certificate, leafCert.PrivateKey, instanceCerts) } - if err == nil { + if err == nil && backupsSpecFound { err = pgbackrest.InstanceCertificates(ctx, cluster, root.Certificate, leafCert.Certificate, leafCert.PrivateKey, instanceCerts) diff --git a/internal/controller/postgrescluster/instance_test.go b/internal/controller/postgrescluster/instance_test.go index 3316cbbe2..83afc6d20 100644 --- 
a/internal/controller/postgrescluster/instance_test.go +++ b/internal/controller/postgrescluster/instance_test.go @@ -32,7 +32,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/crunchydata/postgres-operator/internal/collector" "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" @@ -544,49 +546,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { }, } - t.Run("NoVolumeRepo", func(t *testing.T) { - cluster := cluster.DeepCopy() - cluster.Spec.Backups.PGBackRest.Repos = nil - - out := pod.DeepCopy() - addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) - - // Only Containers and Volumes fields have changed. - assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) - - // Only database container has mounts. - // Other containers are ignored. - assert.Assert(t, cmp.MarshalMatches(out.Containers, ` -- name: database - resources: {} - volumeMounts: - - mountPath: /etc/pgbackrest/conf.d - name: pgbackrest-config - readOnly: true -- name: other - resources: {} - `)) - - // Instance configuration files but no certificates. - // Other volumes are ignored. - assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` -- name: other -- name: postgres-data -- name: postgres-wal -- name: pgbackrest-config - projected: - sources: - - configMap: - items: - - key: pgbackrest_instance.conf - path: pgbackrest_instance.conf - - key: config-hash - path: config-hash - name: hippo-pgbackrest-config - `)) - }) - - t.Run("OneVolumeRepo", func(t *testing.T) { + t.Run("CloudOrVolumeSameBehavior", func(t *testing.T) { alwaysExpect := func(t testing.TB, result *corev1.PodSpec) { // Only Containers and Volumes fields have changed. assert.DeepEqual(t, pod, *result, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) @@ -635,21 +595,31 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { `)) } - cluster := cluster.DeepCopy() - cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + clusterWithVolume := cluster.DeepCopy() + clusterWithVolume.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ { Name: "repo1", Volume: new(v1beta1.RepoPVC), }, } - out := pod.DeepCopy() - addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) - alwaysExpect(t, out) + clusterWithCloudRepo := cluster.DeepCopy() + clusterWithCloudRepo.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + GCS: new(v1beta1.RepoGCS), + }, + } - // The TLS server is added and configuration mounted. - // It has PostgreSQL volumes mounted while other volumes are ignored. - assert.Assert(t, cmp.MarshalMatches(out.Containers, ` + outWithVolume := pod.DeepCopy() + addPGBackRestToInstancePodSpec(ctx, clusterWithVolume, &certificates, outWithVolume) + alwaysExpect(t, outWithVolume) + + outWithCloudRepo := pod.DeepCopy() + addPGBackRestToInstancePodSpec(ctx, clusterWithCloudRepo, &certificates, outWithCloudRepo) + alwaysExpect(t, outWithCloudRepo) + + outContainers := ` - name: database resources: {} volumeMounts: @@ -737,7 +707,12 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { - mountPath: /etc/pgbackrest/conf.d name: pgbackrest-config readOnly: true - `)) + ` + + // The TLS server is added and configuration mounted. 
+ // It has PostgreSQL volumes mounted while other volumes are ignored. + assert.Assert(t, cmp.MarshalMatches(outWithVolume.Containers, outContainers)) + assert.Assert(t, cmp.MarshalMatches(outWithCloudRepo.Containers, outContainers)) t.Run("CustomResources", func(t *testing.T) { cluster := cluster.DeepCopy() @@ -754,7 +729,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { }, } - before := out.DeepCopy() + before := outWithVolume.DeepCopy() out := pod.DeepCopy() addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) alwaysExpect(t, out) @@ -2045,3 +2020,286 @@ func TestCleanupDisruptionBudgets(t *testing.T) { }) }) } + +func TestReconcileInstanceConfigMap(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + + t.Run("LocalVolumeOtelDisabled", func(t *testing.T) { + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Name = "test-hippo-1" + assert.NilError(t, cc.Create(ctx, cluster)) + + spec := &v1beta1.PostgresInstanceSetSpec{} + instance := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-instance", + Namespace: ns.Name, + }, + } + pgParameters := r.generatePostgresParameters(ctx, cluster, true) + otelConfig := collector.NewConfigForPostgresPod(ctx, cluster, pgParameters) + + cm, err := r.reconcileInstanceConfigMap(ctx, cluster, spec, instance, otelConfig, true) + assert.NilError(t, err) + assert.Equal(t, cm.Name, "test-hippo-1-instance-config") + assert.Equal(t, cm.Data["collector.yaml"], "") + assert.Equal(t, cm.Data["logrotate.conf"], "") + }) + + t.Run("CloudRepoOtelDisabled", func(t *testing.T) { + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Name = "test-hippo-2" + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{{ + Name: "repo1", + GCS: &v1beta1.RepoGCS{ + Bucket: "test-bucket", + }, + }} + assert.NilError(t, cc.Create(ctx, cluster)) + + spec := &v1beta1.PostgresInstanceSetSpec{} + instance := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-instance", + Namespace: ns.Name, + }, + } + pgParameters := r.generatePostgresParameters(ctx, cluster, true) + otelConfig := collector.NewConfigForPostgresPod(ctx, cluster, pgParameters) + + cm, err := r.reconcileInstanceConfigMap(ctx, cluster, spec, instance, otelConfig, true) + assert.NilError(t, err) + assert.Equal(t, cm.Name, "test-hippo-2-instance-config") + assert.Equal(t, cm.Data["collector.yaml"], "") + assert.Equal(t, cm.Data["logrotate.conf"], "") + }) + + t.Run("LocalVolumeOtelMetricsEnabled", func(t *testing.T) { + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.OpenTelemetryMetrics: true, + })) + ctx := feature.NewContext(context.Background(), gate) + + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Name = "test-hippo-3" + cluster.Spec.Instrumentation = &v1beta1.InstrumentationSpec{} + assert.NilError(t, cc.Create(ctx, cluster)) + + spec := &v1beta1.PostgresInstanceSetSpec{} + instance := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-instance", + Namespace: ns.Name, + }, + } + pgParameters := r.generatePostgresParameters(ctx, cluster, true) + otelConfig := collector.NewConfigForPostgresPod(ctx, cluster, pgParameters) + + cm, err := r.reconcileInstanceConfigMap(ctx, cluster, spec, instance, 
otelConfig, true) + assert.NilError(t, err) + assert.Equal(t, cm.Name, "test-hippo-3-instance-config") + // We test the contents of the collector yaml elsewhere, I just want to + // make sure that it isn't empty here + assert.Assert(t, len(cm.Data["collector.yaml"]) > 0) + assert.Equal(t, cm.Data["logrotate.conf"], "") + }) + + t.Run("LocalVolumeOtelLogsEnabled", func(t *testing.T) { + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.OpenTelemetryLogs: true, + })) + ctx := feature.NewContext(context.Background(), gate) + + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Name = "test-hippo-4" + cluster.Spec.Instrumentation = &v1beta1.InstrumentationSpec{} + assert.NilError(t, cc.Create(ctx, cluster)) + + spec := &v1beta1.PostgresInstanceSetSpec{} + instance := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-instance", + Namespace: ns.Name, + }, + } + pgParameters := r.generatePostgresParameters(ctx, cluster, true) + otelConfig := collector.NewConfigForPostgresPod(ctx, cluster, pgParameters) + + cm, err := r.reconcileInstanceConfigMap(ctx, cluster, spec, instance, otelConfig, true) + assert.NilError(t, err) + assert.Equal(t, cm.Name, "test-hippo-4-instance-config") + // We test the contents of the collector and logrotate configs elsewhere, + // I just want to test that they aren't empty here + assert.Assert(t, len(cm.Data["collector.yaml"]) > 0) + assert.Assert(t, len(cm.Data["logrotate.conf"]) > 0) + }) + + t.Run("CloudRepoOtelMetricsEnabled", func(t *testing.T) { + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.OpenTelemetryMetrics: true, + })) + ctx := feature.NewContext(context.Background(), gate) + + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Name = "test-hippo-5" + cluster.Spec.Instrumentation = &v1beta1.InstrumentationSpec{} + assert.NilError(t, cc.Create(ctx, cluster)) + + spec := &v1beta1.PostgresInstanceSetSpec{} + instance := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-instance", + Namespace: ns.Name, + }, + } + pgParameters := r.generatePostgresParameters(ctx, cluster, true) + otelConfig := collector.NewConfigForPostgresPod(ctx, cluster, pgParameters) + + cm, err := r.reconcileInstanceConfigMap(ctx, cluster, spec, instance, otelConfig, true) + assert.NilError(t, err) + assert.Equal(t, cm.Name, "test-hippo-5-instance-config") + // We test the contents of the collector yaml elsewhere, I just want to + // make sure that it isn't empty here + assert.Assert(t, len(cm.Data["collector.yaml"]) > 0) + assert.Equal(t, cm.Data["logrotate.conf"], "") + }) + + t.Run("CloudRepoOtelLogsEnabled", func(t *testing.T) { + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.OpenTelemetryLogs: true, + })) + ctx := feature.NewContext(context.Background(), gate) + + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Name = "test-hippo-6" + cluster.Spec.Instrumentation = &v1beta1.InstrumentationSpec{} + assert.NilError(t, cc.Create(ctx, cluster)) + + spec := &v1beta1.PostgresInstanceSetSpec{} + instance := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-instance", + Namespace: ns.Name, + }, + } + pgParameters := r.generatePostgresParameters(ctx, cluster, true) + otelConfig := collector.NewConfigForPostgresPod(ctx, cluster, pgParameters) + + cm, err 
:= r.reconcileInstanceConfigMap(ctx, cluster, spec, instance, otelConfig, true) + assert.NilError(t, err) + assert.Equal(t, cm.Name, "test-hippo-6-instance-config") + // We test the contents of the collector and logrotate configs elsewhere, + // I just want to test that they aren't empty here + assert.Assert(t, len(cm.Data["collector.yaml"]) > 0) + assert.Assert(t, len(cm.Data["logrotate.conf"]) > 0) + }) + + t.Run("BackupsDisabledOtelDisabled", func(t *testing.T) { + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Name = "test-hippo-7" + assert.NilError(t, cc.Create(ctx, cluster)) + + spec := &v1beta1.PostgresInstanceSetSpec{} + instance := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-instance", + Namespace: ns.Name, + }, + } + pgParameters := r.generatePostgresParameters(ctx, cluster, false) + otelConfig := collector.NewConfigForPostgresPod(ctx, cluster, pgParameters) + + cm, err := r.reconcileInstanceConfigMap(ctx, cluster, spec, instance, otelConfig, false) + assert.NilError(t, err) + assert.Equal(t, cm.Name, "test-hippo-7-instance-config") + assert.Equal(t, cm.Data["collector.yaml"], "") + assert.Equal(t, cm.Data["logrotate.conf"], "") + }) + + t.Run("BackupsDisabledOtelMetricsEnabled", func(t *testing.T) { + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.OpenTelemetryMetrics: true, + })) + ctx := feature.NewContext(context.Background(), gate) + + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Name = "test-hippo-8" + cluster.Spec.Instrumentation = &v1beta1.InstrumentationSpec{} + assert.NilError(t, cc.Create(ctx, cluster)) + + spec := &v1beta1.PostgresInstanceSetSpec{} + instance := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-instance", + Namespace: ns.Name, + }, + } + pgParameters := r.generatePostgresParameters(ctx, cluster, false) + otelConfig := collector.NewConfigForPostgresPod(ctx, cluster, pgParameters) + + cm, err := r.reconcileInstanceConfigMap(ctx, cluster, spec, instance, otelConfig, false) + assert.NilError(t, err) + assert.Equal(t, cm.Name, "test-hippo-8-instance-config") + assert.Assert(t, len(cm.Data["collector.yaml"]) > 0) + assert.Equal(t, cm.Data["logrotate.conf"], "") + }) + + t.Run("BackupsDisabledOtelLogsEnabled", func(t *testing.T) { + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.OpenTelemetryLogs: true, + })) + ctx := feature.NewContext(context.Background(), gate) + + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Name = "test-hippo-9" + cluster.Spec.Instrumentation = &v1beta1.InstrumentationSpec{} + assert.NilError(t, cc.Create(ctx, cluster)) + + spec := &v1beta1.PostgresInstanceSetSpec{} + instance := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name + "-instance", + Namespace: ns.Name, + }, + } + pgParameters := r.generatePostgresParameters(ctx, cluster, false) + otelConfig := collector.NewConfigForPostgresPod(ctx, cluster, pgParameters) + + cm, err := r.reconcileInstanceConfigMap(ctx, cluster, spec, instance, otelConfig, false) + assert.NilError(t, err) + assert.Equal(t, cm.Name, "test-hippo-9-instance-config") + assert.Assert(t, len(cm.Data["collector.yaml"]) > 0) + assert.Equal(t, cm.Data["logrotate.conf"], "") + }) +} diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 
2c0d3d296..aada99ec5 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -23,7 +23,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -774,12 +773,7 @@ func (r *Reconciler) generateRepoVolumeIntent(postgresCluster *v1beta1.PostgresC // generateBackupJobSpecIntent generates a JobSpec for a pgBackRest backup job func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, repo v1beta1.PGBackRestRepo, serviceAccountName string, - labels, annotations map[string]string, opts ...string) (*batchv1.JobSpec, error) { - - selector, containerName, err := getPGBackRestExecSelector(postgresCluster, repo) - if err != nil { - return nil, errors.WithStack(err) - } + labels, annotations map[string]string, opts ...string) *batchv1.JobSpec { repoIndex := regexRepoIndex.FindString(repo.Name) cmdOpts := []string{ @@ -794,21 +788,31 @@ func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.P cmdOpts = append(cmdOpts, opts...) container := corev1.Container{ - Command: []string{"/opt/crunchy/bin/pgbackrest"}, - Env: []corev1.EnvVar{ - {Name: "COMMAND", Value: "backup"}, - {Name: "COMMAND_OPTS", Value: strings.Join(cmdOpts, " ")}, - {Name: "COMPARE_HASH", Value: "true"}, - {Name: "CONTAINER", Value: containerName}, - {Name: "NAMESPACE", Value: postgresCluster.GetNamespace()}, - {Name: "SELECTOR", Value: selector.String()}, - }, Image: config.PGBackRestContainerImage(postgresCluster), ImagePullPolicy: postgresCluster.Spec.ImagePullPolicy, Name: naming.PGBackRestRepoContainerName, SecurityContext: initialize.RestrictedSecurityContext(), } + // If the repo that we are backing up to is a local volume, we will configure + // the job to use the pgbackrest go binary to exec into the repo host and run + // the backup. If the repo is a cloud-based repo, we will run the pgbackrest + // backup command directly in the job pod. + if repo.Volume != nil { + container.Command = []string{"/opt/crunchy/bin/pgbackrest"} + container.Env = []corev1.EnvVar{ + {Name: "COMMAND", Value: "backup"}, + {Name: "COMMAND_OPTS", Value: strings.Join(cmdOpts, " ")}, + {Name: "COMPARE_HASH", Value: "true"}, + {Name: "CONTAINER", Value: naming.PGBackRestRepoContainerName}, + {Name: "NAMESPACE", Value: postgresCluster.GetNamespace()}, + {Name: "SELECTOR", Value: naming.PGBackRestDedicatedSelector(postgresCluster.GetName()).String()}, + } + } else { + container.Command = []string{"/bin/pgbackrest", "backup"} + container.Command = append(container.Command, cmdOpts...) 
+ } + if postgresCluster.Spec.Backups.PGBackRest.Jobs != nil { container.Resources = postgresCluster.Spec.Backups.PGBackRest.Jobs.Resources } @@ -862,13 +866,16 @@ func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.P jobSpec.Template.Spec.ImagePullSecrets = postgresCluster.Spec.ImagePullSecrets // add pgBackRest configs to template - if containerName == naming.PGBackRestRepoContainerName { + if repo.Volume != nil { pgbackrest.AddConfigToRepoPod(postgresCluster, &jobSpec.Template.Spec) } else { - pgbackrest.AddConfigToInstancePod(postgresCluster, &jobSpec.Template.Spec) + // If we are doing a cloud repo backup, we need to give pgbackrest proper permissions + // to read certificate files + jobSpec.Template.Spec.SecurityContext = postgres.PodSecurityContext(postgresCluster) + pgbackrest.AddConfigToCloudBackupJob(postgresCluster, &jobSpec.Template) } - return jobSpec, nil + return jobSpec } // +kubebuilder:rbac:groups="",resources="configmaps",verbs={delete,list} @@ -2027,14 +2034,12 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster, return nil } -// reconcilePGBackRestConfig is responsible for reconciling the pgBackRest ConfigMaps and Secrets. +// reconcilePGBackRestConfig is responsible for reconciling the pgBackRest ConfigMaps. func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, repoHostName, configHash, serviceName, serviceNamespace string, instanceNames []string) error { - log := logging.FromContext(ctx).WithValues("reconcileResource", "repoConfig") - backrestConfig, err := pgbackrest.CreatePGBackRestConfigMapIntent(ctx, postgresCluster, repoHostName, configHash, serviceName, serviceNamespace, instanceNames) if err != nil { @@ -2048,12 +2053,6 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, return errors.WithStack(err) } - repoHostConfigured := pgbackrest.RepoHostVolumeDefined(postgresCluster) - if !repoHostConfigured { - log.V(1).Info("skipping SSH reconciliation, no repo hosts configured") - return nil - } - return nil } @@ -2455,11 +2454,8 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, backupJob.Labels = labels backupJob.Annotations = annotations - spec, err := generateBackupJobSpecIntent(ctx, postgresCluster, repo, + spec := generateBackupJobSpecIntent(ctx, postgresCluster, repo, serviceAccount.GetName(), labels, annotations, backupOpts...) 
- if err != nil { - return errors.WithStack(err) - } backupJob.Spec = *spec @@ -2547,11 +2543,15 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, replicaRepoReady = (condition.Status == metav1.ConditionTrue) } - // get pod name and container name as needed to exec into the proper pod and create - // the pgBackRest backup - _, containerName, err := getPGBackRestExecSelector(postgresCluster, replicaCreateRepo) - if err != nil { - return errors.WithStack(err) + // TODO: Since we now only exec into the repo host when backing up to a local volume and + // run the backup in the job pod when backing up to a cloud-based repo, we should consider + // using a different value than the container name for the "pgbackrest-config" annotation + // that we attach to these backups + var containerName string + if replicaCreateRepo.Volume != nil { + containerName = naming.PGBackRestRepoContainerName + } else { + containerName = naming.ContainerDatabase } // determine if the dedicated repository host is ready using the repo host ready status @@ -2603,10 +2603,10 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, } } - dedicatedEnabled := pgbackrest.RepoHostVolumeDefined(postgresCluster) // return if no job has been created and the replica repo or the dedicated // repo host is not ready - if job == nil && ((dedicatedEnabled && !dedicatedRepoReady) || !replicaRepoReady) { + if job == nil && ((pgbackrest.RepoHostVolumeDefined(postgresCluster) && !dedicatedRepoReady) || + !replicaRepoReady) { return nil } @@ -2631,11 +2631,8 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, backupJob.Labels = labels backupJob.Annotations = annotations - spec, err := generateBackupJobSpecIntent(ctx, postgresCluster, replicaCreateRepo, + spec := generateBackupJobSpecIntent(ctx, postgresCluster, replicaCreateRepo, serviceAccount.GetName(), labels, annotations) - if err != nil { - return errors.WithStack(err) - } backupJob.Spec = *spec @@ -2817,27 +2814,6 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, return false, nil } -// getPGBackRestExecSelector returns a selector and container name that allows the proper -// Pod (along with a specific container within it) to be found within the Kubernetes -// cluster as needed to exec into the container and run a pgBackRest command. -func getPGBackRestExecSelector(postgresCluster *v1beta1.PostgresCluster, - repo v1beta1.PGBackRestRepo) (labels.Selector, string, error) { - - var err error - var podSelector labels.Selector - var containerName string - - if repo.Volume != nil { - podSelector = naming.PGBackRestDedicatedSelector(postgresCluster.GetName()) - containerName = naming.PGBackRestRepoContainerName - } else { - podSelector, err = naming.AsSelector(naming.ClusterPrimary(postgresCluster.GetName())) - containerName = naming.ContainerDatabase - } - - return podSelector, containerName, err -} - // getRepoHostStatus is responsible for returning the pgBackRest status for the // provided pgBackRest repository host func getRepoHostStatus(repoHost *appsv1.StatefulSet) *v1beta1.RepoHostStatus { @@ -3082,11 +3058,8 @@ func (r *Reconciler) reconcilePGBackRestCronJob( // set backup type (i.e. "full", "diff", "incr") backupOpts := []string{"--type=" + backupType} - jobSpec, err := generateBackupJobSpecIntent(ctx, cluster, repo, + jobSpec := generateBackupJobSpecIntent(ctx, cluster, repo, serviceAccount.GetName(), labels, annotations, backupOpts...) 
- if err != nil { - return errors.WithStack(err) - } // Suspend cronjobs when shutdown or read-only. Any jobs that have already // started will continue. @@ -3119,7 +3092,7 @@ func (r *Reconciler) reconcilePGBackRestCronJob( // set metadata pgBackRestCronJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("CronJob")) - err = errors.WithStack(r.setControllerReference(cluster, pgBackRestCronJob)) + err := errors.WithStack(r.setControllerReference(cluster, pgBackRestCronJob)) if err == nil { err = r.apply(ctx, pgBackRestCronJob) diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index 1bb08a846..6c5747927 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -887,52 +887,6 @@ func TestReconcileStanzaCreate(t *testing.T) { } } -func TestGetPGBackRestExecSelector(t *testing.T) { - - testCases := []struct { - cluster *v1beta1.PostgresCluster - repo v1beta1.PGBackRestRepo - desc string - expectedSelector string - expectedContainer string - }{{ - desc: "volume repo defined dedicated repo host enabled", - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "hippo"}, - }, - repo: v1beta1.PGBackRestRepo{ - Name: "repo1", - Volume: &v1beta1.RepoPVC{}, - }, - expectedSelector: "postgres-operator.crunchydata.com/cluster=hippo," + - "postgres-operator.crunchydata.com/pgbackrest=," + - "postgres-operator.crunchydata.com/pgbackrest-dedicated=", - expectedContainer: "pgbackrest", - }, { - desc: "cloud repo defined no repo host enabled", - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "hippo"}, - }, - repo: v1beta1.PGBackRestRepo{ - Name: "repo1", - S3: &v1beta1.RepoS3{}, - }, - expectedSelector: "postgres-operator.crunchydata.com/cluster=hippo," + - "postgres-operator.crunchydata.com/instance," + - "postgres-operator.crunchydata.com/role=master", - expectedContainer: "database", - }} - - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - selector, container, err := getPGBackRestExecSelector(tc.cluster, tc.repo) - assert.NilError(t, err) - assert.Assert(t, selector.String() == tc.expectedSelector) - assert.Assert(t, container == tc.expectedContainer) - }) - } -} - func TestReconcileReplicaCreateBackup(t *testing.T) { // Garbage collector cleans up test resources before the test completes if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { @@ -2648,13 +2602,83 @@ func TestCopyConfigurationResources(t *testing.T) { func TestGenerateBackupJobIntent(t *testing.T) { ctx := context.Background() + cluster := v1beta1.PostgresCluster{} + cluster.Name = "hippo-test" + cluster.Default() + + // If repo.Volume is nil, the code interprets this as a cloud repo backup, + // therefore, an "empty" input results in a job spec for a cloud repo backup t.Run("empty", func(t *testing.T) { - spec, err := generateBackupJobSpecIntent(ctx, - &v1beta1.PostgresCluster{}, v1beta1.PGBackRestRepo{}, + spec := generateBackupJobSpecIntent(ctx, + &cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` +containers: +- command: + - /bin/pgbackrest + - backup + - --stanza=db + - --repo= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: 
/etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-test-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp + `)) + }) + + t.Run("volumeRepo", func(t *testing.T) { + spec := generateBackupJobSpecIntent(ctx, + &cluster, v1beta1.PGBackRestRepo{ + Volume: &v1beta1.RepoPVC{ + VolumeClaimSpec: v1beta1.VolumeClaimSpec{}, + }, + }, "", nil, nil, ) - assert.NilError(t, err) assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` containers: - command: @@ -2667,10 +2691,10 @@ containers: - name: COMPARE_HASH value: "true" - name: CONTAINER - value: database + value: pgbackrest - name: NAMESPACE - name: SELECTOR - value: postgres-operator.crunchydata.com/cluster=,postgres-operator.crunchydata.com/instance,postgres-operator.crunchydata.com/role=master + value: postgres-operator.crunchydata.com/cluster=hippo-test,postgres-operator.crunchydata.com/pgbackrest=,postgres-operator.crunchydata.com/pgbackrest-dedicated= name: pgbackrest resources: {} securityContext: @@ -2697,11 +2721,23 @@ volumes: sources: - configMap: items: - - key: pgbackrest_instance.conf - path: pgbackrest_instance.conf + - key: pgbackrest_repo.conf + path: pgbackrest_repo.conf - key: config-hash path: config-hash - name: -pgbackrest-config + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-test-pgbackrest `)) }) @@ -2711,12 +2747,11 @@ volumes: ImagePullPolicy: corev1.PullAlways, }, } - job, err := generateBackupJobSpecIntent(ctx, + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Equal(t, job.Template.Spec.Containers[0].ImagePullPolicy, corev1.PullAlways) }) @@ -2727,12 +2762,11 @@ volumes: cluster.Spec.Backups = v1beta1.Backups{ PGBackRest: v1beta1.PGBackRestArchive{}, } - job, err := generateBackupJobSpecIntent(ctx, + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.DeepEqual(t, job.Template.Spec.Containers[0].Resources, corev1.ResourceRequirements{}) }) @@ -2745,12 +2779,11 @@ volumes: }, }, } - job, err := generateBackupJobSpecIntent(ctx, + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.DeepEqual(t, job.Template.Spec.Containers[0].Resources, corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -2785,12 +2818,11 @@ volumes: }, }, } - job, err := generateBackupJobSpecIntent(ctx, + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Equal(t, job.Template.Spec.Affinity, affinity) }) @@ -2799,12 +2831,11 @@ volumes: 
cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ PriorityClassName: initialize.String("some-priority-class"), } - job, err := generateBackupJobSpecIntent(ctx, + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Equal(t, job.Template.Spec.PriorityClassName, "some-priority-class") }) @@ -2818,12 +2849,11 @@ volumes: cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ Tolerations: tolerations, } - job, err := generateBackupJobSpecIntent(ctx, + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.DeepEqual(t, job.Template.Spec.Tolerations, tolerations) }) @@ -2833,18 +2863,16 @@ volumes: t.Run("Undefined", func(t *testing.T) { cluster.Spec.Backups.PGBackRest.Jobs = nil - spec, err := generateBackupJobSpecIntent(ctx, + spec := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Assert(t, spec.TTLSecondsAfterFinished == nil) cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{} - spec, err = generateBackupJobSpecIntent(ctx, + spec = generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Assert(t, spec.TTLSecondsAfterFinished == nil) }) @@ -2853,10 +2881,9 @@ volumes: TTLSecondsAfterFinished: initialize.Int32(0), } - spec, err := generateBackupJobSpecIntent(ctx, + spec := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) if assert.Check(t, spec.TTLSecondsAfterFinished != nil) { assert.Equal(t, *spec.TTLSecondsAfterFinished, int32(0)) } @@ -2867,10 +2894,9 @@ volumes: TTLSecondsAfterFinished: initialize.Int32(100), } - spec, err := generateBackupJobSpecIntent(ctx, + spec := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) if assert.Check(t, spec.TTLSecondsAfterFinished != nil) { assert.Equal(t, *spec.TTLSecondsAfterFinished, int32(100)) } diff --git a/internal/pgbackrest/config.go b/internal/pgbackrest/config.go index 0dd69bbf4..0fdb407ff 100644 --- a/internal/pgbackrest/config.go +++ b/internal/pgbackrest/config.go @@ -38,6 +38,10 @@ const ( // repository host CMRepoKey = "pgbackrest_repo.conf" + // CMCloudRepoKey is the name of the pgBackRest configuration file used by backup jobs + // for cloud repos + CMCloudRepoKey = "pgbackrest_cloud.conf" + // configDirectory is the pgBackRest configuration directory. 
configDirectory = "/etc/pgbackrest/conf.d" @@ -69,6 +73,7 @@ const ( // pgbackrest_job.conf is used by certain jobs, such as stanza create and backup // pgbackrest_primary.conf is used by the primary database pod // pgbackrest_repo.conf is used by the pgBackRest repository pod +// pgbackrest_cloud.conf is used by cloud repo backup jobs func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, repoHostName, configHash, serviceName, serviceNamespace string, instanceNames []string) (*corev1.ConfigMap, error) { @@ -96,7 +101,6 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet // create an empty map for the config data initialize.Map(&cm.Data) - addDedicatedHost := RepoHostVolumeDefined(postgresCluster) pgdataDir := postgres.DataDirectory(postgresCluster) // Port will always be populated, since the API will set a default of 5432 if not provided pgPort := *postgresCluster.Spec.Port @@ -113,12 +117,10 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet // PostgreSQL instances that have not rolled out expect to mount a server // config file. Always populate that file so those volumes stay valid and // Kubernetes propagates their contents to those pods. - cm.Data[serverConfigMapKey] = "" - - if addDedicatedHost && repoHostName != "" { - cm.Data[serverConfigMapKey] = iniGeneratedWarning + - serverConfig(postgresCluster).String() + cm.Data[serverConfigMapKey] = iniGeneratedWarning + + serverConfig(postgresCluster).String() + if RepoHostVolumeDefined(postgresCluster) && repoHostName != "" { cm.Data[CMRepoKey] = iniGeneratedWarning + populateRepoHostConfigurationMap( serviceName, serviceNamespace, @@ -129,8 +131,7 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet postgresCluster.Spec.Backups.PGBackRest.Global, ).String() - if RepoHostVolumeDefined(postgresCluster) && - collector.OpenTelemetryLogsOrMetricsEnabled(ctx, postgresCluster) { + if collector.OpenTelemetryLogsOrMetricsEnabled(ctx, postgresCluster) { err = collector.AddToConfigMap(ctx, collector.NewConfigForPgBackrestRepoHostPod( ctx, @@ -156,6 +157,18 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet } } + if CloudRepoDefined(postgresCluster) { + cm.Data[CMCloudRepoKey] = iniGeneratedWarning + + populateCloudRepoConfigurationMap( + serviceName, serviceNamespace, pgdataDir, + config.FetchKeyCommand(&postgresCluster.Spec), + strconv.Itoa(postgresCluster.Spec.PostgresVersion), + pgPort, instanceNames, + postgresCluster.Spec.Backups.PGBackRest.Repos, + postgresCluster.Spec.Backups.PGBackRest.Global, + ).String() + } + cm.Data[ConfigHashKey] = configHash return cm, err @@ -504,6 +517,64 @@ func populateRepoHostConfigurationMap( } } +func populateCloudRepoConfigurationMap( + serviceName, serviceNamespace, pgdataDir, + fetchKeyCommand, postgresVersion string, + pgPort int32, pgHosts []string, repos []v1beta1.PGBackRestRepo, + globalConfig map[string]string, +) iniSectionSet { + + global := iniMultiSet{} + stanza := iniMultiSet{} + + for _, repo := range repos { + if repo.Volume != nil { + continue + } + + global.Set(repo.Name+"-path", defaultRepo1Path+repo.Name) + + for option, val := range getExternalRepoConfigs(repo) { + global.Set(option, val) + } + } + + global.Set("log-level-file", "off") + + for option, val := range globalConfig { + global.Set(option, val) + } + + // set the configs for all PG hosts + for i, pgHost := range pgHosts { + // TODO(cbandy): pass a FQDN in 
already. + pgHostFQDN := pgHost + "-0." + + serviceName + "." + serviceNamespace + ".svc." + + naming.KubernetesClusterDomain(context.Background()) + + stanza.Set(fmt.Sprintf("pg%d-host", i+1), pgHostFQDN) + stanza.Set(fmt.Sprintf("pg%d-host-type", i+1), "tls") + stanza.Set(fmt.Sprintf("pg%d-host-ca-file", i+1), certAuthorityAbsolutePath) + stanza.Set(fmt.Sprintf("pg%d-host-cert-file", i+1), certClientAbsolutePath) + stanza.Set(fmt.Sprintf("pg%d-host-key-file", i+1), certClientPrivateKeyAbsolutePath) + + stanza.Set(fmt.Sprintf("pg%d-path", i+1), pgdataDir) + stanza.Set(fmt.Sprintf("pg%d-port", i+1), fmt.Sprint(pgPort)) + stanza.Set(fmt.Sprintf("pg%d-socket-path", i+1), postgres.SocketDirectory) + + if fetchKeyCommand != "" { + stanza.Set("archive-header-check", "n") + stanza.Set("page-header-check", "n") + stanza.Set("pg-version-force", postgresVersion) + } + } + + return iniSectionSet{ + "global": global, + DefaultStanzaName: stanza, + } +} + // getExternalRepoConfigs returns a map containing the configuration settings for an external // pgBackRest repository as defined in the PostgresCluster spec func getExternalRepoConfigs(repo v1beta1.PGBackRestRepo) map[string]string { diff --git a/internal/pgbackrest/config_test.go b/internal/pgbackrest/config_test.go index cdbaa725a..b56beaa8c 100644 --- a/internal/pgbackrest/config_test.go +++ b/internal/pgbackrest/config_test.go @@ -33,9 +33,11 @@ func TestCreatePGBackRestConfigMapIntent(t *testing.T) { domain := naming.KubernetesClusterDomain(context.Background()) - t.Run("NoVolumeRepo", func(t *testing.T) { + t.Run("NoRepos", func(t *testing.T) { + // We always create the config for the pgbackrest instance and server cluster := cluster.DeepCopy() cluster.Spec.Backups.PGBackRest.Repos = nil + cluster.UID = "piano" configmap, err := CreatePGBackRestConfigMapIntent(context.Background(), cluster, "", "number", "pod-service-name", "test-ns", @@ -43,11 +45,46 @@ func TestCreatePGBackRestConfigMapIntent(t *testing.T) { assert.NilError(t, err) assert.Equal(t, configmap.Data["config-hash"], "number") - assert.Equal(t, configmap.Data["pgbackrest-server.conf"], "") + assert.Equal(t, configmap.Data["pgbackrest-server.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +tls-server-address = 0.0.0.0 +tls-server-auth = pgbackrest@piano=* +tls-server-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +tls-server-cert-file = /etc/pgbackrest/server/server-tls.crt +tls-server-key-file = /etc/pgbackrest/server/server-tls.key + +[global:server] +log-level-console = detail +log-level-file = off +log-level-stderr = error +log-timestamp = n + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_instance.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. 
+ +[global] +archive-async = y +log-path = /pgdata/pgbackrest/log +spool-path = /pgdata/pgbackrest-spool + +[db] +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], "") + assert.Equal(t, configmap.Data["pgbackrest_cloud.conf"], "") }) - t.Run("NoVolumeRepoCloudRepoPresent", func(t *testing.T) { + t.Run("CloudRepoPresentNoVolumeRepo", func(t *testing.T) { cluster := cluster.DeepCopy() + cluster.UID = "ukulele" cluster.Spec.Backups.PGBackRest.Global = map[string]string{ "repo1-test": "something", } @@ -71,8 +108,23 @@ func TestCreatePGBackRestConfigMapIntent(t *testing.T) { }) assert.Equal(t, configmap.Data["config-hash"], "anumber") - assert.Equal(t, configmap.Data["pgbackrest-server.conf"], "") - assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], "") + assert.Equal(t, configmap.Data["pgbackrest-server.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +tls-server-address = 0.0.0.0 +tls-server-auth = pgbackrest@ukulele=* +tls-server-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +tls-server-cert-file = /etc/pgbackrest/server/server-tls.crt +tls-server-key-file = /etc/pgbackrest/server/server-tls.key + +[global:server] +log-level-console = detail +log-level-file = off +log-level-stderr = error +log-timestamp = n + `, "\t\n")+"\n") assert.Equal(t, configmap.Data["pgbackrest_instance.conf"], strings.Trim(` # Generated by postgres-operator. DO NOT EDIT. @@ -92,10 +144,120 @@ pg1-path = /pgdata/pg12 pg1-port = 2345 pg1-socket-path = /tmp/postgres `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_cloud.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +log-level-file = off +repo1-gcs-bucket = g-bucket +repo1-path = /pgbackrest/repo1 +repo1-test = something +repo1-type = gcs + +[db] +pg1-host = some-instance-0.pod-service-name.test-ns.svc.`+domain+` +pg1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +pg1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +pg1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +pg1-host-type = tls +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], "") + }) + + t.Run("VolumeRepoPresentNoCloudRepo", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.UID = "guitar" + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + } + + configmap, err := CreatePGBackRestConfigMapIntent(context.Background(), cluster, + "repo-hostname", "anumber", "pod-service-name", "test-ns", + []string{"some-instance"}) + + assert.NilError(t, err) + assert.DeepEqual(t, configmap.Annotations, map[string]string{}) + assert.DeepEqual(t, configmap.Labels, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "hippo-dance", + "postgres-operator.crunchydata.com/pgbackrest": "", + "postgres-operator.crunchydata.com/pgbackrest-config": "", + }) + + assert.Equal(t, configmap.Data["config-hash"], "anumber") + assert.Equal(t, configmap.Data["pgbackrest-server.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. 
+ +[global] +tls-server-address = 0.0.0.0 +tls-server-auth = pgbackrest@guitar=* +tls-server-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +tls-server-cert-file = /etc/pgbackrest/server/server-tls.crt +tls-server-key-file = /etc/pgbackrest/server/server-tls.key + +[global:server] +log-level-console = detail +log-level-file = off +log-level-stderr = error +log-timestamp = n + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_instance.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +archive-async = y +log-path = /pgdata/pgbackrest/log +repo1-host = repo-hostname-0.pod-service-name.test-ns.svc.`+domain+` +repo1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +repo1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +repo1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +repo1-host-type = tls +repo1-host-user = postgres +repo1-path = /pgbackrest/repo1 +spool-path = /pgdata/pgbackrest-spool + +[db] +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +log-path = /pgbackrest/repo1/log +repo1-path = /pgbackrest/repo1 + +[db] +pg1-host = some-instance-0.pod-service-name.test-ns.svc.`+domain+` +pg1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +pg1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +pg1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +pg1-host-type = tls +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_cloud.conf"], "") }) - t.Run("DedicatedRepoHost", func(t *testing.T) { + t.Run("DedicatedRepoHostAndCloudRepos", func(t *testing.T) { cluster := cluster.DeepCopy() + cluster.UID = "bass" cluster.Spec.Backups.PGBackRest.Global = map[string]string{ "repo3-test": "something", } @@ -133,6 +295,25 @@ pg1-socket-path = /tmp/postgres }) assert.Equal(t, configmap.Data["config-hash"], "abcde12345") + + assert.Equal(t, configmap.Data["pgbackrest-server.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +tls-server-address = 0.0.0.0 +tls-server-auth = pgbackrest@bass=* +tls-server-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +tls-server-cert-file = /etc/pgbackrest/server/server-tls.crt +tls-server-key-file = /etc/pgbackrest/server/server-tls.key + +[global:server] +log-level-console = detail +log-level-file = off +log-level-stderr = error +log-timestamp = n + `, "\t\n")+"\n") + assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], strings.Trim(` # Generated by postgres-operator. DO NOT EDIT. # Your changes will not be saved. @@ -195,6 +376,36 @@ spool-path = /pgdata/pgbackrest-spool [db] pg1-path = /pgdata/pg12 pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_cloud.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. 
+ +[global] +log-level-file = off +repo2-azure-container = a-container +repo2-path = /pgbackrest/repo2 +repo2-type = azure +repo3-gcs-bucket = g-bucket +repo3-path = /pgbackrest/repo3 +repo3-test = something +repo3-type = gcs +repo4-path = /pgbackrest/repo4 +repo4-s3-bucket = s-bucket +repo4-s3-endpoint = endpoint-s +repo4-s3-region = earth +repo4-type = s3 + +[db] +pg1-host = some-instance-0.pod-service-name.test-ns.svc.`+domain+` +pg1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +pg1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +pg1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +pg1-host-type = tls +pg1-path = /pgdata/pg12 +pg1-port = 2345 pg1-socket-path = /tmp/postgres `, "\t\n")+"\n") }) diff --git a/internal/pgbackrest/reconcile.go b/internal/pgbackrest/reconcile.go index 907012ac1..426e1312f 100644 --- a/internal/pgbackrest/reconcile.go +++ b/internal/pgbackrest/reconcile.go @@ -103,6 +103,7 @@ func AddConfigToInstancePod( configmap.ConfigMap.Items = []corev1.KeyToPath{ {Key: CMInstanceKey, Path: CMInstanceKey}, {Key: ConfigHashKey, Path: ConfigHashKey}, + {Key: serverConfigMapKey, Path: serverConfigProjectionPath}, } // As the cluster transitions from having a repository host to having none, @@ -111,17 +112,9 @@ func AddConfigToInstancePod( // volumes stay valid and Kubernetes propagates their contents to those pods. secret := corev1.VolumeProjection{Secret: &corev1.SecretProjection{}} secret.Secret.Name = naming.PGBackRestSecret(cluster).Name + secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) secret.Secret.Optional = initialize.Bool(true) - if RepoHostVolumeDefined(cluster) { - configmap.ConfigMap.Items = append( - configmap.ConfigMap.Items, corev1.KeyToPath{ - Key: serverConfigMapKey, - Path: serverConfigProjectionPath, - }) - secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) - } - // Start with a copy of projections specified in the cluster. Items later in // the list take precedence over earlier items (that is, last write wins). // - https://kubernetes.io/docs/concepts/storage/volumes/#projected @@ -137,7 +130,7 @@ func AddConfigToInstancePod( addConfigVolumeAndMounts(pod, sources) } -// AddConfigToRepoPod adds and mounts the pgBackRest configuration volume for +// AddConfigToRepoPod adds and mounts the pgBackRest configuration volumes for // the dedicated repository host of cluster to pod. The pgBackRest containers // must already be in pod. func AddConfigToRepoPod( @@ -164,6 +157,33 @@ func AddConfigToRepoPod( addConfigVolumeAndMounts(pod, append(sources, configmap, secret)) } +// AddConfigToCloudBackupJob adds and mounts the pgBackRest configuration volumes +// to the backup job for creating a backup to a cloud repo. +func AddConfigToCloudBackupJob( + cluster *v1beta1.PostgresCluster, podTemplateSpec *corev1.PodTemplateSpec, +) { + configmap := corev1.VolumeProjection{ConfigMap: &corev1.ConfigMapProjection{}} + configmap.ConfigMap.Name = naming.PGBackRestConfig(cluster).Name + configmap.ConfigMap.Items = []corev1.KeyToPath{ + {Key: CMCloudRepoKey, Path: CMCloudRepoKey}, + } + + secret := corev1.VolumeProjection{Secret: &corev1.SecretProjection{}} + secret.Secret.Name = naming.PGBackRestSecret(cluster).Name + secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) + + // Start with a copy of projections specified in the cluster. Items later in + // the list take precedence over earlier items (that is, last write wins). 
+ // - https://kubernetes.io/docs/concepts/storage/volumes/#projected + sources := append([]corev1.VolumeProjection{}, + cluster.Spec.Backups.PGBackRest.Configuration...) + + addConfigVolumeAndMounts(&podTemplateSpec.Spec, append(sources, configmap, secret)) + + // Add tmp directory for pgbackrest lock files + AddTMPEmptyDir(podTemplateSpec) +} + // AddConfigToRestorePod adds and mounts the pgBackRest configuration volume // for the restore job of cluster to pod. The pgBackRest containers must // already be in pod. @@ -413,15 +433,13 @@ func InstanceCertificates(ctx context.Context, ) error { var err error - if RepoHostVolumeDefined(inCluster) { - initialize.Map(&outInstanceCertificates.Data) + initialize.Map(&outInstanceCertificates.Data) - if err == nil { - outInstanceCertificates.Data[certInstanceSecretKey], err = certFile(inDNS) - } - if err == nil { - outInstanceCertificates.Data[certInstancePrivateKeySecretKey], err = certFile(inDNSKey) - } + if err == nil { + outInstanceCertificates.Data[certInstanceSecretKey], err = certFile(inDNS) + } + if err == nil { + outInstanceCertificates.Data[certInstancePrivateKeySecretKey], err = certFile(inDNSKey) } return err @@ -517,38 +535,36 @@ func Secret(ctx context.Context, var err error // Save the CA and generate a TLS client certificate for the entire cluster. - if inRepoHost != nil { - initialize.Map(&outSecret.Data) - - // The server verifies its "tls-server-auth" option contains the common - // name (CN) of the certificate presented by a client. The entire - // cluster uses a single client certificate so the "tls-server-auth" - // option can stay the same when PostgreSQL instances and repository - // hosts are added or removed. - leaf := &pki.LeafCertificate{} - commonName := clientCommonName(inCluster) - dnsNames := []string{commonName} - - if err == nil { - // Unmarshal and validate the stored leaf. These first errors can - // be ignored because they result in an invalid leaf which is then - // correctly regenerated. - _ = leaf.Certificate.UnmarshalText(inSecret.Data[certClientSecretKey]) - _ = leaf.PrivateKey.UnmarshalText(inSecret.Data[certClientPrivateKeySecretKey]) - - leaf, err = inRoot.RegenerateLeafWhenNecessary(leaf, commonName, dnsNames) - err = errors.WithStack(err) - } + initialize.Map(&outSecret.Data) + + // The server verifies its "tls-server-auth" option contains the common + // name (CN) of the certificate presented by a client. The entire + // cluster uses a single client certificate so the "tls-server-auth" + // option can stay the same when PostgreSQL instances and repository + // hosts are added or removed. + leaf := &pki.LeafCertificate{} + commonName := clientCommonName(inCluster) + dnsNames := []string{commonName} + + if err == nil { + // Unmarshal and validate the stored leaf. These first errors can + // be ignored because they result in an invalid leaf which is then + // correctly regenerated. 
+ _ = leaf.Certificate.UnmarshalText(inSecret.Data[certClientSecretKey]) + _ = leaf.PrivateKey.UnmarshalText(inSecret.Data[certClientPrivateKeySecretKey]) + + leaf, err = inRoot.RegenerateLeafWhenNecessary(leaf, commonName, dnsNames) + err = errors.WithStack(err) + } - if err == nil { - outSecret.Data[certAuthoritySecretKey], err = certFile(inRoot.Certificate) - } - if err == nil { - outSecret.Data[certClientPrivateKeySecretKey], err = certFile(leaf.PrivateKey) - } - if err == nil { - outSecret.Data[certClientSecretKey], err = certFile(leaf.Certificate) - } + if err == nil { + outSecret.Data[certAuthoritySecretKey], err = certFile(inRoot.Certificate) + } + if err == nil { + outSecret.Data[certClientPrivateKeySecretKey], err = certFile(leaf.PrivateKey) + } + if err == nil { + outSecret.Data[certClientSecretKey], err = certFile(leaf.Certificate) } // Generate a TLS server certificate for each repository host. diff --git a/internal/pgbackrest/reconcile_test.go b/internal/pgbackrest/reconcile_test.go index 530541706..fbd146475 100644 --- a/internal/pgbackrest/reconcile_test.go +++ b/internal/pgbackrest/reconcile_test.go @@ -231,7 +231,20 @@ func TestAddConfigToInstancePod(t *testing.T) { path: pgbackrest_instance.conf - key: config-hash path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest + optional: true `)) }) @@ -254,7 +267,20 @@ func TestAddConfigToInstancePod(t *testing.T) { path: pgbackrest_instance.conf - key: config-hash path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest + optional: true `)) }) @@ -373,6 +399,84 @@ func TestAddConfigToRepoPod(t *testing.T) { }) } +func TestAddConfigToCloudBackupJob(t *testing.T) { + cluster := v1beta1.PostgresCluster{} + cluster.Name = "hippo" + cluster.Default() + + podTemplate := corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "other"}, + {Name: "pgbackrest"}, + }, + }, + } + + alwaysExpect := func(t testing.TB, result *corev1.PodSpec) { + // Only Containers and Volumes fields have changed. 
+		assert.DeepEqual(t, podTemplate.Spec, *result, cmpopts.IgnoreFields(podTemplate.Spec, "Containers", "Volumes"))
+
+		// Only the pgBackRest container has the config mount; the tmp dir is mounted in all containers.
+		assert.Assert(t, cmp.MarshalMatches(result.Containers, `
+- name: other
+  resources: {}
+  volumeMounts:
+  - mountPath: /tmp
+    name: tmp
+- name: pgbackrest
+  resources: {}
+  volumeMounts:
+  - mountPath: /etc/pgbackrest/conf.d
+    name: pgbackrest-config
+    readOnly: true
+  - mountPath: /tmp
+    name: tmp
+	`))
+	}
+
+	t.Run("CustomProjections", func(t *testing.T) {
+		custom := corev1.ConfigMapProjection{}
+		custom.Name = "custom-configmap"
+
+		cluster := cluster.DeepCopy()
+		cluster.Spec.Backups.PGBackRest.Configuration = []corev1.VolumeProjection{
+			{ConfigMap: &custom},
+		}
+
+		out := podTemplate.DeepCopy()
+		AddConfigToCloudBackupJob(cluster, out)
+		alwaysExpect(t, &out.Spec)
+
+		// Cloud backup configuration files and client certificates
+		// after custom projections.
+		assert.Assert(t, cmp.MarshalMatches(out.Spec.Volumes, `
+- name: pgbackrest-config
+  projected:
+    sources:
+    - configMap:
+        name: custom-configmap
+    - configMap:
+        items:
+        - key: pgbackrest_cloud.conf
+          path: pgbackrest_cloud.conf
+        name: hippo-pgbackrest-config
+    - secret:
+        items:
+        - key: pgbackrest.ca-roots
+          path: ~postgres-operator/tls-ca.crt
+        - key: pgbackrest-client.crt
+          path: ~postgres-operator/client-tls.crt
+        - key: pgbackrest-client.key
+          mode: 384
+          path: ~postgres-operator/client-tls.key
+        name: hippo-pgbackrest
+- emptyDir:
+    sizeLimit: 16Mi
+  name: tmp`))
+	})
+}
+
 func TestAddConfigToRestorePod(t *testing.T) {
 	cluster := v1beta1.PostgresCluster{}
 	cluster.Name = "source"
@@ -1004,10 +1108,13 @@ func TestSecret(t *testing.T) {
 	assert.NilError(t, err)
 
 	t.Run("NoRepoHost", func(t *testing.T) {
-		// Nothing happens when there is no repository host.
-		constant := intent.DeepCopy()
+		// The pgBackRest client certificates and CA are always generated.
 		assert.NilError(t, Secret(ctx, cluster, nil, root, existing, intent))
-		assert.DeepEqual(t, constant, intent)
+		assert.Assert(t, len(intent.Data["pgbackrest-client.crt"]) > 0)
+		assert.Assert(t, len(intent.Data["pgbackrest-client.key"]) > 0)
+		assert.Assert(t, len(intent.Data["pgbackrest.ca-roots"]) > 0)
+		assert.Assert(t, len(intent.Data["pgbackrest-repo-host.crt"]) == 0)
+		assert.Assert(t, len(intent.Data["pgbackrest-repo-host.key"]) == 0)
 	})
 
 	host := new(appsv1.StatefulSet)
diff --git a/internal/pgbackrest/util.go b/internal/pgbackrest/util.go
index a3b515ec5..cd5fd1126 100644
--- a/internal/pgbackrest/util.go
+++ b/internal/pgbackrest/util.go
@@ -10,16 +10,21 @@ import (
 	"io"
 
 	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/util/rand"
 
 	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )
 
+// TODO: Provide explanation for this specific size. Should a tmp dir ever be smaller or larger?
+var tmpDirSizeLimit = resource.MustParse("16Mi")
+
 // maxPGBackrestRepos is the maximum number of repositories that can be configured according to the
 // multi-repository solution implemented within pgBackRest
 const maxPGBackrestRepos = 4
 
-// RepoHostVolumeDefined determines whether not at least one pgBackRest dedicated
+// RepoHostVolumeDefined determines whether or not at least one pgBackRest dedicated
 // repository host volume has been defined in the PostgresCluster manifest.
 func RepoHostVolumeDefined(postgresCluster *v1beta1.PostgresCluster) bool {
 	for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos {
@@ -30,6 +35,17 @@ func RepoHostVolumeDefined(postgresCluster *v1beta1.PostgresCluster) bool {
 	return false
 }
 
+// CloudRepoDefined determines whether or not at least one pgBackRest cloud-based
+// repository has been defined in the PostgresCluster manifest.
+func CloudRepoDefined(postgresCluster *v1beta1.PostgresCluster) bool {
+	for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos {
+		if repo.Volume == nil {
+			return true
+		}
+	}
+	return false
+}
+
 // CalculateConfigHashes calculates hashes for any external pgBackRest repository configuration
 // present in the PostgresCluster spec (e.g. configuration for Azure, GCR and/or S3 repositories).
 // Additionally it returns a hash of the hashes for each external repository.
@@ -100,3 +116,39 @@ func safeHash32(content func(w io.Writer) error) (string, error) {
 	}
 	return rand.SafeEncodeString(fmt.Sprint(hash.Sum32())), nil
 }
+
+// AddTMPEmptyDir adds a "tmp" EmptyDir volume to the provided Pod template and then adds a
+// volume mount at /tmp to all containers defined within the Pod template.
+// The '/tmp' directory is currently utilized for the following:
+//   - As the pgBackRest lock directory (this is the default lock location for pgBackRest)
+//   - The location where the replication client certificates can be loaded with the proper
+//     permissions set
+//
+// This function was copied from the postgrescluster package.
+func AddTMPEmptyDir(template *corev1.PodTemplateSpec) {
+
+	template.Spec.Volumes = append(template.Spec.Volumes, corev1.Volume{
+		Name: "tmp",
+		VolumeSource: corev1.VolumeSource{
+			EmptyDir: &corev1.EmptyDirVolumeSource{
+				SizeLimit: &tmpDirSizeLimit,
+			},
+		},
+	})
+
+	for i := range template.Spec.Containers {
+		template.Spec.Containers[i].VolumeMounts = append(template.Spec.Containers[i].VolumeMounts,
+			corev1.VolumeMount{
+				Name:      "tmp",
+				MountPath: "/tmp",
+			})
+	}
+
+	for i := range template.Spec.InitContainers {
+		template.Spec.InitContainers[i].VolumeMounts = append(template.Spec.InitContainers[i].VolumeMounts,
+			corev1.VolumeMount{
+				Name:      "tmp",
+				MountPath: "/tmp",
+			})
+	}
+}
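
Reviewer note (not part of the patch): a minimal sketch of how the helpers introduced above could fit together when assembling a cloud backup Job pod template. It uses only functions that appear in this diff (CloudRepoDefined, AddConfigToCloudBackupJob, which itself calls AddTMPEmptyDir); the generateCloudBackupPodTemplate name, the package placement, and the container layout are hypothetical, and the file would need to live inside the operator module to import internal/pgbackrest.

// Sketch only: illustrative caller of the new pgbackrest helpers.
package postgrescluster

import (
	corev1 "k8s.io/api/core/v1"

	"github.com/crunchydata/postgres-operator/internal/pgbackrest"
	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

// generateCloudBackupPodTemplate is a hypothetical helper, not code from this change.
func generateCloudBackupPodTemplate(cluster *v1beta1.PostgresCluster) *corev1.PodTemplateSpec {
	// Repos without a Volume are treated as cloud repos (S3, GCS, Azure);
	// when none are defined there is no cloud backup Job to build.
	if !pgbackrest.CloudRepoDefined(cluster) {
		return nil
	}

	template := &corev1.PodTemplateSpec{
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{Name: "pgbackrest"}},
		},
	}

	// Projects pgbackrest_cloud.conf and the client TLS certificates into the
	// pgbackrest container and, via AddTMPEmptyDir, mounts an emptyDir at /tmp
	// in every container for pgBackRest's default lock directory.
	pgbackrest.AddConfigToCloudBackupJob(cluster, template)

	return template
}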