diff --git a/docs/administrator.md b/docs/administrator.md
index eeccadb50..eadf38edb 100644
--- a/docs/administrator.md
+++ b/docs/administrator.md
@@ -603,9 +603,9 @@ spec:
 
 The operator will assign a set of environment variables to the database pods
 that cannot be overridden to guarantee core functionality. Only variables with
-'WAL_' and 'LOG_' prefixes can be customized, to allow backup and log shipping
-to be specified differently. There are three ways to specify extra environment
-variables (or override existing ones) for database pods:
+'WAL_' and 'LOG_' prefixes can be customized to allow for backup and log
+shipping to be specified differently. There are three ways to specify extra
+environment variables (or override existing ones) for database pods:
 
 * [Via ConfigMap](#via-configmap)
 * [Via Secret](#via-secret)
@@ -975,7 +975,7 @@ generated automatically.
 `WALG_S3_PREFIX` is identical to `WALE_S3_PREFIX`. `SCOPE` is the Postgres cluster name.
 
 :warning: If both `AWS_REGION` and `AWS_ENDPOINT` or `WALE_S3_ENDPOINT` are
-defined, backups with WAL-E will fail. You can fix it by switching to WAL-G
+defined backups with WAL-E will fail. You can fix it by switching to WAL-G
 with `USE_WALG_BACKUP: "true"`.
 
 ### Google Cloud Platform setup
diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md
index d337eda79..08cb37e03 100644
--- a/docs/reference/operator_parameters.md
+++ b/docs/reference/operator_parameters.md
@@ -648,7 +648,7 @@ yet officially supported.
   AWS region used to store EBS volumes. The default is `eu-central-1`. Note,
   this option is not meant for specifying the AWS region for backups and
   restore, since it can be separate from the EBS region. You have to define
-  AWS_REGION as a [custom environment variable](https://github.com/zalando/postgres-operator/blob/master/docs/administrator.md#custom-pod-environment-variables).
+  AWS_REGION as a [custom environment variable](../administrator.md#custom-pod-environment-variables).
 
 * **additional_secret_mount**
   Additional Secret (aws or gcp credentials) to mount in the pod.
diff --git a/docs/user.md b/docs/user.md
index 99ceb0090..dd9af564b 100644
--- a/docs/user.md
+++ b/docs/user.md
@@ -770,8 +770,7 @@ spec:
 
 Here `cluster` is a name of a source cluster that is going to be cloned. A new
 cluster will be cloned from S3, using the latest backup before the `timestamp`.
-Note, that a time zone is required for `timestamp` in the format of `+00:00`
-which is UTC.
+Note, a time zone is required for `timestamp` in the format of `+00:00` (UTC).
 
 The operator will try to find the WAL location based on the configured
 `wal_[s3|gs]_bucket` or `wal_az_storage_account` and the specified `uid`.
@@ -790,7 +789,7 @@
 configuration you can specify the full path under `s3_wal_path`. For
 [Google Cloud Plattform](administrator.md#google-cloud-platform-setup) or
 [Azure](administrator.md#azure-setup) it can only be set globally with
 [custom Pod environment variables](administrator.md#custom-pod-environment-variables)
-or locally in the Postgres manifest's [`env`]() section.
+or locally in the Postgres manifest's [`env`](administrator.md#via-postgres-cluster-manifest) section.
 
 For non AWS S3 following settings can be set to support cloning from other S3
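
The `env` link added to `docs/user.md` above points at the "Via Postgres Cluster Manifest" way of setting custom pod environment variables. As a rough, illustrative sketch (placeholder names, values, UID and timestamp; assuming the `env` list sits directly under `spec` as the docs imply), such a manifest could look like:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  env:
    # names are upper-cased by the operator; per the docs above, operator-generated
    # variables can only be overridden if they carry a WAL_ or LOG_ prefix
    - name: wal_s3_bucket
      value: my-custom-wal-bucket      # placeholder bucket name
  clone:
    cluster: "acid-source-cluster"                # placeholder source cluster
    uid: "efd12e58-5786-11e8-b5a7-06148230260c"   # placeholder UID
    timestamp: "2022-04-01T12:00:00+00:00"        # time zone offset (+00:00 = UTC) is required
```
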
diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go
index 0e6cf3a03..636e5edde 100644
--- a/pkg/cluster/k8sres.go
+++ b/pkg/cluster/k8sres.go
@@ -937,14 +937,14 @@ func (c *Cluster) generateSpiloPodEnvVars(
 }
 
 func appendEnvVars(envs []v1.EnvVar, appEnv ...v1.EnvVar) []v1.EnvVar {
-	jenvs := envs
+	collectedEnvs := envs
 	for _, env := range appEnv {
 		env.Name = strings.ToUpper(env.Name)
-		if !isEnvVarPresent(jenvs, env.Name) {
-			jenvs = append(jenvs, env)
+		if !isEnvVarPresent(collectedEnvs, env.Name) {
+			collectedEnvs = append(collectedEnvs, env)
 		}
 	}
-	return jenvs
+	return collectedEnvs
 }
 
 func isEnvVarPresent(envs []v1.EnvVar, key string) bool {
@@ -961,7 +961,7 @@ func (c *Cluster) getPodEnvironmentConfigMapVariables() ([]v1.EnvVar, error) {
 	configMapPodEnvVarsList := make([]v1.EnvVar, 0)
 
 	if c.OpConfig.PodEnvironmentConfigMap.Name == "" {
-		return configMapPodEnvVarsList, nil
+		return nil, nil
 	}
 
 	cm, err := c.KubeClient.ConfigMaps(c.OpConfig.PodEnvironmentConfigMap.Namespace).Get(
@@ -977,7 +977,7 @@ func (c *Cluster) getPodEnvironmentConfigMapVariables() ([]v1.EnvVar, error) {
 			metav1.GetOptions{})
 		}
 		if err != nil {
-			return configMapPodEnvVarsList, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err)
+			return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err)
 		}
 	}
@@ -993,7 +993,7 @@ func (c *Cluster) getPodEnvironmentSecretVariables() ([]v1.EnvVar, error) {
 	secretPodEnvVarsList := make([]v1.EnvVar, 0)
 
 	if c.OpConfig.PodEnvironmentSecret == "" {
-		return secretPodEnvVarsList, nil
+		return nil, nil
 	}
 
 	secret := &v1.Secret{}
@@ -1019,7 +1019,7 @@ func (c *Cluster) getPodEnvironmentSecretVariables() ([]v1.EnvVar, error) {
 		err = errors.Wrap(notFoundErr, err.Error())
 	}
 	if err != nil {
-		return secretPodEnvVarsList, errors.Wrap(err, "could not read Secret PodEnvironmentSecretName")
+		return nil, errors.Wrap(err, "could not read Secret PodEnvironmentSecretName")
 	}
 
 	for k := range secret.Data {
@@ -1859,7 +1859,7 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
 		c.logger.Debugf("found WALAZStorageAccount %s - will set CLONE_AZURE_STORAGE_ACCOUNT", c.OpConfig.WALAZStorageAccount)
 		result = append(result, v1.EnvVar{Name: "CLONE_AZURE_STORAGE_ACCOUNT", Value: c.OpConfig.WALAZStorageAccount})
 	} else {
-		c.logger.Error("cannot figure out S3 or GS bucket or AZ storage account. All are empty in config.")
+		c.logger.Error("cannot figure out S3 or GS bucket or AZ storage account. All options are empty in the config.")
 	}
 
 	// append suffix because WAL location name is not the whole path
@@ -1915,7 +1915,7 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescript
 	result := make([]v1.EnvVar, 0)
 
 	if description.StandbyHost != "" {
-		c.logger.Info("preparing standby streaming from remote primary")
+		c.logger.Info("standby cluster streaming from remote primary")
 		result = append(result, v1.EnvVar{
 			Name:  "STANDBY_HOST",
 			Value: description.StandbyHost,
@@ -1927,7 +1927,7 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescript
 			})
 		}
 	} else {
-		c.logger.Info("preparing standby streaming from WAL location")
+		c.logger.Info("standby cluster streaming from WAL location")
 		if description.S3WalPath != "" {
 			result = append(result, v1.EnvVar{
 				Name:  "STANDBY_WALE_S3_PREFIX",
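
The `appendEnvVars` changes above only rename `jenvs` to `collectedEnvs`; the merge semantics stay the same: names are upper-cased and the first occurrence of a name wins, so variables appended later cannot override ones already collected. A standalone sketch of that behaviour (plain Go with a simplified `envVar` stand-in for `v1.EnvVar`; this is not operator code) is:

```go
package main

import (
	"fmt"
	"strings"
)

// envVar is a simplified stand-in for k8s.io/api/core/v1.EnvVar.
type envVar struct {
	Name, Value string
}

// mergeEnvVars mirrors the semantics of appendEnvVars above: names are
// upper-cased and a variable is skipped when its name is already present,
// so earlier sources take precedence over later ones.
func mergeEnvVars(envs []envVar, appEnv ...envVar) []envVar {
	collected := envs
	for _, env := range appEnv {
		env.Name = strings.ToUpper(env.Name)
		present := false
		for _, e := range collected {
			if e.Name == env.Name {
				present = true
				break
			}
		}
		if !present {
			collected = append(collected, env)
		}
	}
	return collected
}

func main() {
	generated := []envVar{{Name: "WAL_S3_BUCKET", Value: "operator-bucket"}}
	fromConfigMap := []envVar{
		{Name: "wal_s3_bucket", Value: "configmap-bucket"}, // dropped: WAL_S3_BUCKET already present
		{Name: "CUSTOM_VARIABLE", Value: "configmap-value"},
	}
	fmt.Println(mergeEnvVars(generated, fromConfigMap...))
	// Output: [{WAL_S3_BUCKET operator-bucket} {CUSTOM_VARIABLE configmap-value}]
}
```
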
diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go
index 9aef0f4fe..bf975b84f 100644
--- a/pkg/cluster/k8sres_test.go
+++ b/pkg/cluster/k8sres_test.go
@@ -250,7 +250,6 @@ func TestPodEnvironmentConfigMapVariables(t *testing.T) {
 	}{
 		{
 			subTest: "no PodEnvironmentConfigMap",
-			envVars: []v1.EnvVar{},
 		},
 		{
 			subTest: "missing PodEnvironmentConfigMap",
@@ -261,8 +260,7 @@ func TestPodEnvironmentConfigMapVariables(t *testing.T) {
 				},
 			},
 			},
-			envVars: []v1.EnvVar{},
-			err:     fmt.Errorf("could not read PodEnvironmentConfigMap: NotFound"),
+			err: fmt.Errorf("could not read PodEnvironmentConfigMap: NotFound"),
 		},
 		{
 			subTest: "Pod environment vars configured by PodEnvironmentConfigMap",
@@ -326,7 +324,6 @@ func TestPodEnvironmentSecretVariables(t *testing.T) {
 	}{
 		{
 			subTest: "No PodEnvironmentSecret configured",
-			envVars: []v1.EnvVar{},
 		},
 		{
 			subTest: "Secret referenced by PodEnvironmentSecret does not exist",
@@ -337,8 +334,7 @@ func TestPodEnvironmentSecretVariables(t *testing.T) {
 			opConfig: config.Config{
 				Resources: config.Resources{
 					ResourceCheckInterval: time.Duration(time.Second),
 					ResourceCheckTimeout:  time.Duration(testResourceCheckTimeout),
 				},
 			},
-			envVars: []v1.EnvVar{},
-			err:     fmt.Errorf("could not read Secret PodEnvironmentSecretName: still failing after %d retries: secret.core %q not found", maxRetries, testPodEnvironmentObjectNotExists),
+			err: fmt.Errorf("could not read Secret PodEnvironmentSecretName: still failing after %d retries: secret.core %q not found", maxRetries, testPodEnvironmentObjectNotExists),
 		},
 		{
 			subTest: "API error during PodEnvironmentSecret retrieval",
@@ -349,8 +345,7 @@ func TestPodEnvironmentSecretVariables(t *testing.T) {
 			opConfig: config.Config{
 				Resources: config.Resources{
 					ResourceCheckInterval: time.Duration(time.Second),
 					ResourceCheckTimeout:  time.Duration(testResourceCheckTimeout),
 				},
 			},
-			envVars: []v1.EnvVar{},
-			err:     fmt.Errorf("could not read Secret PodEnvironmentSecretName: Secret PodEnvironmentSecret API error"),
+			err: fmt.Errorf("could not read Secret PodEnvironmentSecretName: Secret PodEnvironmentSecret API error"),
 		},
 		{
 			subTest: "Pod environment vars reference all keys from secret configured by PodEnvironmentSecret",
@@ -945,6 +940,79 @@ func TestCloneEnv(t *testing.T) {
 	}
 }
 
+func TestAppendEnvVar(t *testing.T) {
+	testName := "TestAppendEnvVar"
+	tests := []struct {
+		subTest      string
+		envs         []v1.EnvVar
+		envsToAppend []v1.EnvVar
+		expectedSize int
+	}{
+		{
+			subTest: "append two variables - one with same key that should get rejected",
+			envs: []v1.EnvVar{
+				{
+					Name:  "CUSTOM_VARIABLE",
+					Value: "test",
+				},
+			},
+			envsToAppend: []v1.EnvVar{
+				{
+					Name:  "CUSTOM_VARIABLE",
+					Value: "new-test",
+				},
+				{
+					Name:  "ANOTHER_CUSTOM_VARIABLE",
+					Value: "another-test",
+				},
+			},
+			expectedSize: 2,
+		},
+		{
+			subTest: "append empty slice",
+			envs: []v1.EnvVar{
+				{
+					Name:  "CUSTOM_VARIABLE",
+					Value: "test",
+				},
+			},
+			envsToAppend: []v1.EnvVar{},
+			expectedSize: 1,
+		},
+		{
+			subTest: "append nil",
+			envs: []v1.EnvVar{
+				{
+					Name:  "CUSTOM_VARIABLE",
+					Value: "test",
+				},
+			},
+			envsToAppend: nil,
+			expectedSize: 1,
+		},
+	}
+
+	for _, tt := range tests {
+		finalEnvs := appendEnvVars(tt.envs, tt.envsToAppend...)
+
+		if len(finalEnvs) != tt.expectedSize {
+			t.Errorf("%s %s: expected %d env variables, got %d",
+				testName, tt.subTest, tt.expectedSize, len(finalEnvs))
+		}
+
+		for _, env := range tt.envs {
+			for _, finalEnv := range finalEnvs {
+				if env.Name == finalEnv.Name {
+					if env.Value != finalEnv.Value {
+						t.Errorf("%s %s: expected env value %s of variable %s, got %s instead",
+							testName, tt.subTest, env.Value, env.Name, finalEnv.Value)
+					}
+				}
+			}
+		}
+	}
+}
+
 func TestStandbyEnv(t *testing.T) {
 	testName := "TestStandbyEnv"
 	tests := []struct {
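
One note on the `return nil, nil` and `return nil, err` changes in `getPodEnvironmentConfigMapVariables` and `getPodEnvironmentSecretVariables` above: as long as callers only take the length of the result, range over it, or append it to another slice (the usual pattern for env var lists), a nil slice behaves exactly like the previously returned empty slice. A tiny, self-contained illustration in plain Go (unrelated to the operator's types):

```go
package main

import "fmt"

func main() {
	var none []string // nil slice, no allocation backing it

	fmt.Println(len(none)) // 0, same as for an empty slice
	for range none {
		fmt.Println("never reached") // ranging over a nil slice runs zero iterations
	}
	merged := append([]string{"WAL_S3_BUCKET"}, none...) // appending nil adds nothing
	fmt.Println(merged)                                  // [WAL_S3_BUCKET]
}
```
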