From eecd13169cf27fab06cd51fdfc06aa035424122a Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Thu, 14 Apr 2022 11:47:33 +0200
Subject: [PATCH] refactor spilo env var generation (#1848)

* refactor spilo env generation
* enhance docs on env vars
* add unit test for appendEnvVar
---
 docs/administrator.md                 |   46 +-
 docs/reference/operator_parameters.md |    5 +-
 docs/user.md                          |   31 +-
 pkg/cluster/cluster.go                |    2 +-
 pkg/cluster/cluster_test.go           |   12 +-
 pkg/cluster/k8sres.go                 |  182 +--
 pkg/cluster/k8sres_test.go            | 1863 ++++++++++++++-----
 7 files changed, 1231 insertions(+), 910 deletions(-)

diff --git a/docs/administrator.md b/docs/administrator.md
index 061f9184e..aa08f549e 100644
--- a/docs/administrator.md
+++ b/docs/administrator.md
@@ -601,15 +601,39 @@ spec:
 
 ## Custom Pod Environment Variables
 
-It is possible to configure a ConfigMap as well as a Secret which are used by
-the Postgres pods as an additional provider for environment variables. One use
-case is a customized Spilo image configured by extra environment variables.
-Another case could be to provide custom cloud provider or backup settings.
+The operator will assign a set of environment variables to the database pods
+that cannot be overridden to guarantee core functionality. Only variables with
+'WAL_' and 'LOG_' prefixes can be customized to allow for backup and log
+shipping to be specified differently. There are three ways to specify extra
+environment variables (or override existing ones) for database pods:
 
-In general the Operator will give preference to the globally configured
-variables, to not have the custom ones interfere with core functionality.
-Variables with the 'WAL_' and 'LOG_' prefix can be overwritten though, to
-allow backup and log shipping to be specified differently.
+* [Via ConfigMap](#via-configmap)
+* [Via Secret](#via-secret)
+* [Via Postgres Cluster Manifest](#via-postgres-cluster-manifest)
+
+The first two options must be referenced from the operator configuration,
+making them global settings for all Postgres clusters the operator watches.
+One use case is a customized Spilo image that must be configured by extra
+environment variables. Another case could be to provide custom cloud
+provider or backup settings.
+
+The last option allows specifying environment variables individually for
+every cluster via the `env` section in the manifest. For example, if you use
+individual backup locations for each of your clusters, or if you want to
+disable WAL archiving for a certain cluster by setting `WAL_S3_BUCKET`,
+`WAL_GS_BUCKET` or `AZURE_STORAGE_ACCOUNT` to an empty string.
+
+The operator will give precedence to environment variables in the following
+order (e.g. a variable defined in 4. overrides a variable with the same name
+in 5.):
+
+1. Assigned by the operator
+2. Clone section (with WAL settings from operator config when `s3_wal_path` is empty)
+3. Standby section
+4. `env` section in cluster manifest
+5. Pod environment secret via operator config
+6. Pod environment config map via operator config
+7. WAL and logical backup settings from operator config
 
 ### Via ConfigMap
 
@@ -706,7 +730,7 @@ data:
 The key-value pairs of the Secret are all accessible as environment variables
 to the Postgres StatefulSet/pods.
 
-### For individual cluster
+### Via Postgres Cluster Manifest
 
 It is possible to define environment variables directly in the Postgres
 cluster manifest to configure it individually. 
The variables must be listed under the `env` section.

@@ -951,6 +975,10 @@ When the `AWS_REGION` is set, `AWS_ENDPOINT` and `WALE_S3_ENDPOINT` are
 generated automatically. `WALG_S3_PREFIX` is identical to `WALE_S3_PREFIX`.
 `SCOPE` is the Postgres cluster name.
 
+:warning: If both `AWS_REGION` and `AWS_ENDPOINT` or `WALE_S3_ENDPOINT` are
+defined, backups with WAL-E will fail. You can fix it by switching to WAL-G
+with `USE_WALG_BACKUP: "true"`.
+
 ### Google Cloud Platform setup
 
 To configure the operator on GCP these prerequisites that are needed:
diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md
index d245be58d..08cb37e03 100644
--- a/docs/reference/operator_parameters.md
+++ b/docs/reference/operator_parameters.md
@@ -645,7 +645,10 @@ yet officially supported.
   empty.
 
 * **aws_region**
-  AWS region used to store EBS volumes. The default is `eu-central-1`.
+  AWS region used to store EBS volumes. The default is `eu-central-1`. Note,
+  this option is not meant for specifying the AWS region for backup and
+  restore, since it can be separate from the EBS region. You have to define
+  AWS_REGION as a [custom environment variable](../administrator.md#custom-pod-environment-variables).
 
 * **additional_secret_mount**
   Additional Secret (aws or gcp credentials) to mount in the pod.
diff --git a/docs/user.md b/docs/user.md
index 4e690fb3c..0fc7c35f2 100644
--- a/docs/user.md
+++ b/docs/user.md
@@ -766,15 +766,15 @@ spec:
     uid: "efd12e58-5786-11e8-b5a7-06148230260c"
     cluster: "acid-minimal-cluster"
     timestamp: "2017-12-19T12:40:33+01:00"
-    s3_wal_path: "s3:///spilo///wal/"
 ```
 
 Here `cluster` is a name of a source cluster that is going to be cloned. A new
 cluster will be cloned from S3, using the latest backup before the `timestamp`.
-Note, that a time zone is required for `timestamp` in the format of +00:00 which
-is UTC. You can specify the `s3_wal_path` of the source cluster or let the
-operator try to find it based on the configured `wal_[s3|gs]_bucket` and the
-specified `uid`. You can find the UID of the source cluster in its metadata:
+Note, a time zone is required for `timestamp` in the format of `+00:00` (UTC).
+
+The operator will try to find the WAL location based on the configured
+`wal_[s3|gs]_bucket` or `wal_az_storage_account` and the specified `uid`.
+You can find the UID of the source cluster in its metadata:
 
 ```yaml
 apiVersion: acid.zalan.do/v1
@@ -784,6 +784,14 @@ metadata:
   uid: efd12e58-5786-11e8-b5a7-06148230260c
 ```
 
+If your source cluster uses a WAL location different from the global
+configuration, you can specify the full path under `s3_wal_path`. For
+[Google Cloud Platform](administrator.md#google-cloud-platform-setup)
+or [Azure](administrator.md#azure-setup)
+it can only be set globally with [custom Pod environment variables](administrator.md#custom-pod-environment-variables)
+or locally in the Postgres manifest's [`env`](administrator.md#via-postgres-cluster-manifest) section.
+
+
 For non AWS S3 following settings can be set to support cloning from other S3
 implementations:
 
@@ -793,6 +801,7 @@ spec:
     uid: "efd12e58-5786-11e8-b5a7-06148230260c"
     cluster: "acid-minimal-cluster"
    timestamp: "2017-12-19T12:40:33+01:00"
+    s3_wal_path: "s3://custom/path/to/bucket"
     s3_endpoint: https://s3.acme.org
     s3_access_key_id: 0123456789abcdef0123456789abcdef
     s3_secret_access_key: 0123456789abcdef0123456789abcdef
@@ -864,9 +873,8 @@ the PostgreSQL version between source and target cluster has to be the same.
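Putting the clone rules above together, here is an illustrative sketch (not taken from the patched files) of a manifest that pins a non-default GCS WAL location for a clone through the `env` section. It assumes no global `wal_gs_bucket` is configured, since the clone section would otherwise take precedence, and the bucket name is a placeholder:

```yaml
spec:
  clone:
    uid: "efd12e58-5786-11e8-b5a7-06148230260c"
    cluster: "acid-minimal-cluster"
    timestamp: "2017-12-19T12:40:33+01:00"
  env:
    - name: CLONE_WAL_GS_BUCKET
      value: "my-clone-gs-bucket"
```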
 To start a cluster as standby, add the following `standby` section in the YAML
 file. You can stream changes from archived WAL files (AWS S3 or Google Cloud
-Storage) or from a remote primary where you specify the host address and port.
-If you leave out the port, Patroni will use `"5432"`. Only one option can be
-specfied in the manifest:
+Storage) or from a remote primary. Only one option can be specified in the
+manifest:
 
 ```yaml
 spec:
@@ -874,12 +882,19 @@ spec:
     s3_wal_path: "s3:///spilo///wal/"
 ```
 
+For GCS, you have to define STANDBY_GOOGLE_APPLICATION_CREDENTIALS as a
+[custom pod environment variable](administrator.md#custom-pod-environment-variables).
+It is not set from the config to allow for overriding.
+
 ```yaml
 spec:
   standby:
     gs_wal_path: "gs:///spilo///wal/"
 ```
 
+For a remote primary, you specify the host address and optionally the port.
+If you leave out the port, Patroni will use `"5432"`.
+
 ```yaml
 spec:
   standby:
diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index 4d0fcb1c0..86a7df8dd 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -1318,7 +1318,7 @@ func (c *Cluster) initAdditionalOwnerRoles() {
 		}
 	}
 
-	if len(memberOf) > 1 {
+	if len(memberOf) > 0 {
 		namespace := c.Namespace
 		additionalOwnerPgUser := spec.PgUser{
 			Origin: spec.RoleOriginSpilo,
diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go
index f531962d1..7581d1473 100644
--- a/pkg/cluster/cluster_test.go
+++ b/pkg/cluster/cluster_test.go
@@ -26,6 +26,8 @@ import (
 const (
 	superUserName       = "postgres"
 	replicationUserName = "standby"
+	exampleSpiloConfig  = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`
+	spiloConfigDiff     = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`
 )
 
 var logger = logrus.New().WithField("test", "cluster")
@@ -957,7 +959,7 @@ func TestCompareEnv(t *testing.T) {
 			},
 			{
 				Name:  "SPILO_CONFIGURATION",
-				Value: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
+				Value: exampleSpiloConfig,
 			},
 		},
 		ExpectedResult: true,
@@ -978,7 
+980,7 @@ func TestCompareEnv(t *testing.T) { }, { Name: "SPILO_CONFIGURATION", - Value: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`, + Value: spiloConfigDiff, }, }, ExpectedResult: true, @@ -999,7 +1001,7 @@ func TestCompareEnv(t *testing.T) { }, { Name: "SPILO_CONFIGURATION", - Value: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`, + Value: exampleSpiloConfig, }, }, ExpectedResult: false, @@ -1024,7 +1026,7 @@ func TestCompareEnv(t *testing.T) { }, { Name: "SPILO_CONFIGURATION", - Value: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`, + Value: exampleSpiloConfig, }, }, ExpectedResult: false, @@ -1041,7 +1043,7 @@ func TestCompareEnv(t *testing.T) { }, { Name: "SPILO_CONFIGURATION", - Value: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`, + Value: exampleSpiloConfig, }, }, ExpectedResult: false, diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index a2186caa8..6cdf379bd 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -766,9 +766,10 @@ func (c *Cluster) generateSpiloPodEnvVars( uid types.UID, spiloConfiguration string, cloneDescription *acidv1.CloneDescription, - standbyDescription *acidv1.StandbyDescription, - customPodEnvVarsList []v1.EnvVar) []v1.EnvVar { + standbyDescription *acidv1.StandbyDescription) []v1.EnvVar { + // hard-coded set of environment variables we need + // to guarantee core functionality of the operator envVars := []v1.EnvVar{ { Name: "SCOPE", @@ -875,59 +876,75 @@ func (c *Cluster) generateSpiloPodEnvVars( 
envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...) } - if c.Spec.StandbyCluster != nil { + if standbyDescription != nil { envVars = append(envVars, c.generateStandbyEnvironment(standbyDescription)...) } + // fetch cluster-specific variables that will override all subsequent global variables if len(c.Spec.Env) > 0 { envVars = appendEnvVars(envVars, c.Spec.Env...) } - // add vars taken from pod_environment_configmap and pod_environment_secret first - // (to allow them to override the globals set in the operator config) - if len(customPodEnvVarsList) > 0 { - envVars = appendEnvVars(envVars, customPodEnvVarsList...) + // fetch variables from custom environment Secret + // that will override all subsequent global variables + secretEnvVarsList, err := c.getPodEnvironmentSecretVariables() + if err != nil { + c.logger.Warningf("%v", err) } + envVars = appendEnvVars(envVars, secretEnvVarsList...) + // fetch variables from custom environment ConfigMap + // that will override all subsequent global variables + configMapEnvVarsList, err := c.getPodEnvironmentConfigMapVariables() + if err != nil { + c.logger.Warningf("%v", err) + } + envVars = appendEnvVars(envVars, configMapEnvVarsList...) + + // global variables derived from operator configuration + opConfigEnvVars := make([]v1.EnvVar, 0) if c.OpConfig.WALES3Bucket != "" { - envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket}) - envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) - envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""}) + opConfigEnvVars = append(opConfigEnvVars, v1.EnvVar{Name: "WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket}) + opConfigEnvVars = append(opConfigEnvVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) + opConfigEnvVars = append(opConfigEnvVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""}) } if c.OpConfig.WALGSBucket != "" { - envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_GS_BUCKET", Value: c.OpConfig.WALGSBucket}) - envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) - envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""}) + opConfigEnvVars = append(opConfigEnvVars, v1.EnvVar{Name: "WAL_GS_BUCKET", Value: c.OpConfig.WALGSBucket}) + opConfigEnvVars = append(opConfigEnvVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) + opConfigEnvVars = append(opConfigEnvVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""}) } if c.OpConfig.WALAZStorageAccount != "" { - envVars = appendEnvVars(envVars, v1.EnvVar{Name: "AZURE_STORAGE_ACCOUNT", Value: c.OpConfig.WALAZStorageAccount}) - envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) - envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""}) + opConfigEnvVars = append(opConfigEnvVars, v1.EnvVar{Name: "AZURE_STORAGE_ACCOUNT", Value: c.OpConfig.WALAZStorageAccount}) + opConfigEnvVars = append(opConfigEnvVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) + opConfigEnvVars = append(opConfigEnvVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""}) } if c.OpConfig.GCPCredentials != "" { - envVars = appendEnvVars(envVars, v1.EnvVar{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: 
c.OpConfig.GCPCredentials}) + opConfigEnvVars = append(opConfigEnvVars, v1.EnvVar{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.GCPCredentials}) } if c.OpConfig.LogS3Bucket != "" { - envVars = appendEnvVars(envVars, v1.EnvVar{Name: "LOG_S3_BUCKET", Value: c.OpConfig.LogS3Bucket}) - envVars = appendEnvVars(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) - envVars = appendEnvVars(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_PREFIX", Value: ""}) + opConfigEnvVars = append(opConfigEnvVars, v1.EnvVar{Name: "LOG_S3_BUCKET", Value: c.OpConfig.LogS3Bucket}) + opConfigEnvVars = append(opConfigEnvVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) + opConfigEnvVars = append(opConfigEnvVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_PREFIX", Value: ""}) } + envVars = appendEnvVars(envVars, opConfigEnvVars...) + return envVars } func appendEnvVars(envs []v1.EnvVar, appEnv ...v1.EnvVar) []v1.EnvVar { - jenvs := envs + collectedEnvs := envs for _, env := range appEnv { - if !isEnvVarPresent(jenvs, env.Name) { - jenvs = append(jenvs, env) + env.Name = strings.ToUpper(env.Name) + if !isEnvVarPresent(collectedEnvs, env.Name) { + collectedEnvs = append(collectedEnvs, env) } } - return jenvs + return collectedEnvs } func isEnvVarPresent(envs []v1.EnvVar, key string) bool { @@ -963,9 +980,11 @@ func (c *Cluster) getPodEnvironmentConfigMapVariables() ([]v1.EnvVar, error) { return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err) } } + for k, v := range cm.Data { configMapPodEnvVarsList = append(configMapPodEnvVarsList, v1.EnvVar{Name: k, Value: v}) } + sort.Slice(configMapPodEnvVarsList, func(i, j int) bool { return configMapPodEnvVarsList[i].Name < configMapPodEnvVarsList[j].Name }) return configMapPodEnvVarsList, nil } @@ -1015,6 +1034,7 @@ func (c *Cluster) getPodEnvironmentSecretVariables() ([]v1.EnvVar, error) { }}) } + sort.Slice(secretPodEnvVarsList, func(i, j int) bool { return secretPodEnvVarsList[i].Name < secretPodEnvVarsList[j].Name }) return secretPodEnvVarsList, nil } @@ -1104,23 +1124,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef initContainers = spec.InitContainers } - // fetch env vars from custom ConfigMap - configMapEnvVarsList, err := c.getPodEnvironmentConfigMapVariables() - if err != nil { - return nil, err - } - - // fetch env vars from custom ConfigMap - secretEnvVarsList, err := c.getPodEnvironmentSecretVariables() - if err != nil { - return nil, err - } - - // concat all custom pod env vars and sort them - customPodEnvVarsList := append(configMapEnvVarsList, secretEnvVarsList...) - sort.Slice(customPodEnvVarsList, - func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name }) - // backward compatible check for InitContainers if spec.InitContainersOld != nil { msg := "manifest parameter init_containers is deprecated." 
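The refactored code enforces the documented precedence purely through append order: `appendEnvVars` upper-cases each incoming name and appends a variable only if the name is not already present, so sources merged earlier always win. A minimal, self-contained sketch of that first-wins merge behavior (a local struct stands in for `v1.EnvVar`, and the sample values are made up):

```go
package main

import (
	"fmt"
	"strings"
)

// envVar stands in for v1.EnvVar from k8s.io/api/core/v1.
type envVar struct {
	Name  string
	Value string
}

// appendEnvVars mirrors the patched helper: names are upper-cased and a
// variable is only appended if no variable with that name exists yet, so
// earlier (higher-precedence) sources cannot be overridden by later ones.
func appendEnvVars(envs []envVar, appEnv ...envVar) []envVar {
	collected := envs
	for _, env := range appEnv {
		env.Name = strings.ToUpper(env.Name)
		if !isEnvVarPresent(collected, env.Name) {
			collected = append(collected, env)
		}
	}
	return collected
}

func isEnvVarPresent(envs []envVar, key string) bool {
	for _, env := range envs {
		if env.Name == key {
			return true
		}
	}
	return false
}

func main() {
	// the manifest `env` section is merged before the operator config,
	// so its WAL_S3_BUCKET survives while the global one is dropped
	merged := appendEnvVars(
		[]envVar{{Name: "WAL_S3_BUCKET", Value: "custom-s3-bucket"}},
		envVar{Name: "wal_s3_bucket", Value: "global-s3-bucket"},
		envVar{Name: "LOG_S3_BUCKET", Value: "global-log-bucket"},
	)
	fmt.Println(merged)
	// Output: [{WAL_S3_BUCKET custom-s3-bucket} {LOG_S3_BUCKET global-log-bucket}]
}
```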
@@ -1153,9 +1156,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		c.Postgresql.GetUID(),
 		spiloConfiguration,
 		spec.Clone,
-		spec.StandbyCluster,
-		customPodEnvVarsList,
-	)
+		spec.StandbyCluster)
 
 	// pickup the docker image for the spilo container
 	effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)
@@ -1297,7 +1298,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 	sidecarContainers, conflicts := mergeContainers(clusterSpecificSidecars, c.Config.OpConfig.SidecarContainers, globalSidecarContainersByDockerImage, scalyrSidecars)
 	for containerName := range conflicts {
-		c.logger.Warningf("a sidecar is specified twice. Ignoring sidecar %q in favor of %q with high a precendence",
+		c.logger.Warningf("a sidecar is specified twice. Ignoring sidecar %q in favor of %q with a higher precedence",
 			containerName, containerName)
 	}
 
@@ -1819,6 +1820,7 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
 	cluster := description.ClusterName
 	result = append(result, v1.EnvVar{Name: "CLONE_SCOPE", Value: cluster})
 	if description.EndTimestamp == "" {
+		c.logger.Infof("cloning with basebackup from %s", cluster)
 		// cloning with basebackup, make a connection string to the cluster to clone from
 		host, port := c.getClusterServiceConnectionParameters(cluster)
 		// TODO: make some/all of those constants
@@ -1840,67 +1842,47 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
 			},
 		})
 	} else {
-		// cloning with S3, find out the bucket to clone
-		msg := "clone from S3 bucket"
-		c.logger.Info(msg, description.S3WalPath)
-
+		c.logger.Info("cloning from WAL location")
 		if description.S3WalPath == "" {
-			msg := "figure out which S3 bucket to use from env"
-			c.logger.Info(msg, description.S3WalPath)
+			c.logger.Info("no S3 WAL path defined - taking value from global config", description.S3WalPath)
 
 			if c.OpConfig.WALES3Bucket != "" {
-				envs := []v1.EnvVar{
-					{
-						Name:  "CLONE_WAL_S3_BUCKET",
-						Value: c.OpConfig.WALES3Bucket,
-					},
-				}
-				result = append(result, envs...)
+				c.logger.Debugf("found WALES3Bucket %s - will set CLONE_WAL_S3_BUCKET", c.OpConfig.WALES3Bucket)
+				result = append(result, v1.EnvVar{Name: "CLONE_WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket})
 			} else if c.OpConfig.WALGSBucket != "" {
-				envs := []v1.EnvVar{
-					{
-						Name:  "CLONE_WAL_GS_BUCKET",
-						Value: c.OpConfig.WALGSBucket,
-					},
-					{
-						Name:  "CLONE_GOOGLE_APPLICATION_CREDENTIALS",
-						Value: c.OpConfig.GCPCredentials,
-					},
+				c.logger.Debugf("found WALGSBucket %s - will set CLONE_WAL_GS_BUCKET", c.OpConfig.WALGSBucket)
+				result = append(result, v1.EnvVar{Name: "CLONE_WAL_GS_BUCKET", Value: c.OpConfig.WALGSBucket})
+				if c.OpConfig.GCPCredentials != "" {
+					result = append(result, v1.EnvVar{Name: "CLONE_GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.GCPCredentials})
 				}
-				result = append(result, envs...)
 			} else if c.OpConfig.WALAZStorageAccount != "" {
-				envs := []v1.EnvVar{
-					{
-						Name:  "CLONE_AZURE_STORAGE_ACCOUNT",
-						Value: c.OpConfig.WALAZStorageAccount,
-					},
-				}
-				result = append(result, envs...)
+				c.logger.Debugf("found WALAZStorageAccount %s - will set CLONE_AZURE_STORAGE_ACCOUNT", c.OpConfig.WALAZStorageAccount)
+				result = append(result, v1.EnvVar{Name: "CLONE_AZURE_STORAGE_ACCOUNT", Value: c.OpConfig.WALAZStorageAccount})
 			} else {
-				c.logger.Error("Cannot figure out S3 or GS bucket. Both are empty.")
+				c.logger.Error("cannot figure out S3 or GS bucket or AZ storage account. 
All options are empty in the config.") } + // append suffix because WAL location name is not the whole path + result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(description.UID)}) + } else { + c.logger.Debugf("use S3WalPath %s from the manifest", description.S3WalPath) + envs := []v1.EnvVar{ + { + Name: "CLONE_WALE_S3_PREFIX", + Value: description.S3WalPath, + }, { Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", - Value: getBucketScopeSuffix(description.UID), + Value: "", }, } result = append(result, envs...) - } else { - msg := "use custom parsed S3WalPath %s from the manifest" - c.logger.Warningf(msg, description.S3WalPath) - - result = append(result, v1.EnvVar{ - Name: "CLONE_WALE_S3_PREFIX", - Value: description.S3WalPath, - }) } result = append(result, v1.EnvVar{Name: "CLONE_METHOD", Value: "CLONE_WITH_WALE"}) result = append(result, v1.EnvVar{Name: "CLONE_TARGET_TIME", Value: description.EndTimestamp}) - result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_PREFIX", Value: ""}) if description.S3Endpoint != "" { result = append(result, v1.EnvVar{Name: "CLONE_AWS_ENDPOINT", Value: description.S3Endpoint}) @@ -1933,7 +1915,7 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescript result := make([]v1.EnvVar, 0) if description.StandbyHost != "" { - // standby from remote primary + c.logger.Info("standby cluster streaming from remote primary") result = append(result, v1.EnvVar{ Name: "STANDBY_HOST", Value: description.StandbyHost, @@ -1945,30 +1927,20 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescript }) } } else { + c.logger.Info("standby cluster streaming from WAL location") if description.S3WalPath != "" { - // standby with S3, find out the bucket to setup standby - msg := "Standby from S3 bucket using custom parsed S3WalPath from the manifest %s " - c.logger.Infof(msg, description.S3WalPath) - result = append(result, v1.EnvVar{ Name: "STANDBY_WALE_S3_PREFIX", Value: description.S3WalPath, }) } else if description.GSWalPath != "" { - msg := "Standby from GS bucket using custom parsed GSWalPath from the manifest %s " - c.logger.Infof(msg, description.GSWalPath) - - envs := []v1.EnvVar{ - { - Name: "STANDBY_WALE_GS_PREFIX", - Value: description.GSWalPath, - }, - { - Name: "STANDBY_GOOGLE_APPLICATION_CREDENTIALS", - Value: c.OpConfig.GCPCredentials, - }, - } - result = append(result, envs...) 
+ result = append(result, v1.EnvVar{ + Name: "STANDBY_WALE_GS_PREFIX", + Value: description.GSWalPath, + }) + } else { + c.logger.Error("no WAL path specified in standby section") + return result } result = append(result, v1.EnvVar{Name: "STANDBY_METHOD", Value: "STANDBY_WITH_WALE"}) diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index 94e78572c..7a1bdeaf9 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -49,6 +49,7 @@ type ExpectedValue struct { envIndex int envVarConstant string envVarValue string + envVarValueRef *v1.EnvVarSource } func TestGenerateSpiloJSONConfiguration(t *testing.T) { @@ -117,29 +118,360 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) { } } -func TestGenerateSpiloPodEnvVars(t *testing.T) { - var cluster = New( - Config{ - OpConfig: config.Config{ - WALGSBucket: "wale-gs-bucket", - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, +func TestExtractPgVersionFromBinPath(t *testing.T) { + testName := "TestExtractPgVersionFromBinPath" + tests := []struct { + subTest string + binPath string + template string + expected string + }{ + { + subTest: "test current bin path with decimal against hard coded template", + binPath: "/usr/lib/postgresql/9.6/bin", + template: pgBinariesLocationTemplate, + expected: "9.6", + }, + { + subTest: "test current bin path against hard coded template", + binPath: "/usr/lib/postgresql/12/bin", + template: pgBinariesLocationTemplate, + expected: "12", + }, + { + subTest: "test alternative bin path against a matching template", + binPath: "/usr/pgsql-12/bin", + template: "/usr/pgsql-%v/bin", + expected: "12", + }, + } + + for _, tt := range tests { + pgVersion, err := extractPgVersionFromBinPath(tt.binPath, tt.template) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if pgVersion != tt.expected { + t.Errorf("%s %s: Expected version %s, have %s instead", + testName, tt.subTest, tt.expected, pgVersion) + } + } +} + +const ( + testPodEnvironmentConfigMapName = "pod_env_cm" + testPodEnvironmentSecretName = "pod_env_sc" + testPodEnvironmentObjectNotExists = "idonotexist" + testPodEnvironmentSecretNameAPIError = "pod_env_sc_apierror" + testResourceCheckInterval = 3 + testResourceCheckTimeout = 10 +) + +type mockSecret struct { + v1core.SecretInterface +} + +type mockConfigMap struct { + v1core.ConfigMapInterface +} + +func (c *mockSecret) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) { + if name == testPodEnvironmentSecretNameAPIError { + return nil, fmt.Errorf("Secret PodEnvironmentSecret API error") + } + if name != testPodEnvironmentSecretName { + return nil, k8serrors.NewNotFound(schema.GroupResource{Group: "core", Resource: "secret"}, name) + } + secret := &v1.Secret{} + secret.Name = testPodEnvironmentSecretName + secret.Data = map[string][]byte{ + "clone_aws_access_key_id": []byte("0123456789abcdef0123456789abcdef"), + "custom_variable": []byte("secret-test"), + "standby_google_application_credentials": []byte("0123456789abcdef0123456789abcdef"), + } + return secret, nil +} + +func (c *mockConfigMap) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ConfigMap, error) { + if name != testPodEnvironmentConfigMapName { + return nil, fmt.Errorf("NotFound") + } + configmap := &v1.ConfigMap{} + configmap.Name = testPodEnvironmentConfigMapName + configmap.Data = map[string]string{ + // hard-coded clone env variable, can set when not specified 
in manifest + "clone_aws_endpoint": "s3.eu-west-1.amazonaws.com", + // custom variable, can be overridden by c.Spec.Env + "custom_variable": "configmap-test", + // hard-coded env variable, can not be overridden + "kubernetes_scope_label": "pgaas", + // hard-coded env variable, can be overridden + "wal_s3_bucket": "global-s3-bucket-configmap", + } + return configmap, nil +} + +type MockSecretGetter struct { +} + +type MockConfigMapsGetter struct { +} + +func (c *MockSecretGetter) Secrets(namespace string) v1core.SecretInterface { + return &mockSecret{} +} + +func (c *MockConfigMapsGetter) ConfigMaps(namespace string) v1core.ConfigMapInterface { + return &mockConfigMap{} +} + +func newMockKubernetesClient() k8sutil.KubernetesClient { + return k8sutil.KubernetesClient{ + SecretsGetter: &MockSecretGetter{}, + ConfigMapsGetter: &MockConfigMapsGetter{}, + } +} +func newMockCluster(opConfig config.Config) *Cluster { + cluster := &Cluster{ + Config: Config{OpConfig: opConfig}, + KubeClient: newMockKubernetesClient(), + logger: logger, + } + return cluster +} + +func TestPodEnvironmentConfigMapVariables(t *testing.T) { + testName := "TestPodEnvironmentConfigMapVariables" + tests := []struct { + subTest string + opConfig config.Config + envVars []v1.EnvVar + err error + }{ + { + subTest: "no PodEnvironmentConfigMap", + envVars: []v1.EnvVar{}, + }, + { + subTest: "missing PodEnvironmentConfigMap", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentConfigMap: spec.NamespacedName{ + Name: testPodEnvironmentObjectNotExists, + }, }, }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + err: fmt.Errorf("could not read PodEnvironmentConfigMap: NotFound"), + }, + { + subTest: "Pod environment vars configured by PodEnvironmentConfigMap", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentConfigMap: spec.NamespacedName{ + Name: testPodEnvironmentConfigMapName, + }, + }, + }, + envVars: []v1.EnvVar{ + { + Name: "clone_aws_endpoint", + Value: "s3.eu-west-1.amazonaws.com", + }, + { + Name: "custom_variable", + Value: "configmap-test", + }, + { + Name: "kubernetes_scope_label", + Value: "pgaas", + }, + { + Name: "wal_s3_bucket", + Value: "global-s3-bucket-configmap", + }, + }, + }, + } + for _, tt := range tests { + c := newMockCluster(tt.opConfig) + vars, err := c.getPodEnvironmentConfigMapVariables() + if !reflect.DeepEqual(vars, tt.envVars) { + t.Errorf("%s %s: expected `%v` but got `%v`", + testName, tt.subTest, tt.envVars, vars) + } + if tt.err != nil { + if err.Error() != tt.err.Error() { + t.Errorf("%s %s: expected error `%v` but got `%v`", + testName, tt.subTest, tt.err, err) + } + } else { + if err != nil { + t.Errorf("%s %s: expected no error but got error: `%v`", + testName, tt.subTest, err) + } + } + } +} - expectedValuesGSBucket := []ExpectedValue{ +// Test if the keys of an existing secret are properly referenced +func TestPodEnvironmentSecretVariables(t *testing.T) { + maxRetries := int(testResourceCheckTimeout / testResourceCheckInterval) + testName := "TestPodEnvironmentSecretVariables" + tests := []struct { + subTest string + opConfig config.Config + envVars []v1.EnvVar + err error + }{ + { + subTest: "No PodEnvironmentSecret configured", + envVars: []v1.EnvVar{}, + }, + { + subTest: "Secret referenced by PodEnvironmentSecret does not exist", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentSecret: testPodEnvironmentObjectNotExists, + ResourceCheckInterval: time.Duration(testResourceCheckInterval), 
+ ResourceCheckTimeout: time.Duration(testResourceCheckTimeout), + }, + }, + err: fmt.Errorf("could not read Secret PodEnvironmentSecretName: still failing after %d retries: secret.core %q not found", maxRetries, testPodEnvironmentObjectNotExists), + }, + { + subTest: "API error during PodEnvironmentSecret retrieval", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentSecret: testPodEnvironmentSecretNameAPIError, + ResourceCheckInterval: time.Duration(testResourceCheckInterval), + ResourceCheckTimeout: time.Duration(testResourceCheckTimeout), + }, + }, + err: fmt.Errorf("could not read Secret PodEnvironmentSecretName: Secret PodEnvironmentSecret API error"), + }, + { + subTest: "Pod environment vars reference all keys from secret configured by PodEnvironmentSecret", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentSecret: testPodEnvironmentSecretName, + ResourceCheckInterval: time.Duration(testResourceCheckInterval), + ResourceCheckTimeout: time.Duration(testResourceCheckTimeout), + }, + }, + envVars: []v1.EnvVar{ + { + Name: "clone_aws_access_key_id", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: testPodEnvironmentSecretName, + }, + Key: "clone_aws_access_key_id", + }, + }, + }, + { + Name: "custom_variable", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: testPodEnvironmentSecretName, + }, + Key: "custom_variable", + }, + }, + }, + { + Name: "standby_google_application_credentials", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: testPodEnvironmentSecretName, + }, + Key: "standby_google_application_credentials", + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + c := newMockCluster(tt.opConfig) + vars, err := c.getPodEnvironmentSecretVariables() + sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name }) + if !reflect.DeepEqual(vars, tt.envVars) { + t.Errorf("%s %s: expected `%v` but got `%v`", + testName, tt.subTest, tt.envVars, vars) + } + if tt.err != nil { + if err.Error() != tt.err.Error() { + t.Errorf("%s %s: expected error `%v` but got `%v`", + testName, tt.subTest, tt.err, err) + } + } else { + if err != nil { + t.Errorf("%s %s: expected no error but got error: `%v`", + testName, tt.subTest, err) + } + } + } + +} + +func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { + required := map[string]bool{ + "PGHOST": false, + "PGPORT": false, + "PGUSER": false, + "PGSCHEMA": false, + "PGPASSWORD": false, + "CONNECTION_POOLER_MODE": false, + "CONNECTION_POOLER_PORT": false, + } + + container := getPostgresContainer(&podSpec.Spec) + envs := container.Env + for _, env := range envs { + required[env.Name] = true + } + + for env, value := range required { + if !value { + return fmt.Errorf("Environment variable %s is not present", env) + } + } + + return nil +} + +func TestGenerateSpiloPodEnvVars(t *testing.T) { + var dummyUUID = "efd12e58-5786-11e8-b5a7-06148230260c" + + expectedClusterNameLabel := []ExpectedValue{ + { + envIndex: 5, + envVarConstant: "KUBERNETES_SCOPE_LABEL", + envVarValue: "cluster-name", + }, + } + expectedSpiloWalPathCompat := []ExpectedValue{ + { + envIndex: 12, + envVarConstant: "ENABLE_WAL_PATH_COMPAT", + envVarValue: "true", + }, + } + expectedValuesS3Bucket := []ExpectedValue{ { envIndex: 15, - envVarConstant: 
"WAL_GS_BUCKET", - envVarValue: "wale-gs-bucket", + envVarConstant: "WAL_S3_BUCKET", + envVarValue: "global-s3-bucket", }, { envIndex: 16, envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX", - envVarValue: "/SomeUUID", + envVarValue: fmt.Sprintf("/%s", dummyUUID), }, { envIndex: 17, @@ -147,17 +479,16 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) { envVarValue: "", }, } - expectedValuesGCPCreds := []ExpectedValue{ { envIndex: 15, envVarConstant: "WAL_GS_BUCKET", - envVarValue: "wale-gs-bucket", + envVarValue: "global-gs-bucket", }, { envIndex: 16, envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX", - envVarValue: "/SomeUUID", + envVarValue: fmt.Sprintf("/%s", dummyUUID), }, { envIndex: 17, @@ -167,28 +498,116 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) { { envIndex: 18, envVarConstant: "GOOGLE_APPLICATION_CREDENTIALS", - envVarValue: "some_path_to_credentials", + envVarValue: "some-path-to-credentials", }, } - expectedClusterNameLabel := []ExpectedValue{ + expectedS3BucketConfigMap := []ExpectedValue{ { - envIndex: 5, - envVarConstant: "KUBERNETES_SCOPE_LABEL", - envVarValue: "cluster-name", + envIndex: 17, + envVarConstant: "WAL_S3_BUCKET", + envVarValue: "global-s3-bucket-configmap", }, } - expectedCustomS3Bucket := []ExpectedValue{ + expectedCustomS3BucketSpec := []ExpectedValue{ { envIndex: 15, envVarConstant: "WAL_S3_BUCKET", envVarValue: "custom-s3-bucket", }, } - expectedCustomVariable := []ExpectedValue{ + expectedCustomVariableSecret := []ExpectedValue{ + { + envIndex: 16, + envVarConstant: "CUSTOM_VARIABLE", + envVarValueRef: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: testPodEnvironmentSecretName, + }, + Key: "custom_variable", + }, + }, + }, + } + expectedCustomVariableConfigMap := []ExpectedValue{ + { + envIndex: 16, + envVarConstant: "CUSTOM_VARIABLE", + envVarValue: "configmap-test", + }, + } + expectedCustomVariableSpec := []ExpectedValue{ { envIndex: 15, envVarConstant: "CUSTOM_VARIABLE", - envVarValue: "cluster-variable", + envVarValue: "spec-env-test", + }, + } + expectedCloneEnvSpec := []ExpectedValue{ + { + envIndex: 16, + envVarConstant: "CLONE_WALE_S3_PREFIX", + envVarValue: "s3://another-bucket", + }, + { + envIndex: 17, + envVarConstant: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", + envVarValue: "", + }, + { + envIndex: 20, + envVarConstant: "CLONE_AWS_ENDPOINT", + envVarValue: "s3.eu-central-1.amazonaws.com", + }, + } + expectedCloneEnvConfigMap := []ExpectedValue{ + { + envIndex: 16, + envVarConstant: "CLONE_WAL_S3_BUCKET", + envVarValue: "global-s3-bucket", + }, + { + envIndex: 17, + envVarConstant: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", + envVarValue: fmt.Sprintf("/%s", dummyUUID), + }, + { + envIndex: 20, + envVarConstant: "CLONE_AWS_ENDPOINT", + envVarValue: "s3.eu-west-1.amazonaws.com", + }, + } + expectedCloneEnvSecret := []ExpectedValue{ + { + envIndex: 20, + envVarConstant: "CLONE_AWS_ACCESS_KEY_ID", + envVarValueRef: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: testPodEnvironmentSecretName, + }, + Key: "clone_aws_access_key_id", + }, + }, + }, + } + expectedStandbyEnvSecret := []ExpectedValue{ + { + envIndex: 15, + envVarConstant: "STANDBY_WALE_GS_PREFIX", + envVarValue: "gs://some/path/", + }, + { + envIndex: 20, + envVarConstant: "STANDBY_GOOGLE_APPLICATION_CREDENTIALS", + envVarValueRef: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: testPodEnvironmentSecretName, + }, + 
Key: "standby_google_application_credentials", + }, + }, }, } @@ -196,51 +615,51 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) { tests := []struct { subTest string opConfig config.Config - uid types.UID - spiloConfig string cloneDescription *acidv1.CloneDescription standbyDescription *acidv1.StandbyDescription - customEnvList []v1.EnvVar expectedValues []ExpectedValue pgsql acidv1.Postgresql }{ { - subTest: "Will set WAL_GS_BUCKET env", + subTest: "will set ENABLE_WAL_PATH_COMPAT env", opConfig: config.Config{ - WALGSBucket: "wale-gs-bucket", + EnableSpiloWalPathCompat: true, }, - uid: "SomeUUID", - spiloConfig: "someConfig", cloneDescription: &acidv1.CloneDescription{}, standbyDescription: &acidv1.StandbyDescription{}, - customEnvList: []v1.EnvVar{}, - expectedValues: expectedValuesGSBucket, + expectedValues: expectedSpiloWalPathCompat, }, { - subTest: "Will set GOOGLE_APPLICATION_CREDENTIALS env", + subTest: "will set WAL_S3_BUCKET env", opConfig: config.Config{ - WALGSBucket: "wale-gs-bucket", - GCPCredentials: "some_path_to_credentials", + WALES3Bucket: "global-s3-bucket", + }, + cloneDescription: &acidv1.CloneDescription{}, + standbyDescription: &acidv1.StandbyDescription{}, + expectedValues: expectedValuesS3Bucket, + }, + { + subTest: "will set GOOGLE_APPLICATION_CREDENTIALS env", + opConfig: config.Config{ + WALGSBucket: "global-gs-bucket", + GCPCredentials: "some-path-to-credentials", }, - uid: "SomeUUID", - spiloConfig: "someConfig", cloneDescription: &acidv1.CloneDescription{}, standbyDescription: &acidv1.StandbyDescription{}, - customEnvList: []v1.EnvVar{}, expectedValues: expectedValuesGCPCreds, }, { - subTest: "Will not overwrite global KUBERNETES_SCOPE_LABEL parameter from the cluster Env option", + subTest: "will not override global config KUBERNETES_SCOPE_LABEL parameter", opConfig: config.Config{ Resources: config.Resources{ ClusterNameLabel: "cluster-name", + PodEnvironmentConfigMap: spec.NamespacedName{ + Name: testPodEnvironmentConfigMapName, // contains kubernetes_scope_label, too + }, }, }, - uid: "SomeUUID", - spiloConfig: "someConfig", cloneDescription: &acidv1.CloneDescription{}, standbyDescription: &acidv1.StandbyDescription{}, - customEnvList: []v1.EnvVar{}, expectedValues: expectedClusterNameLabel, pgsql: acidv1.Postgresql{ Spec: acidv1.PostgresSpec{ @@ -254,16 +673,27 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) { }, }, { - subTest: "Will overwrite global WAL_S3_BUCKET parameter from the cluster Env option", + subTest: "will override global WAL_S3_BUCKET parameter from pod environment config map", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentConfigMap: spec.NamespacedName{ + Name: testPodEnvironmentConfigMapName, + }, + }, + WALES3Bucket: "global-s3-bucket", + }, + cloneDescription: &acidv1.CloneDescription{}, + standbyDescription: &acidv1.StandbyDescription{}, + expectedValues: expectedS3BucketConfigMap, + }, + { + subTest: "will override global WAL_S3_BUCKET parameter from manifest `env` section", opConfig: config.Config{ WALGSBucket: "global-s3-bucket", }, - uid: "SomeUUID", - spiloConfig: "someConfig", cloneDescription: &acidv1.CloneDescription{}, standbyDescription: &acidv1.StandbyDescription{}, - customEnvList: []v1.EnvVar{}, - expectedValues: expectedCustomS3Bucket, + expectedValues: expectedCustomS3BucketSpec, pgsql: acidv1.Postgresql{ Spec: acidv1.PostgresSpec{ Env: []v1.EnvVar{ @@ -276,276 +706,164 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) { }, }, { - subTest: "Will overwrite custom variable parameter from 
the cluster Env option", - uid: "SomeUUID", - spiloConfig: "someConfig", - cloneDescription: &acidv1.CloneDescription{}, - standbyDescription: &acidv1.StandbyDescription{}, - customEnvList: []v1.EnvVar{ - { - Name: "CUSTOM_VARIABLE", - Value: "custom-variable", + subTest: "will set CUSTOM_VARIABLE from pod environment secret and not config map", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentConfigMap: spec.NamespacedName{ + Name: testPodEnvironmentConfigMapName, + }, + PodEnvironmentSecret: testPodEnvironmentSecretName, + ResourceCheckInterval: time.Duration(testResourceCheckInterval), + ResourceCheckTimeout: time.Duration(testResourceCheckTimeout), }, }, - expectedValues: expectedCustomVariable, + cloneDescription: &acidv1.CloneDescription{}, + standbyDescription: &acidv1.StandbyDescription{}, + expectedValues: expectedCustomVariableSecret, + }, + { + subTest: "will set CUSTOM_VARIABLE from pod environment config map", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentConfigMap: spec.NamespacedName{ + Name: testPodEnvironmentConfigMapName, + }, + }, + }, + cloneDescription: &acidv1.CloneDescription{}, + standbyDescription: &acidv1.StandbyDescription{}, + expectedValues: expectedCustomVariableConfigMap, + }, + { + subTest: "will override CUSTOM_VARIABLE of pod environment secret/configmap from manifest `env` section", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentConfigMap: spec.NamespacedName{ + Name: testPodEnvironmentConfigMapName, + }, + PodEnvironmentSecret: testPodEnvironmentSecretName, + ResourceCheckInterval: time.Duration(testResourceCheckInterval), + ResourceCheckTimeout: time.Duration(testResourceCheckTimeout), + }, + }, + cloneDescription: &acidv1.CloneDescription{}, + standbyDescription: &acidv1.StandbyDescription{}, + expectedValues: expectedCustomVariableSpec, pgsql: acidv1.Postgresql{ Spec: acidv1.PostgresSpec{ Env: []v1.EnvVar{ { Name: "CUSTOM_VARIABLE", - Value: "cluster-variable", + Value: "spec-env-test", }, }, }, }, }, + { + subTest: "will set CLONE_ parameters from spec and not global config or pod environment config map", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentConfigMap: spec.NamespacedName{ + Name: testPodEnvironmentConfigMapName, + }, + }, + WALES3Bucket: "global-s3-bucket", + }, + cloneDescription: &acidv1.CloneDescription{ + ClusterName: "test-cluster", + EndTimestamp: "somewhen", + UID: dummyUUID, + S3WalPath: "s3://another-bucket", + S3Endpoint: "s3.eu-central-1.amazonaws.com", + }, + standbyDescription: &acidv1.StandbyDescription{}, + expectedValues: expectedCloneEnvSpec, + }, + { + subTest: "will set CLONE_AWS_ENDPOINT parameter from pod environment config map", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentConfigMap: spec.NamespacedName{ + Name: testPodEnvironmentConfigMapName, + }, + }, + WALES3Bucket: "global-s3-bucket", + }, + cloneDescription: &acidv1.CloneDescription{ + ClusterName: "test-cluster", + EndTimestamp: "somewhen", + UID: dummyUUID, + }, + standbyDescription: &acidv1.StandbyDescription{}, + expectedValues: expectedCloneEnvConfigMap, + }, + { + subTest: "will set CLONE_AWS_ACCESS_KEY_ID parameter from pod environment secret", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentSecret: testPodEnvironmentSecretName, + ResourceCheckInterval: time.Duration(testResourceCheckInterval), + ResourceCheckTimeout: time.Duration(testResourceCheckTimeout), + }, + WALES3Bucket: 
"global-s3-bucket", + }, + cloneDescription: &acidv1.CloneDescription{ + ClusterName: "test-cluster", + EndTimestamp: "somewhen", + UID: dummyUUID, + }, + standbyDescription: &acidv1.StandbyDescription{}, + expectedValues: expectedCloneEnvSecret, + }, + { + subTest: "will set STANDBY_GOOGLE_APPLICATION_CREDENTIALS parameter from pod environment secret", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentSecret: testPodEnvironmentSecretName, + ResourceCheckInterval: time.Duration(testResourceCheckInterval), + ResourceCheckTimeout: time.Duration(testResourceCheckTimeout), + }, + WALES3Bucket: "global-s3-bucket", + }, + cloneDescription: &acidv1.CloneDescription{}, + standbyDescription: &acidv1.StandbyDescription{ + GSWalPath: "gs://some/path/", + }, + expectedValues: expectedStandbyEnvSecret, + }, } for _, tt := range tests { - cluster.OpConfig = tt.opConfig - cluster.Postgresql = tt.pgsql - - actualEnvs := cluster.generateSpiloPodEnvVars(tt.uid, tt.spiloConfig, tt.cloneDescription, tt.standbyDescription, tt.customEnvList) + c := newMockCluster(tt.opConfig) + c.Postgresql = tt.pgsql + actualEnvs := c.generateSpiloPodEnvVars( + types.UID(dummyUUID), exampleSpiloConfig, tt.cloneDescription, tt.standbyDescription) for _, ev := range tt.expectedValues { env := actualEnvs[ev.envIndex] if env.Name != ev.envVarConstant { - t.Errorf("%s %s: Expected env name %s, have %s instead", + t.Errorf("%s %s: expected env name %s, have %s instead", testName, tt.subTest, ev.envVarConstant, env.Name) } + if ev.envVarValueRef != nil { + if !reflect.DeepEqual(env.ValueFrom, ev.envVarValueRef) { + t.Errorf("%s %s: expected env value reference %#v, have %#v instead", + testName, tt.subTest, ev.envVarValueRef, env.ValueFrom) + } + continue + } + if env.Value != ev.envVarValue { - t.Errorf("%s %s: Expected env value %s, have %s instead", + t.Errorf("%s %s: expected env value %s, have %s instead", testName, tt.subTest, ev.envVarValue, env.Value) } } } } -func TestCreateLoadBalancerLogic(t *testing.T) { - var cluster = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - - testName := "TestCreateLoadBalancerLogic" - tests := []struct { - subtest string - role PostgresRole - spec *acidv1.PostgresSpec - opConfig config.Config - result bool - }{ - { - subtest: "new format, load balancer is enabled for replica", - role: Replica, - spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.True()}, - opConfig: config.Config{}, - result: true, - }, - { - subtest: "new format, load balancer is disabled for replica", - role: Replica, - spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.False()}, - opConfig: config.Config{}, - result: false, - }, - { - subtest: "new format, load balancer isn't specified for replica", - role: Replica, - spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil}, - opConfig: config.Config{EnableReplicaLoadBalancer: true}, - result: true, - }, - { - subtest: "new format, load balancer isn't specified for replica", - role: Replica, - spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil}, - opConfig: config.Config{EnableReplicaLoadBalancer: false}, - result: false, - }, - } - for _, tt := range tests { - cluster.OpConfig = tt.opConfig - result := cluster.shouldCreateLoadBalancerForService(tt.role, tt.spec) - if tt.result != result { - t.Errorf("%s %s: Load 
balancer is %t, expect %t for role %#v and spec %#v", - testName, tt.subtest, result, tt.result, tt.role, tt.spec) - } - } -} - -func TestGeneratePodDisruptionBudget(t *testing.T) { - tests := []struct { - c *Cluster - out policyv1beta1.PodDisruptionBudget - }{ - // With multiple instances. - { - New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, - k8sutil.KubernetesClient{}, - acidv1.Postgresql{ - ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, - Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, - logger, - eventRecorder), - policyv1beta1.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "postgres-myapp-database-pdb", - Namespace: "myapp", - Labels: map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, - }, - Spec: policyv1beta1.PodDisruptionBudgetSpec{ - MinAvailable: util.ToIntStr(1), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, - }, - }, - }, - }, - // With zero instances. - { - New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, - k8sutil.KubernetesClient{}, - acidv1.Postgresql{ - ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, - Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 0}}, - logger, - eventRecorder), - policyv1beta1.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "postgres-myapp-database-pdb", - Namespace: "myapp", - Labels: map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, - }, - Spec: policyv1beta1.PodDisruptionBudgetSpec{ - MinAvailable: util.ToIntStr(0), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, - }, - }, - }, - }, - // With PodDisruptionBudget disabled. - { - New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}}, - k8sutil.KubernetesClient{}, - acidv1.Postgresql{ - ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, - Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, - logger, - eventRecorder), - policyv1beta1.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "postgres-myapp-database-pdb", - Namespace: "myapp", - Labels: map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, - }, - Spec: policyv1beta1.PodDisruptionBudgetSpec{ - MinAvailable: util.ToIntStr(0), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, - }, - }, - }, - }, - // With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled. 
- { - New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}}, - k8sutil.KubernetesClient{}, - acidv1.Postgresql{ - ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, - Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, - logger, - eventRecorder), - policyv1beta1.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "postgres-myapp-database-databass-budget", - Namespace: "myapp", - Labels: map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, - }, - Spec: policyv1beta1.PodDisruptionBudgetSpec{ - MinAvailable: util.ToIntStr(1), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, - }, - }, - }, - }, - } - - for _, tt := range tests { - result := tt.c.generatePodDisruptionBudget() - if !reflect.DeepEqual(*result, tt.out) { - t.Errorf("Expected PodDisruptionBudget: %#v, got %#v", tt.out, *result) - } - } -} - -func TestShmVolume(t *testing.T) { - testName := "TestShmVolume" - tests := []struct { - subTest string - podSpec *v1.PodSpec - shmPos int - }{ - { - subTest: "empty PodSpec", - podSpec: &v1.PodSpec{ - Volumes: []v1.Volume{}, - Containers: []v1.Container{ - { - VolumeMounts: []v1.VolumeMount{}, - }, - }, - }, - shmPos: 0, - }, - { - subTest: "non empty PodSpec", - podSpec: &v1.PodSpec{ - Volumes: []v1.Volume{{}}, - Containers: []v1.Container{ - { - Name: "postgres", - VolumeMounts: []v1.VolumeMount{ - {}, - }, - }, - }, - }, - shmPos: 1, - }, - } - for _, tt := range tests { - addShmVolume(tt.podSpec) - postgresContainer := getPostgresContainer(tt.podSpec) - - volumeName := tt.podSpec.Volumes[tt.shmPos].Name - volumeMountName := postgresContainer.VolumeMounts[tt.shmPos].Name - - if volumeName != constants.ShmVolumeName { - t.Errorf("%s %s: Expected volume %s was not created, have %s instead", - testName, tt.subTest, constants.ShmVolumeName, volumeName) - } - if volumeMountName != constants.ShmVolumeName { - t.Errorf("%s %s: Expected mount %s was not created, have %s instead", - testName, tt.subTest, constants.ShmVolumeName, volumeMountName) - } - } -} - func TestCloneEnv(t *testing.T) { testName := "TestCloneEnv" tests := []struct { @@ -624,6 +942,79 @@ func TestCloneEnv(t *testing.T) { } } +func TestAppendEnvVar(t *testing.T) { + testName := "TestAppendEnvVar" + tests := []struct { + subTest string + envs []v1.EnvVar + envsToAppend []v1.EnvVar + expectedSize int + }{ + { + subTest: "append two variables - one with same key that should get rejected", + envs: []v1.EnvVar{ + { + Name: "CUSTOM_VARIABLE", + Value: "test", + }, + }, + envsToAppend: []v1.EnvVar{ + { + Name: "CUSTOM_VARIABLE", + Value: "new-test", + }, + { + Name: "ANOTHER_CUSTOM_VARIABLE", + Value: "another-test", + }, + }, + expectedSize: 2, + }, + { + subTest: "append empty slice", + envs: []v1.EnvVar{ + { + Name: "CUSTOM_VARIABLE", + Value: "test", + }, + }, + envsToAppend: []v1.EnvVar{}, + expectedSize: 1, + }, + { + subTest: "append nil", + envs: []v1.EnvVar{ + { + Name: "CUSTOM_VARIABLE", + Value: "test", + }, + }, + envsToAppend: nil, + expectedSize: 1, + }, + } + + for _, tt := range tests { + finalEnvs := appendEnvVars(tt.envs, tt.envsToAppend...) 
+ + if len(finalEnvs) != tt.expectedSize { + t.Errorf("%s %s: expected %d env variables, got %d", + testName, tt.subTest, tt.expectedSize, len(finalEnvs)) + } + + for _, env := range tt.envs { + for _, finalEnv := range finalEnvs { + if env.Name == finalEnv.Name { + if env.Value != finalEnv.Value { + t.Errorf("%s %s: expected env value %s of variable %s, got %s instead", + testName, tt.subTest, env.Value, env.Name, finalEnv.Value) + } + } + } + } + } +} + func TestStandbyEnv(t *testing.T) { testName := "TestStandbyEnv" tests := []struct { @@ -645,18 +1036,6 @@ func TestStandbyEnv(t *testing.T) { envPos: 0, envLen: 3, }, - { - subTest: "from custom gs path", - standbyOpts: &acidv1.StandbyDescription{ - GSWalPath: "gs://some/path/", - }, - env: v1.EnvVar{ - Name: "STANDBY_GOOGLE_APPLICATION_CREDENTIALS", - Value: "", - }, - envPos: 1, - envLen: 4, - }, { subTest: "ignore gs path if s3 is set", standbyOpts: &acidv1.StandbyDescription{ @@ -735,380 +1114,6 @@ func TestStandbyEnv(t *testing.T) { } } -func TestExtractPgVersionFromBinPath(t *testing.T) { - testName := "TestExtractPgVersionFromBinPath" - tests := []struct { - subTest string - binPath string - template string - expected string - }{ - { - subTest: "test current bin path with decimal against hard coded template", - binPath: "/usr/lib/postgresql/9.6/bin", - template: pgBinariesLocationTemplate, - expected: "9.6", - }, - { - subTest: "test current bin path against hard coded template", - binPath: "/usr/lib/postgresql/12/bin", - template: pgBinariesLocationTemplate, - expected: "12", - }, - { - subTest: "test alternative bin path against a matching template", - binPath: "/usr/pgsql-12/bin", - template: "/usr/pgsql-%v/bin", - expected: "12", - }, - } - - for _, tt := range tests { - pgVersion, err := extractPgVersionFromBinPath(tt.binPath, tt.template) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if pgVersion != tt.expected { - t.Errorf("%s %s: Expected version %s, have %s instead", - testName, tt.subTest, tt.expected, pgVersion) - } - } -} - -func TestSecretVolume(t *testing.T) { - testName := "TestSecretVolume" - tests := []struct { - subTest string - podSpec *v1.PodSpec - secretPos int - }{ - { - subTest: "empty PodSpec", - podSpec: &v1.PodSpec{ - Volumes: []v1.Volume{}, - Containers: []v1.Container{ - { - VolumeMounts: []v1.VolumeMount{}, - }, - }, - }, - secretPos: 0, - }, - { - subTest: "non empty PodSpec", - podSpec: &v1.PodSpec{ - Volumes: []v1.Volume{{}}, - Containers: []v1.Container{ - { - VolumeMounts: []v1.VolumeMount{ - { - Name: "data", - ReadOnly: false, - MountPath: "/data", - }, - }, - }, - }, - }, - secretPos: 1, - }, - } - for _, tt := range tests { - additionalSecretMount := "aws-iam-s3-role" - additionalSecretMountPath := "/meta/credentials" - postgresContainer := getPostgresContainer(tt.podSpec) - - numMounts := len(postgresContainer.VolumeMounts) - - addSecretVolume(tt.podSpec, additionalSecretMount, additionalSecretMountPath) - - volumeName := tt.podSpec.Volumes[tt.secretPos].Name - - if volumeName != additionalSecretMount { - t.Errorf("%s %s: Expected volume %s was not created, have %s instead", - testName, tt.subTest, additionalSecretMount, volumeName) - } - - for i := range tt.podSpec.Containers { - volumeMountName := tt.podSpec.Containers[i].VolumeMounts[tt.secretPos].Name - - if volumeMountName != additionalSecretMount { - t.Errorf("%s %s: Expected mount %s was not created, have %s instead", - testName, tt.subTest, additionalSecretMount, volumeMountName) - } - } - - postgresContainer = 
getPostgresContainer(tt.podSpec) - numMountsCheck := len(postgresContainer.VolumeMounts) - - if numMountsCheck != numMounts+1 { - t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v", - numMountsCheck, numMounts+1) - } - } -} - -const ( - testPodEnvironmentConfigMapName = "pod_env_cm" - testPodEnvironmentSecretName = "pod_env_sc" - testPodEnvironmentObjectNotExists = "idonotexist" - testPodEnvironmentSecretNameAPIError = "pod_env_sc_apierror" - testResourceCheckInterval = 3 - testResourceCheckTimeout = 10 -) - -type mockSecret struct { - v1core.SecretInterface -} - -type mockConfigMap struct { - v1core.ConfigMapInterface -} - -func (c *mockSecret) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) { - if name == testPodEnvironmentSecretNameAPIError { - return nil, fmt.Errorf("Secret PodEnvironmentSecret API error") - } - if name != testPodEnvironmentSecretName { - return nil, k8serrors.NewNotFound(schema.GroupResource{Group: "core", Resource: "secret"}, name) - } - secret := &v1.Secret{} - secret.Name = testPodEnvironmentSecretName - secret.Data = map[string][]byte{ - "minio_access_key": []byte("alpha"), - "minio_secret_key": []byte("beta"), - } - return secret, nil -} - -func (c *mockConfigMap) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ConfigMap, error) { - if name != testPodEnvironmentConfigMapName { - return nil, fmt.Errorf("NotFound") - } - configmap := &v1.ConfigMap{} - configmap.Name = testPodEnvironmentConfigMapName - configmap.Data = map[string]string{ - "foo1": "bar1", - "foo2": "bar2", - } - return configmap, nil -} - -type MockSecretGetter struct { -} - -type MockConfigMapsGetter struct { -} - -func (c *MockSecretGetter) Secrets(namespace string) v1core.SecretInterface { - return &mockSecret{} -} - -func (c *MockConfigMapsGetter) ConfigMaps(namespace string) v1core.ConfigMapInterface { - return &mockConfigMap{} -} - -func newMockKubernetesClient() k8sutil.KubernetesClient { - return k8sutil.KubernetesClient{ - SecretsGetter: &MockSecretGetter{}, - ConfigMapsGetter: &MockConfigMapsGetter{}, - } -} -func newMockCluster(opConfig config.Config) *Cluster { - cluster := &Cluster{ - Config: Config{OpConfig: opConfig}, - KubeClient: newMockKubernetesClient(), - } - return cluster -} - -func TestPodEnvironmentConfigMapVariables(t *testing.T) { - testName := "TestPodEnvironmentConfigMapVariables" - tests := []struct { - subTest string - opConfig config.Config - envVars []v1.EnvVar - err error - }{ - { - subTest: "no PodEnvironmentConfigMap", - envVars: []v1.EnvVar{}, - }, - { - subTest: "missing PodEnvironmentConfigMap", - opConfig: config.Config{ - Resources: config.Resources{ - PodEnvironmentConfigMap: spec.NamespacedName{ - Name: testPodEnvironmentObjectNotExists, - }, - }, - }, - err: fmt.Errorf("could not read PodEnvironmentConfigMap: NotFound"), - }, - { - subTest: "Pod environment vars configured by PodEnvironmentConfigMap", - opConfig: config.Config{ - Resources: config.Resources{ - PodEnvironmentConfigMap: spec.NamespacedName{ - Name: testPodEnvironmentConfigMapName, - }, - }, - }, - envVars: []v1.EnvVar{ - { - Name: "foo1", - Value: "bar1", - }, - { - Name: "foo2", - Value: "bar2", - }, - }, - }, - } - for _, tt := range tests { - c := newMockCluster(tt.opConfig) - vars, err := c.getPodEnvironmentConfigMapVariables() - sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name }) - if !reflect.DeepEqual(vars, tt.envVars) { - t.Errorf("%s %s: expected `%v` but got `%v`", - testName, 
tt.subTest, tt.envVars, vars) - } - if tt.err != nil { - if err.Error() != tt.err.Error() { - t.Errorf("%s %s: expected error `%v` but got `%v`", - testName, tt.subTest, tt.err, err) - } - } else { - if err != nil { - t.Errorf("%s %s: expected no error but got error: `%v`", - testName, tt.subTest, err) - } - } - } -} - -// Test if the keys of an existing secret are properly referenced -func TestPodEnvironmentSecretVariables(t *testing.T) { - maxRetries := int(testResourceCheckTimeout / testResourceCheckInterval) - testName := "TestPodEnvironmentSecretVariables" - tests := []struct { - subTest string - opConfig config.Config - envVars []v1.EnvVar - err error - }{ - { - subTest: "No PodEnvironmentSecret configured", - envVars: []v1.EnvVar{}, - }, - { - subTest: "Secret referenced by PodEnvironmentSecret does not exist", - opConfig: config.Config{ - Resources: config.Resources{ - PodEnvironmentSecret: testPodEnvironmentObjectNotExists, - ResourceCheckInterval: time.Duration(testResourceCheckInterval), - ResourceCheckTimeout: time.Duration(testResourceCheckTimeout), - }, - }, - err: fmt.Errorf("could not read Secret PodEnvironmentSecretName: still failing after %d retries: secret.core %q not found", maxRetries, testPodEnvironmentObjectNotExists), - }, - { - subTest: "API error during PodEnvironmentSecret retrieval", - opConfig: config.Config{ - Resources: config.Resources{ - PodEnvironmentSecret: testPodEnvironmentSecretNameAPIError, - ResourceCheckInterval: time.Duration(testResourceCheckInterval), - ResourceCheckTimeout: time.Duration(testResourceCheckTimeout), - }, - }, - err: fmt.Errorf("could not read Secret PodEnvironmentSecretName: Secret PodEnvironmentSecret API error"), - }, - { - subTest: "Pod environment vars reference all keys from secret configured by PodEnvironmentSecret", - opConfig: config.Config{ - Resources: config.Resources{ - PodEnvironmentSecret: testPodEnvironmentSecretName, - ResourceCheckInterval: time.Duration(testResourceCheckInterval), - ResourceCheckTimeout: time.Duration(testResourceCheckTimeout), - }, - }, - envVars: []v1.EnvVar{ - { - Name: "minio_access_key", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: testPodEnvironmentSecretName, - }, - Key: "minio_access_key", - }, - }, - }, - { - Name: "minio_secret_key", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: testPodEnvironmentSecretName, - }, - Key: "minio_secret_key", - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - c := newMockCluster(tt.opConfig) - vars, err := c.getPodEnvironmentSecretVariables() - sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name }) - if !reflect.DeepEqual(vars, tt.envVars) { - t.Errorf("%s %s: expected `%v` but got `%v`", - testName, tt.subTest, tt.envVars, vars) - } - if tt.err != nil { - if err.Error() != tt.err.Error() { - t.Errorf("%s %s: expected error `%v` but got `%v`", - testName, tt.subTest, tt.err, err) - } - } else { - if err != nil { - t.Errorf("%s %s: expected no error but got error: `%v`", - testName, tt.subTest, err) - } - } - } - -} - -func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { - required := map[string]bool{ - "PGHOST": false, - "PGPORT": false, - "PGUSER": false, - "PGSCHEMA": false, - "PGPASSWORD": false, - "CONNECTION_POOLER_MODE": false, - "CONNECTION_POOLER_PORT": false, - } - - container := getPostgresContainer(&podSpec.Spec) - 
envs := container.Env - for _, env := range envs { - required[env.Name] = true - } - - for env, value := range required { - if !value { - return fmt.Errorf("Environment variable %s is not present", env) - } - } - - return nil -} - func TestNodeAffinity(t *testing.T) { var err error var spec acidv1.PostgresSpec @@ -1286,6 +1291,132 @@ func TestTLS(t *testing.T) { assert.Contains(t, postgresContainer.Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"}) } +func TestShmVolume(t *testing.T) { + testName := "TestShmVolume" + tests := []struct { + subTest string + podSpec *v1.PodSpec + shmPos int + }{ + { + subTest: "empty PodSpec", + podSpec: &v1.PodSpec{ + Volumes: []v1.Volume{}, + Containers: []v1.Container{ + { + VolumeMounts: []v1.VolumeMount{}, + }, + }, + }, + shmPos: 0, + }, + { + subTest: "non empty PodSpec", + podSpec: &v1.PodSpec{ + Volumes: []v1.Volume{{}}, + Containers: []v1.Container{ + { + Name: "postgres", + VolumeMounts: []v1.VolumeMount{ + {}, + }, + }, + }, + }, + shmPos: 1, + }, + } + for _, tt := range tests { + addShmVolume(tt.podSpec) + postgresContainer := getPostgresContainer(tt.podSpec) + + volumeName := tt.podSpec.Volumes[tt.shmPos].Name + volumeMountName := postgresContainer.VolumeMounts[tt.shmPos].Name + + if volumeName != constants.ShmVolumeName { + t.Errorf("%s %s: Expected volume %s was not created, have %s instead", + testName, tt.subTest, constants.ShmVolumeName, volumeName) + } + if volumeMountName != constants.ShmVolumeName { + t.Errorf("%s %s: Expected mount %s was not created, have %s instead", + testName, tt.subTest, constants.ShmVolumeName, volumeMountName) + } + } +} + +func TestSecretVolume(t *testing.T) { + testName := "TestSecretVolume" + tests := []struct { + subTest string + podSpec *v1.PodSpec + secretPos int + }{ + { + subTest: "empty PodSpec", + podSpec: &v1.PodSpec{ + Volumes: []v1.Volume{}, + Containers: []v1.Container{ + { + VolumeMounts: []v1.VolumeMount{}, + }, + }, + }, + secretPos: 0, + }, + { + subTest: "non empty PodSpec", + podSpec: &v1.PodSpec{ + Volumes: []v1.Volume{{}}, + Containers: []v1.Container{ + { + VolumeMounts: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + }, + }, + }, + }, + }, + secretPos: 1, + }, + } + for _, tt := range tests { + additionalSecretMount := "aws-iam-s3-role" + additionalSecretMountPath := "/meta/credentials" + postgresContainer := getPostgresContainer(tt.podSpec) + + numMounts := len(postgresContainer.VolumeMounts) + + addSecretVolume(tt.podSpec, additionalSecretMount, additionalSecretMountPath) + + volumeName := tt.podSpec.Volumes[tt.secretPos].Name + + if volumeName != additionalSecretMount { + t.Errorf("%s %s: Expected volume %s was not created, have %s instead", + testName, tt.subTest, additionalSecretMount, volumeName) + } + + for i := range tt.podSpec.Containers { + volumeMountName := tt.podSpec.Containers[i].VolumeMounts[tt.secretPos].Name + + if volumeMountName != additionalSecretMount { + t.Errorf("%s %s: Expected mount %s was not created, have %s instead", + testName, tt.subTest, additionalSecretMount, volumeMountName) + } + } + + postgresContainer = getPostgresContainer(tt.podSpec) + numMountsCheck := len(postgresContainer.VolumeMounts) + + if numMountsCheck != numMounts+1 { + t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v", + numMountsCheck, numMounts+1) + } + } +} + func TestAdditionalVolume(t *testing.T) { testName := "TestAdditionalVolume" @@ -1406,6 +1537,109 @@ func TestAdditionalVolume(t *testing.T) { } } +func TestVolumeSelector(t 
*testing.T) { + testName := "TestVolumeSelector" + makeSpec := func(volume acidv1.Volume) acidv1.PostgresSpec { + return acidv1.PostgresSpec{ + TeamID: "myapp", + NumberOfInstances: 0, + Resources: &acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + }, + Volume: volume, + } + } + + tests := []struct { + subTest string + volume acidv1.Volume + wantSelector *metav1.LabelSelector + }{ + { + subTest: "PVC template has no selector", + volume: acidv1.Volume{ + Size: "1G", + }, + wantSelector: nil, + }, + { + subTest: "PVC template has simple label selector", + volume: acidv1.Volume{ + Size: "1G", + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"environment": "unittest"}, + }, + }, + wantSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"environment": "unittest"}, + }, + }, + { + subTest: "PVC template has full selector", + volume: acidv1.Volume{ + Size: "1G", + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"environment": "unittest"}, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "flavour", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"banana", "chocolate"}, + }, + }, + }, + }, + wantSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"environment": "unittest"}, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "flavour", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"banana", "chocolate"}, + }, + }, + }, + }, + } + + cluster := New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + for _, tt := range tests { + pgSpec := makeSpec(tt.volume) + sts, err := cluster.generateStatefulSet(&pgSpec) + if err != nil { + t.Fatalf("%s %s: no statefulset created %v", testName, tt.subTest, err) + } + + volIdx := len(sts.Spec.VolumeClaimTemplates) + for i, ct := range sts.Spec.VolumeClaimTemplates { + if ct.ObjectMeta.Name == constants.DataVolumeName { + volIdx = i + break + } + } + if volIdx == len(sts.Spec.VolumeClaimTemplates) { + t.Errorf("%s %s: no datavolume found in sts", testName, tt.subTest) + } + + selector := sts.Spec.VolumeClaimTemplates[volIdx].Spec.Selector + if !reflect.DeepEqual(selector, tt.wantSelector) { + t.Errorf("%s %s: expected: %#v but got: %#v", testName, tt.subTest, tt.wantSelector, selector) + } + } +} + // inject sidecars through all available mechanisms and check the resulting container specs func TestSidecars(t *testing.T) { var err error @@ -1461,7 +1695,7 @@ func TestSidecars(t *testing.T) { }, acidv1.Sidecar{ Name: "replace-sidecar", - DockerImage: "overwrite-image", + DockerImage: "override-image", }, }, } @@ -1589,7 +1823,7 @@ func TestSidecars(t *testing.T) { // replaced sidecar assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{ Name: "replace-sidecar", - Image: "overwrite-image", + Image: "override-image", Resources: generateKubernetesResources("200m", "500m", "0.7Gi", "1.3Gi"), ImagePullPolicy: v1.PullIfNotPresent, Env: env, @@ -1610,6 +1844,117 @@ func TestSidecars(t *testing.T) { } +func TestGeneratePodDisruptionBudget(t *testing.T) { + tests := []struct { + c *Cluster + out policyv1beta1.PodDisruptionBudget + }{ + // With multiple instances. 
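+		// With 3 instances the expected PDB has minAvailable 1 and selects the
+		// master pod of the cluster.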
+ { + New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, + logger, + eventRecorder), + policyv1beta1.PodDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: "postgres-myapp-database-pdb", + Namespace: "myapp", + Labels: map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, + }, + Spec: policyv1beta1.PodDisruptionBudgetSpec{ + MinAvailable: util.ToIntStr(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, + }, + }, + }, + }, + // With zero instances. + { + New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 0}}, + logger, + eventRecorder), + policyv1beta1.PodDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: "postgres-myapp-database-pdb", + Namespace: "myapp", + Labels: map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, + }, + Spec: policyv1beta1.PodDisruptionBudgetSpec{ + MinAvailable: util.ToIntStr(0), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, + }, + }, + }, + }, + // With PodDisruptionBudget disabled. + { + New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, + logger, + eventRecorder), + policyv1beta1.PodDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: "postgres-myapp-database-pdb", + Namespace: "myapp", + Labels: map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, + }, + Spec: policyv1beta1.PodDisruptionBudgetSpec{ + MinAvailable: util.ToIntStr(0), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, + }, + }, + }, + }, + // With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled. 
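+		// Expect the custom name template to render as
+		// postgres-myapp-database-databass-budget, again with minAvailable 1.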
+ { + New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, + logger, + eventRecorder), + policyv1beta1.PodDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: "postgres-myapp-database-databass-budget", + Namespace: "myapp", + Labels: map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, + }, + Spec: policyv1beta1.PodDisruptionBudgetSpec{ + MinAvailable: util.ToIntStr(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, + }, + }, + }, + }, + } + + for _, tt := range tests { + result := tt.c.generatePodDisruptionBudget() + if !reflect.DeepEqual(*result, tt.out) { + t.Errorf("Expected PodDisruptionBudget: %#v, got %#v", tt.out, *result) + } + } +} + func TestGenerateService(t *testing.T) { var spec acidv1.PostgresSpec var cluster *Cluster @@ -1636,7 +1981,7 @@ func TestGenerateService(t *testing.T) { }, acidv1.Sidecar{ Name: "replace-sidecar", - DockerImage: "overwrite-image", + DockerImage: "override-image", }, }, EnableMasterLoadBalancer: &enableLB, @@ -1690,6 +2035,65 @@ func TestGenerateService(t *testing.T) { } +func TestCreateLoadBalancerLogic(t *testing.T) { + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + testName := "TestCreateLoadBalancerLogic" + tests := []struct { + subtest string + role PostgresRole + spec *acidv1.PostgresSpec + opConfig config.Config + result bool + }{ + { + subtest: "new format, load balancer is enabled for replica", + role: Replica, + spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.True()}, + opConfig: config.Config{}, + result: true, + }, + { + subtest: "new format, load balancer is disabled for replica", + role: Replica, + spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.False()}, + opConfig: config.Config{}, + result: false, + }, + { + subtest: "new format, load balancer isn't specified for replica", + role: Replica, + spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil}, + opConfig: config.Config{EnableReplicaLoadBalancer: true}, + result: true, + }, + { + subtest: "new format, load balancer isn't specified for replica", + role: Replica, + spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil}, + opConfig: config.Config{EnableReplicaLoadBalancer: false}, + result: false, + }, + } + for _, tt := range tests { + cluster.OpConfig = tt.opConfig + result := cluster.shouldCreateLoadBalancerForService(tt.role, tt.spec) + if tt.result != result { + t.Errorf("%s %s: Load balancer is %t, expect %t for role %#v and spec %#v", + testName, tt.subtest, result, tt.result, tt.role, tt.spec) + } + } +} + func newLBFakeClient() (k8sutil.KubernetesClient, *fake.Clientset) { clientSet := fake.NewSimpleClientset() @@ -2231,106 +2635,3 @@ func TestGenerateCapabilities(t *testing.T) { } } } - -func TestVolumeSelector(t *testing.T) { - testName := "TestVolumeSelector" - makeSpec := func(volume acidv1.Volume) acidv1.PostgresSpec { - return acidv1.PostgresSpec{ - 
TeamID: "myapp", - NumberOfInstances: 0, - Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - }, - Volume: volume, - } - } - - tests := []struct { - subTest string - volume acidv1.Volume - wantSelector *metav1.LabelSelector - }{ - { - subTest: "PVC template has no selector", - volume: acidv1.Volume{ - Size: "1G", - }, - wantSelector: nil, - }, - { - subTest: "PVC template has simple label selector", - volume: acidv1.Volume{ - Size: "1G", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"environment": "unittest"}, - }, - }, - wantSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"environment": "unittest"}, - }, - }, - { - subTest: "PVC template has full selector", - volume: acidv1.Volume{ - Size: "1G", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"environment": "unittest"}, - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: "flavour", - Operator: metav1.LabelSelectorOpIn, - Values: []string{"banana", "chocolate"}, - }, - }, - }, - }, - wantSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"environment": "unittest"}, - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: "flavour", - Operator: metav1.LabelSelectorOpIn, - Values: []string{"banana", "chocolate"}, - }, - }, - }, - }, - } - - cluster := New( - Config{ - OpConfig: config.Config{ - PodManagementPolicy: "ordered_ready", - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - - for _, tt := range tests { - pgSpec := makeSpec(tt.volume) - sts, err := cluster.generateStatefulSet(&pgSpec) - if err != nil { - t.Fatalf("%s %s: no statefulset created %v", testName, tt.subTest, err) - } - - volIdx := len(sts.Spec.VolumeClaimTemplates) - for i, ct := range sts.Spec.VolumeClaimTemplates { - if ct.ObjectMeta.Name == constants.DataVolumeName { - volIdx = i - break - } - } - if volIdx == len(sts.Spec.VolumeClaimTemplates) { - t.Errorf("%s %s: no datavolume found in sts", testName, tt.subTest) - } - - selector := sts.Spec.VolumeClaimTemplates[volIdx].Spec.Selector - if !reflect.DeepEqual(selector, tt.wantSelector) { - t.Errorf("%s %s: expected: %#v but got: %#v", testName, tt.subTest, tt.wantSelector, selector) - } - } -}