diff --git a/e2e/requirements.txt b/e2e/requirements.txt
index 4f6f5ac5f..b276d2537 100644
--- a/e2e/requirements.txt
+++ b/e2e/requirements.txt
@@ -1,3 +1,3 @@
 kubernetes==11.0.0
 timeout_decorator==0.4.1
-pyyaml==5.3.1
+pyyaml==5.4.1
diff --git a/kubectl-pg/cmd/addDb.go b/kubectl-pg/cmd/addDb.go
index cd45ea974..1c33579d9 100644
--- a/kubectl-pg/cmd/addDb.go
+++ b/kubectl-pg/cmd/addDb.go
@@ -71,7 +71,7 @@ func addDb(dbName string, dbOwner string, clusterName string) {
 
 	var dbOwnerExists bool
 	dbUsers := postgresql.Spec.Users
-	for key, _ := range dbUsers {
+	for key := range dbUsers {
 		if key == dbOwner {
 			dbOwnerExists = true
 		}
diff --git a/kubectl-pg/cmd/connect.go b/kubectl-pg/cmd/connect.go
index 2f1500639..2c6d87835 100644
--- a/kubectl-pg/cmd/connect.go
+++ b/kubectl-pg/cmd/connect.go
@@ -23,13 +23,14 @@ THE SOFTWARE.
 package cmd
 
 import (
+	"log"
+	"os"
+	user "os/user"
+
 	"github.com/spf13/cobra"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/remotecommand"
-	"log"
-	"os"
-	user "os/user"
 )
 
 // connectCmd represents the kubectl pg connect command
@@ -80,13 +81,13 @@ kubectl pg connect -c cluster -p -u user01 -d db01
 
 func connect(clusterName string, master bool, replica string, psql bool, user string, dbName string) {
 	config := getConfig()
-	client, er := kubernetes.NewForConfig(config)
-	if er != nil {
-		log.Fatal(er)
+	client, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		log.Fatal(err)
 	}
 
 	podName := getPodName(clusterName, master, replica)
-	execRequest := &rest.Request{}
+	var execRequest *rest.Request
 
 	if psql {
 		execRequest = client.CoreV1().RESTClient().Post().Resource("pods").
diff --git a/kubectl-pg/cmd/create.go b/kubectl-pg/cmd/create.go
index 4d1bc75fb..00ee7ac24 100644
--- a/kubectl-pg/cmd/create.go
+++ b/kubectl-pg/cmd/create.go
@@ -53,6 +53,9 @@ kubectl pg create -f cluster-manifest.yaml
 func create(fileName string) {
 	config := getConfig()
 	postgresConfig, err := PostgresqlLister.NewForConfig(config)
+	if err != nil {
+		log.Fatal(err)
+	}
 	ymlFile, err := ioutil.ReadFile(fileName)
 	if err != nil {
 		log.Fatal(err)
diff --git a/kubectl-pg/cmd/extVolume.go b/kubectl-pg/cmd/extVolume.go
index 58a9eef67..02ccc372d 100644
--- a/kubectl-pg/cmd/extVolume.go
+++ b/kubectl-pg/cmd/extVolume.go
@@ -67,7 +67,7 @@ func extVolume(increasedVolumeSize string, clusterName string) {
 	namespace := getCurrentNamespace()
 	postgresql, err := postgresConfig.Postgresqls(namespace).Get(context.TODO(), clusterName, metav1.GetOptions{})
 	if err != nil {
-		log.Fatalf("hii %v", err)
+		log.Fatal(err)
 	}
 
 	oldSize, err := resource.ParseQuantity(postgresql.Spec.Volume.Size)
diff --git a/kubectl-pg/cmd/scale.go b/kubectl-pg/cmd/scale.go
index 5e8848de6..0a7bdc60f 100644
--- a/kubectl-pg/cmd/scale.go
+++ b/kubectl-pg/cmd/scale.go
@@ -31,7 +31,6 @@ import (
 
 	"github.com/spf13/cobra"
 	PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
-	v1 "k8s.io/api/apps/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
@@ -46,6 +45,9 @@ var scaleCmd = &cobra.Command{
 Scaling to 0 leads to down time.`,
 	Run: func(cmd *cobra.Command, args []string) {
 		clusterName, err := cmd.Flags().GetString("cluster")
+		if err != nil {
+			log.Fatal(err)
+		}
 		namespace, err := cmd.Flags().GetString("namespace")
 		if err != nil {
 			log.Fatal(err)
@@ -129,8 +131,7 @@ func allowedMinMaxInstances(config *rest.Config) (int32, int32) {
 		log.Fatal(err)
 	}
 
-	var operator *v1.Deployment
-	operator = getPostgresOperator(k8sClient)
+	operator := getPostgresOperator(k8sClient)
 	operatorContainer := operator.Spec.Template.Spec.Containers
 
 	var configMapName, operatorConfigName string
diff --git a/kubectl-pg/cmd/update.go b/kubectl-pg/cmd/update.go
index 604d613fc..6a5f4e36d 100644
--- a/kubectl-pg/cmd/update.go
+++ b/kubectl-pg/cmd/update.go
@@ -57,6 +57,9 @@ kubectl pg update -f cluster-manifest.yaml
 func updatePgResources(fileName string) {
 	config := getConfig()
 	postgresConfig, err := PostgresqlLister.NewForConfig(config)
+	if err != nil {
+		log.Fatal(err)
+	}
 	ymlFile, err := ioutil.ReadFile(fileName)
 	if err != nil {
 		log.Fatal(err)
diff --git a/kubectl-pg/cmd/util.go b/kubectl-pg/cmd/util.go
index a6bc10296..fa0eb6d42 100644
--- a/kubectl-pg/cmd/util.go
+++ b/kubectl-pg/cmd/util.go
@@ -99,9 +99,9 @@ func confirmAction(clusterName string, namespace string) {
 
 func getPodName(clusterName string, master bool, replicaNumber string) string {
 	config := getConfig()
-	client, er := kubernetes.NewForConfig(config)
-	if er != nil {
-		log.Fatal(er)
+	client, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		log.Fatal(err)
 	}
 
 	postgresConfig, err := PostgresqlLister.NewForConfig(config)
diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index 512f6a6c9..273eb2932 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -1164,7 +1164,7 @@ func (c *Cluster) initHumanUsers() error {
 	for _, superuserTeam := range superuserTeams {
 		err := c.initTeamMembers(superuserTeam, true)
 		if err != nil {
-			return fmt.Errorf("Cannot initialize members for team %q of Postgres superusers: %v", superuserTeam, err)
+			return fmt.Errorf("cannot initialize members for team %q of Postgres superusers: %v", superuserTeam, err)
 		}
 		if superuserTeam == c.Spec.TeamID {
 			clusterIsOwnedBySuperuserTeam = true
@@ -1177,7 +1177,7 @@ func (c *Cluster) initHumanUsers() error {
 			if !(util.SliceContains(superuserTeams, additionalTeam)) {
 				err := c.initTeamMembers(additionalTeam, false)
 				if err != nil {
-					return fmt.Errorf("Cannot initialize members for additional team %q for cluster owned by %q: %v", additionalTeam, c.Spec.TeamID, err)
+					return fmt.Errorf("cannot initialize members for additional team %q for cluster owned by %q: %v", additionalTeam, c.Spec.TeamID, err)
 				}
 			}
 		}
@@ -1190,7 +1190,7 @@ func (c *Cluster) initHumanUsers() error {
 
 	err := c.initTeamMembers(c.Spec.TeamID, false)
 	if err != nil {
-		return fmt.Errorf("Cannot initialize members for team %q who owns the Postgres cluster: %v", c.Spec.TeamID, err)
+		return fmt.Errorf("cannot initialize members for team %q who owns the Postgres cluster: %v", c.Spec.TeamID, err)
 	}
 
 	return nil
diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go
index 4e8af610d..f579b446e 100644
--- a/pkg/cluster/connection_pooler.go
+++ b/pkg/cluster/connection_pooler.go
@@ -420,9 +420,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
 
 	// Clean up the deployment object. If deployment resource we've remembered
 	// is somehow empty, try to delete based on what would we generate
-	var deployment *appsv1.Deployment
-	deployment = c.ConnectionPooler[role].Deployment
-
+	deployment := c.ConnectionPooler[role].Deployment
 	policy := metav1.DeletePropagationForeground
 	options := metav1.DeleteOptions{PropagationPolicy: &policy}
 
@@ -445,8 +443,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
 	}
 
 	// Repeat the same for the service object
-	var service *v1.Service
-	service = c.ConnectionPooler[role].Service
+	service := c.ConnectionPooler[role].Service
 	if service == nil {
 		c.logger.Debugf("no connection pooler service object to delete")
 	} else {
diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go
index ace665fb0..9e4b045ab 100644
--- a/pkg/cluster/k8sres.go
+++ b/pkg/cluster/k8sres.go
@@ -213,10 +213,10 @@ PatroniInitDBParams:
 	for _, k := range initdbOptionNames {
 		v := patroni.InitDB[k]
 		for i, defaultParam := range config.Bootstrap.Initdb {
-			switch defaultParam.(type) {
+			switch t := defaultParam.(type) {
 			case map[string]string:
 				{
-					for k1 := range defaultParam.(map[string]string) {
+					for k1 := range t {
 						if k1 == k {
 							(config.Bootstrap.Initdb[i]).(map[string]string)[k] = v
 							continue PatroniInitDBParams
@@ -226,7 +226,7 @@ PatroniInitDBParams:
 			case string:
 				{
 					/* if the option already occurs in the list */
-					if defaultParam.(string) == v {
+					if t == v {
 						continue PatroniInitDBParams
 					}
 				}
@@ -264,7 +264,7 @@ PatroniInitDBParams:
 	if patroni.SynchronousMode {
 		config.Bootstrap.DCS.SynchronousMode = patroni.SynchronousMode
 	}
-	if patroni.SynchronousModeStrict != false {
+	if patroni.SynchronousModeStrict {
 		config.Bootstrap.DCS.SynchronousModeStrict = patroni.SynchronousModeStrict
 	}
 
@@ -336,7 +336,7 @@ func nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAff
 	if len(nodeReadinessLabel) == 0 && nodeAffinity == nil {
 		return nil
 	}
-	nodeAffinityCopy := *&v1.NodeAffinity{}
+	nodeAffinityCopy := v1.NodeAffinity{}
 	if nodeAffinity != nil {
 		nodeAffinityCopy = *nodeAffinity.DeepCopy()
 	}
@@ -1279,15 +1279,12 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy)
 	}
 
-	stsAnnotations := make(map[string]string)
-	stsAnnotations = c.AnnotationsToPropagate(c.annotationsSet(nil))
-
 	statefulSet := &appsv1.StatefulSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:        c.statefulSetName(),
 			Namespace:   c.Namespace,
 			Labels:      c.labelsSet(true),
-			Annotations: stsAnnotations,
+			Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)),
 		},
 		Spec: appsv1.StatefulSetSpec{
 			Replicas: &numberOfInstances,
diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go
index cf0441f98..29908c0e5 100644
--- a/pkg/cluster/k8sres_test.go
+++ b/pkg/cluster/k8sres_test.go
@@ -11,6 +11,7 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
 	"github.com/zalando/postgres-operator/pkg/spec"
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/config"
@@ -24,9 +25,21 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/client-go/kubernetes/fake"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 )
 
+func newFakeK8sTestClient() (k8sutil.KubernetesClient, *fake.Clientset) {
+	acidClientSet := fakeacidv1.NewSimpleClientset()
+	clientSet := fake.NewSimpleClientset()
+
+	return k8sutil.KubernetesClient{
+		PodsGetter:         clientSet.CoreV1(),
+		PostgresqlsGetter:  acidClientSet.AcidV1(),
+		StatefulSetsGetter: clientSet.AppsV1(),
+	}, clientSet
+}
+
 // For testing purposes
 type ExpectedValue struct {
 	envIndex int
@@ -930,15 +943,6 @@ func TestNodeAffinity(t *testing.T) {
 	assert.Equal(t, s.Spec.Template.Spec.Affinity.NodeAffinity, nodeAff, "cluster template has correct node affinity")
 }
 
-func testCustomPodTemplate(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
-	if podSpec.ObjectMeta.Name != "test-pod-template" {
-		return fmt.Errorf("Custom pod template is not used, current spec %+v",
-			podSpec)
-	}
-
-	return nil
-}
-
 func testDeploymentOwnerReference(cluster *Cluster, deployment *appsv1.Deployment) error {
 	owner := deployment.ObjectMeta.OwnerReferences[0]
 
@@ -962,16 +966,23 @@ func testServiceOwnerReference(cluster *Cluster, service *v1.Service, role Postg
 }
 
 func TestTLS(t *testing.T) {
-	var err error
-	var spec acidv1.PostgresSpec
-	var cluster *Cluster
-	var spiloRunAsUser = int64(101)
-	var spiloRunAsGroup = int64(103)
-	var spiloFSGroup = int64(103)
-	var additionalVolumes = spec.AdditionalVolumes
-	makeSpec := func(tls acidv1.TLSDescription) acidv1.PostgresSpec {
-		return acidv1.PostgresSpec{
+	client, _ := newFakeK8sTestClient()
+	clusterName := "acid-test-cluster"
+	namespace := "default"
+	tlsSecretName := "my-secret"
+	spiloRunAsUser := int64(101)
+	spiloRunAsGroup := int64(103)
+	spiloFSGroup := int64(103)
+	defaultMode := int32(0640)
+	mountPath := "/tls"
+
+	pg := acidv1.Postgresql{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      clusterName,
+			Namespace: namespace,
+		},
+		Spec: acidv1.PostgresSpec{
 			TeamID: "myapp", NumberOfInstances: 1,
 			Resources: acidv1.Resources{
 				ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
@@ -980,11 +991,24 @@ func TestTLS(t *testing.T) {
 			Volume: acidv1.Volume{
 				Size: "1G",
 			},
-			TLS: &tls,
-		}
+			TLS: &acidv1.TLSDescription{
+				SecretName: tlsSecretName, CAFile: "ca.crt"},
+			AdditionalVolumes: []acidv1.AdditionalVolume{
+				acidv1.AdditionalVolume{
+					Name:      tlsSecretName,
+					MountPath: mountPath,
+					VolumeSource: v1.VolumeSource{
+						Secret: &v1.SecretVolumeSource{
+							SecretName:  tlsSecretName,
+							DefaultMode: &defaultMode,
+						},
+					},
+				},
+			},
+		},
 	}
 
-	cluster = New(
+	var cluster = New(
 		Config{
 			OpConfig: config.Config{
 				PodManagementPolicy: "ordered_ready",
@@ -999,28 +1023,14 @@ func TestTLS(t *testing.T) {
 					SpiloFSGroup: &spiloFSGroup,
 				},
 			},
-		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
-	spec = makeSpec(acidv1.TLSDescription{SecretName: "my-secret", CAFile: "ca.crt"})
-	s, err := cluster.generateStatefulSet(&spec)
-	if err != nil {
-		assert.NoError(t, err)
-	}
+		}, client, pg, logger, eventRecorder)
+
+	// create a statefulset
+	sts, err := cluster.createStatefulSet()
+	assert.NoError(t, err)
 
 	fsGroup := int64(103)
-	assert.Equal(t, &fsGroup, s.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")
-
-	defaultMode := int32(0640)
-	mountPath := "/tls"
-	additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
-		Name:      spec.TLS.SecretName,
-		MountPath: mountPath,
-		VolumeSource: v1.VolumeSource{
-			Secret: &v1.SecretVolumeSource{
-				SecretName:  spec.TLS.SecretName,
-				DefaultMode: &defaultMode,
-			},
-		},
-	})
+	assert.Equal(t, &fsGroup, sts.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")
 
 	volume := v1.Volume{
 		Name: "my-secret",
@@ -1031,16 +1041,16 @@ func TestTLS(t *testing.T) {
 			},
 		},
 	}
-	assert.Contains(t, s.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume")
+	assert.Contains(t, sts.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume")
 
-	assert.Contains(t, s.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
+	assert.Contains(t, sts.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
 		MountPath: "/tls",
 		Name:      "my-secret",
 	}, "the volume gets mounted in /tls")
 
-	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
-	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
-	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
+	assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
+	assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
+	assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
 }
 
 func TestAdditionalVolume(t *testing.T) {
diff --git a/pkg/cluster/majorversionupgrade.go b/pkg/cluster/majorversionupgrade.go
index 06dd979b2..c997a675a 100644
--- a/pkg/cluster/majorversionupgrade.go
+++ b/pkg/cluster/majorversionupgrade.go
@@ -19,8 +19,8 @@ var VersionMap = map[string]int{
 
 // IsBiggerPostgresVersion Compare two Postgres version numbers
 func IsBiggerPostgresVersion(old string, new string) bool {
-	oldN, _ := VersionMap[old]
-	newN, _ := VersionMap[new]
+	oldN := VersionMap[old]
+	newN := VersionMap[new]
 	return newN > oldN
 }
 
diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go
index 3f08cfb4d..3036a9942 100644
--- a/pkg/cluster/sync.go
+++ b/pkg/cluster/sync.go
@@ -11,7 +11,6 @@ import (
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
-	appsv1 "k8s.io/api/apps/v1"
 	batchv1beta1 "k8s.io/api/batch/v1beta1"
 	v1 "k8s.io/api/core/v1"
 	policybeta1 "k8s.io/api/policy/v1beta1"
@@ -260,28 +259,6 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
 	return nil
 }
 
-func (c *Cluster) mustUpdatePodsAfterLazyUpdate(desiredSset *appsv1.StatefulSet) (bool, error) {
-
-	pods, err := c.listPods()
-	if err != nil {
-		return false, fmt.Errorf("could not list pods of the statefulset: %v", err)
-	}
-
-	for _, pod := range pods {
-
-		effectivePodImage := pod.Spec.Containers[0].Image
-		ssImage := desiredSset.Spec.Template.Spec.Containers[0].Image
-
-		if ssImage != effectivePodImage {
-			c.logger.Infof("not all pods were re-started when the lazy upgrade was enabled; forcing the rolling upgrade now")
-			return true, nil
-		}
-
-	}
-
-	return false, nil
-}
-
 func (c *Cluster) syncStatefulSet() error {
 	podsToRecreate := make([]v1.Pod, 0)
 
diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go
index fa8a52a1b..fabc6b216 100644
--- a/pkg/cluster/util.go
+++ b/pkg/cluster/util.go
@@ -227,11 +227,6 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isU
 	}
 }
 
-func (c *Cluster) logVolumeChanges(old, new acidv1.Volume) {
-	c.logger.Infof("volume specification has been changed")
-	logNiceDiff(c.logger, old, new)
-}
-
 func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
 
 	if teamID == "" {
@@ -251,9 +246,7 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
 		}
 	}
 
-	for _, member := range additionalMembers {
-		members = append(members, member)
-	}
+	members = append(members, additionalMembers...)
 	}
 
 	if !c.OpConfig.EnableTeamsAPI {
@@ -292,12 +285,10 @@ func (c *Cluster) annotationsSet(annotations map[string]string) map[string]strin
 	pgCRDAnnotations := c.ObjectMeta.Annotations
 
 	// allow to inherit certain labels from the 'postgres' object
-	if pgCRDAnnotations != nil {
-		for k, v := range pgCRDAnnotations {
-			for _, match := range c.OpConfig.InheritedAnnotations {
-				if k == match {
-					annotations[k] = v
-				}
+	for k, v := range pgCRDAnnotations {
+		for _, match := range c.OpConfig.InheritedAnnotations {
+			if k == match {
+				annotations[k] = v
 			}
 		}
 	}
diff --git a/pkg/cluster/volumes.go b/pkg/cluster/volumes.go
index e07d453ec..9a41f5f05 100644
--- a/pkg/cluster/volumes.go
+++ b/pkg/cluster/volumes.go
@@ -74,10 +74,15 @@ func (c *Cluster) syncVolumes() error {
 func (c *Cluster) syncUnderlyingEBSVolume() error {
 	c.logger.Infof("starting to sync EBS volumes: type, iops, throughput, and size")
 
-	var err error
+	var (
+		err     error
+		newSize resource.Quantity
+	)
 
 	targetValue := c.Spec.Volume
-	newSize, err := resource.ParseQuantity(targetValue.Size)
+	if newSize, err = resource.ParseQuantity(targetValue.Size); err != nil {
+		return fmt.Errorf("could not parse volume size: %v", err)
+	}
 	targetSize := quantityToGigabyte(newSize)
 
 	awsGp3 := aws.String("gp3")
diff --git a/pkg/cluster/volumes_test.go b/pkg/cluster/volumes_test.go
index aea7711af..204ea8aab 100644
--- a/pkg/cluster/volumes_test.go
+++ b/pkg/cluster/volumes_test.go
@@ -24,6 +24,20 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 )
 
+type testVolume struct {
+	size        int64
+	iops        int64
+	throughtput int64
+	volType     string
+}
+
+var testVol = testVolume{
+	size:        100,
+	iops:        300,
+	throughtput: 125,
+	volType:     "gp2",
+}
+
 func newFakeK8sPVCclient() (k8sutil.KubernetesClient, *fake.Clientset) {
 	clientSet := fake.NewSimpleClientset()
 
@@ -189,14 +203,7 @@ func TestMigrateEBS(t *testing.T) {
 	cluster.Namespace = namespace
 	filterLabels := cluster.labelsSet(false)
 
-	testVolumes := []testVolume{
-		{
-			size: 100,
-		},
-		{
-			size: 100,
-		},
-	}
+	testVolumes := []testVolume{testVol, testVol}
 
 	initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes)
 
@@ -220,13 +227,6 @@ func TestMigrateEBS(t *testing.T) {
 	cluster.executeEBSMigration()
 }
 
-type testVolume struct {
-	iops        int64
-	throughtput int64
-	size        int64
-	volType     string
-}
-
 func initTestVolumesAndPods(client k8sutil.KubernetesClient, namespace, clustername string, labels labels.Set, volumes []testVolume) {
 	i := 0
 	for _, v := range volumes {
@@ -305,17 +305,7 @@ func TestMigrateGp3Support(t *testing.T) {
 	cluster.Namespace = namespace
 	filterLabels := cluster.labelsSet(false)
 
-	testVolumes := []testVolume{
-		{
-			size: 100,
-		},
-		{
-			size: 100,
-		},
-		{
-			size: 100,
-		},
-	}
+	testVolumes := []testVolume{testVol, testVol, testVol}
 
 	initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes)
 
@@ -371,14 +361,7 @@ func TestManualGp2Gp3Support(t *testing.T) {
 	cluster.Namespace = namespace
 	filterLabels := cluster.labelsSet(false)
 
-	testVolumes := []testVolume{
-		{
-			size: 100,
-		},
-		{
-			size: 100,
-		},
-	}
+	testVolumes := []testVolume{testVol, testVol}
 
 	initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes)