Merge branch 'master' into rename-removed-roles

Felix Kunde 2021-05-11 09:48:49 +02:00
commit 635c81e70f
19 changed files with 246 additions and 183 deletions

View File

@ -1,3 +1,3 @@
kubernetes==11.0.0
timeout_decorator==0.4.1
pyyaml==5.3.1
pyyaml==5.4.1

View File

@ -71,7 +71,7 @@ func addDb(dbName string, dbOwner string, clusterName string) {
var dbOwnerExists bool
dbUsers := postgresql.Spec.Users
for key, _ := range dbUsers {
for key := range dbUsers {
if key == dbOwner {
dbOwnerExists = true
}
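The single-variable range form above iterates map keys only; gofmt -s suggests exactly this simplification when the value is discarded with a blank identifier. A minimal sketch of the idiom (map contents are illustrative):

package main

import "fmt"

func main() {
	dbUsers := map[string]string{"owner01": "owner", "user02": "readonly"}

	// Ranging with a single variable yields only the keys, so the blank
	// identifier from `for key, _ := range dbUsers` is unnecessary.
	for key := range dbUsers {
		fmt.Println(key)
	}
}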

View File

@ -23,13 +23,14 @@ THE SOFTWARE.
package cmd
import (
"log"
"os"
user "os/user"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
"log"
"os"
user "os/user"
)
// connectCmd represents the kubectl pg connect command
@ -80,13 +81,13 @@ kubectl pg connect -c cluster -p -u user01 -d db01
func connect(clusterName string, master bool, replica string, psql bool, user string, dbName string) {
config := getConfig()
client, er := kubernetes.NewForConfig(config)
if er != nil {
log.Fatal(er)
client, err := kubernetes.NewForConfig(config)
if err != nil {
log.Fatal(err)
}
podName := getPodName(clusterName, master, replica)
execRequest := &rest.Request{}
var execRequest *rest.Request
if psql {
execRequest = client.CoreV1().RESTClient().Post().Resource("pods").
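Besides the er → err rename, connect.go now declares execRequest as a nil pointer instead of allocating an empty rest.Request that the following branches assign anyway. A stand-in sketch of that declaration shape (the request type here is hypothetical, not client-go's):

package main

import "fmt"

// Hypothetical stand-in for rest.Request, only to show the declaration shape.
type request struct{ resource string }

func buildExecRequest(psql bool) *request {
	// A plain var declaration leaves the pointer nil; every branch below
	// assigns it, so no throwaway allocation is needed.
	var execRequest *request
	if psql {
		execRequest = &request{resource: "pods/exec (psql)"}
	} else {
		execRequest = &request{resource: "pods/exec (shell)"}
	}
	return execRequest
}

func main() {
	fmt.Println(buildExecRequest(true).resource)
}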

View File

@ -53,6 +53,9 @@ kubectl pg create -f cluster-manifest.yaml
func create(fileName string) {
config := getConfig()
postgresConfig, err := PostgresqlLister.NewForConfig(config)
if err != nil {
log.Fatal(err)
}
ymlFile, err := ioutil.ReadFile(fileName)
if err != nil {
log.Fatal(err)
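create.go (and update.go further down) add the error check that was missing after PostgresqlLister.NewForConfig, so a failed client construction no longer slips through to a nil-pointer panic. A minimal sketch of the pattern using client-go constructors (rest.InClusterConfig is illustrative here; the CLI obtains its config differently):

package main

import (
	"log"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Every constructor that can fail is checked before its result is used.
	config, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use the client only after both checks passed
}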

View File

@ -67,7 +67,7 @@ func extVolume(increasedVolumeSize string, clusterName string) {
namespace := getCurrentNamespace()
postgresql, err := postgresConfig.Postgresqls(namespace).Get(context.TODO(), clusterName, metav1.GetOptions{})
if err != nil {
log.Fatalf("hii %v", err)
log.Fatal(err)
}
oldSize, err := resource.ParseQuantity(postgresql.Spec.Volume.Size)

View File

@ -31,7 +31,6 @@ import (
"github.com/spf13/cobra"
PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
@ -46,6 +45,9 @@ var scaleCmd = &cobra.Command{
Scaling to 0 leads to down time.`,
Run: func(cmd *cobra.Command, args []string) {
clusterName, err := cmd.Flags().GetString("cluster")
if err != nil {
log.Fatal(err)
}
namespace, err := cmd.Flags().GetString("namespace")
if err != nil {
log.Fatal(err)
@ -129,8 +131,7 @@ func allowedMinMaxInstances(config *rest.Config) (int32, int32) {
log.Fatal(err)
}
var operator *v1.Deployment
operator = getPostgresOperator(k8sClient)
operator := getPostgresOperator(k8sClient)
operatorContainer := operator.Spec.Template.Spec.Containers
var configMapName, operatorConfigName string
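scale.go collapses the separate `var operator *v1.Deployment` declaration and assignment into one short variable declaration; deleteConnectionPooler below gets the same treatment for its deployment and service variables. A tiny sketch with a stand-in type:

package main

import "fmt"

type deployment struct{ name string }

func getPostgresOperator() *deployment {
	return &deployment{name: "postgres-operator"}
}

func main() {
	// := declares and assigns in one step; the type is inferred from the
	// right-hand side, so the separate var declaration is redundant.
	operator := getPostgresOperator()
	fmt.Println(operator.name)
}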

View File

@ -57,6 +57,9 @@ kubectl pg update -f cluster-manifest.yaml
func updatePgResources(fileName string) {
config := getConfig()
postgresConfig, err := PostgresqlLister.NewForConfig(config)
if err != nil {
log.Fatal(err)
}
ymlFile, err := ioutil.ReadFile(fileName)
if err != nil {
log.Fatal(err)

View File

@ -99,9 +99,9 @@ func confirmAction(clusterName string, namespace string) {
func getPodName(clusterName string, master bool, replicaNumber string) string {
config := getConfig()
client, er := kubernetes.NewForConfig(config)
if er != nil {
log.Fatal(er)
client, err := kubernetes.NewForConfig(config)
if err != nil {
log.Fatal(err)
}
postgresConfig, err := PostgresqlLister.NewForConfig(config)

View File

@ -372,6 +372,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
}
if !reflect.DeepEqual(c.Statefulset.Annotations, statefulSet.Annotations) {
match = false
needsReplace = true
reasons = append(reasons, "new statefulset's annotations do not match the current one")
}
@ -456,6 +457,11 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
}
}
if len(c.Statefulset.Spec.Template.Spec.Volumes) != len(statefulSet.Spec.Template.Spec.Volumes) {
needsReplace = true
reasons = append(reasons, fmt.Sprintf("new statefulset's Volumes contains different number of volumes to the old one"))
}
// we assume any change in priority happens by rolling out a new priority class
// changing the priority value in an existing class is not supported
if c.Statefulset.Spec.Template.Spec.PriorityClassName != statefulSet.Spec.Template.Spec.PriorityClassName {
@ -513,6 +519,8 @@ func (c *Cluster) compareContainers(description string, setA, setB []v1.Containe
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.EnvFrom, b.EnvFrom) }),
newCheck("new statefulset %s's %s (index %d) security context does not match the current one",
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.SecurityContext, b.SecurityContext) }),
newCheck("new statefulset %s's %s (index %d) volume mounts do not match the current one",
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.VolumeMounts, b.VolumeMounts) }),
}
if !c.OpConfig.EnableLazySpiloUpgrade {
@ -609,7 +617,7 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
// for a cluster that had no such job before. In this case a missing job is not an error.
func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
updateFailed := false
syncStatetfulSet := false
syncStatefulSet := false
c.mu.Lock()
defer c.mu.Unlock()
@ -630,7 +638,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
if IsBiggerPostgresVersion(oldSpec.Spec.PostgresqlParam.PgVersion, c.GetDesiredMajorVersion()) {
c.logger.Infof("postgresql version increased (%s -> %s), depending on config manual upgrade needed",
oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
syncStatetfulSet = true
syncStatefulSet = true
} else {
c.logger.Infof("postgresql major version unchanged or smaller, no changes needed")
// sticking with old version, this will also advance GetDesiredVersion next time.
@ -699,9 +707,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
updateFailed = true
return
}
if syncStatetfulSet || !reflect.DeepEqual(oldSs, newSs) || !reflect.DeepEqual(oldSpec.Annotations, newSpec.Annotations) {
if syncStatefulSet || !reflect.DeepEqual(oldSs, newSs) || !reflect.DeepEqual(oldSpec.Annotations, newSpec.Annotations) {
c.logger.Debugf("syncing statefulsets")
syncStatetfulSet = false
syncStatefulSet = false
// TODO: avoid generating the StatefulSet object twice by passing it to syncStatefulSet
if err := c.syncStatefulSet(); err != nil {
c.logger.Errorf("could not sync statefulsets: %v", err)
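The cluster.go hunks mark annotation and volume-count differences as needsReplace (the StatefulSet must be replaced rather than updated in place) and extend the compareContainers check table with a VolumeMounts predicate. A simplified, self-contained sketch of that predicate-table shape (the container type and check values are illustrative, not the operator's real ones):

package main

import (
	"fmt"
	"reflect"
)

// Trimmed-down stand-in for v1.Container.
type container struct {
	Image        string
	VolumeMounts []string
}

type containerCheck struct {
	reason  string
	differs func(a, b container) bool
}

func newCheck(reason string, differs func(a, b container) bool) containerCheck {
	return containerCheck{reason: reason, differs: differs}
}

// compareContainers assumes both slices have the same length.
func compareContainers(current, desired []container) []string {
	checks := []containerCheck{
		newCheck("image does not match",
			func(a, b container) bool { return a.Image != b.Image }),
		newCheck("volume mounts do not match",
			func(a, b container) bool { return !reflect.DeepEqual(a.VolumeMounts, b.VolumeMounts) }),
	}
	var reasons []string
	for i := range current {
		for _, c := range checks {
			if c.differs(current[i], desired[i]) {
				reasons = append(reasons, fmt.Sprintf("container %d: %s", i, c.reason))
			}
		}
	}
	return reasons
}

func main() {
	cur := []container{{Image: "spilo:2.0", VolumeMounts: []string{"/home/postgres/pgdata"}}}
	des := []container{{Image: "spilo:2.0", VolumeMounts: []string{"/home/postgres/pgdata", "/tls"}}}
	fmt.Println(compareContainers(cur, des))
}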

View File

@ -420,9 +420,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
// Clean up the deployment object. If deployment resource we've remembered
// is somehow empty, try to delete based on what would we generate
var deployment *appsv1.Deployment
deployment = c.ConnectionPooler[role].Deployment
deployment := c.ConnectionPooler[role].Deployment
policy := metav1.DeletePropagationForeground
options := metav1.DeleteOptions{PropagationPolicy: &policy}
@ -445,8 +443,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
}
// Repeat the same for the service object
var service *v1.Service
service = c.ConnectionPooler[role].Service
service := c.ConnectionPooler[role].Service
if service == nil {
c.logger.Debugf("no connection pooler service object to delete")
} else {

View File

@ -213,10 +213,10 @@ PatroniInitDBParams:
for _, k := range initdbOptionNames {
v := patroni.InitDB[k]
for i, defaultParam := range config.Bootstrap.Initdb {
switch defaultParam.(type) {
switch t := defaultParam.(type) {
case map[string]string:
{
for k1 := range defaultParam.(map[string]string) {
for k1 := range t {
if k1 == k {
(config.Bootstrap.Initdb[i]).(map[string]string)[k] = v
continue PatroniInitDBParams
@ -226,7 +226,7 @@ PatroniInitDBParams:
case string:
{
/* if the option already occurs in the list */
if defaultParam.(string) == v {
if t == v {
continue PatroniInitDBParams
}
}
@ -264,7 +264,7 @@ PatroniInitDBParams:
if patroni.SynchronousMode {
config.Bootstrap.DCS.SynchronousMode = patroni.SynchronousMode
}
if patroni.SynchronousModeStrict != false {
if patroni.SynchronousModeStrict {
config.Bootstrap.DCS.SynchronousModeStrict = patroni.SynchronousModeStrict
}
@ -336,7 +336,7 @@ func nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAff
if len(nodeReadinessLabel) == 0 && nodeAffinity == nil {
return nil
}
nodeAffinityCopy := *&v1.NodeAffinity{}
nodeAffinityCopy := v1.NodeAffinity{}
if nodeAffinity != nil {
nodeAffinityCopy = *nodeAffinity.DeepCopy()
}
@ -1279,15 +1279,12 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy)
}
stsAnnotations := make(map[string]string)
stsAnnotations = c.AnnotationsToPropagate(c.annotationsSet(nil))
statefulSet := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: c.statefulSetName(),
Namespace: c.Namespace,
Labels: c.labelsSet(true),
Annotations: stsAnnotations,
Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)),
},
Spec: appsv1.StatefulSetSpec{
Replicas: &numberOfInstances,
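Earlier in this file the Patroni initdb handling binds the type switch (`switch t := defaultParam.(type)`), so each case works with a value already asserted to its concrete type instead of repeating defaultParam.(map[string]string) and defaultParam.(string). A small sketch of that form (the option values are illustrative):

package main

import "fmt"

func describe(v interface{}) string {
	// The bound variable t has the concrete type of the matching case,
	// so no further type assertions are needed inside the case bodies.
	switch t := v.(type) {
	case map[string]string:
		return fmt.Sprintf("map option with %d entries", len(t))
	case string:
		return fmt.Sprintf("flag option %q", t)
	default:
		return fmt.Sprintf("unhandled option type %T", t)
	}
}

func main() {
	fmt.Println(describe(map[string]string{"encoding": "UTF8"}))
	fmt.Println(describe("data-checksums"))
}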

View File

@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/assert"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
"github.com/zalando/postgres-operator/pkg/spec"
"github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/config"
@ -24,9 +25,21 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes/fake"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
)
func newFakeK8sTestClient() (k8sutil.KubernetesClient, *fake.Clientset) {
acidClientSet := fakeacidv1.NewSimpleClientset()
clientSet := fake.NewSimpleClientset()
return k8sutil.KubernetesClient{
PodsGetter: clientSet.CoreV1(),
PostgresqlsGetter: acidClientSet.AcidV1(),
StatefulSetsGetter: clientSet.AppsV1(),
}, clientSet
}
// For testing purposes
type ExpectedValue struct {
envIndex int
@ -930,15 +943,6 @@ func TestNodeAffinity(t *testing.T) {
assert.Equal(t, s.Spec.Template.Spec.Affinity.NodeAffinity, nodeAff, "cluster template has correct node affinity")
}
func testCustomPodTemplate(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
if podSpec.ObjectMeta.Name != "test-pod-template" {
return fmt.Errorf("Custom pod template is not used, current spec %+v",
podSpec)
}
return nil
}
func testDeploymentOwnerReference(cluster *Cluster, deployment *appsv1.Deployment) error {
owner := deployment.ObjectMeta.OwnerReferences[0]
@ -962,16 +966,23 @@ func testServiceOwnerReference(cluster *Cluster, service *v1.Service, role Postg
}
func TestTLS(t *testing.T) {
var err error
var spec acidv1.PostgresSpec
var cluster *Cluster
var spiloRunAsUser = int64(101)
var spiloRunAsGroup = int64(103)
var spiloFSGroup = int64(103)
var additionalVolumes = spec.AdditionalVolumes
makeSpec := func(tls acidv1.TLSDescription) acidv1.PostgresSpec {
return acidv1.PostgresSpec{
client, _ := newFakeK8sTestClient()
clusterName := "acid-test-cluster"
namespace := "default"
tlsSecretName := "my-secret"
spiloRunAsUser := int64(101)
spiloRunAsGroup := int64(103)
spiloFSGroup := int64(103)
defaultMode := int32(0640)
mountPath := "/tls"
pg := acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
TeamID: "myapp", NumberOfInstances: 1,
Resources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
@ -980,11 +991,24 @@ func TestTLS(t *testing.T) {
Volume: acidv1.Volume{
Size: "1G",
},
TLS: &tls,
}
TLS: &acidv1.TLSDescription{
SecretName: tlsSecretName, CAFile: "ca.crt"},
AdditionalVolumes: []acidv1.AdditionalVolume{
acidv1.AdditionalVolume{
Name: tlsSecretName,
MountPath: mountPath,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: tlsSecretName,
DefaultMode: &defaultMode,
},
},
},
},
},
}
cluster = New(
var cluster = New(
Config{
OpConfig: config.Config{
PodManagementPolicy: "ordered_ready",
@ -999,28 +1023,14 @@ func TestTLS(t *testing.T) {
SpiloFSGroup: &spiloFSGroup,
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
spec = makeSpec(acidv1.TLSDescription{SecretName: "my-secret", CAFile: "ca.crt"})
s, err := cluster.generateStatefulSet(&spec)
if err != nil {
assert.NoError(t, err)
}
}, client, pg, logger, eventRecorder)
// create a statefulset
sts, err := cluster.createStatefulSet()
assert.NoError(t, err)
fsGroup := int64(103)
assert.Equal(t, &fsGroup, s.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")
defaultMode := int32(0640)
mountPath := "/tls"
additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
Name: spec.TLS.SecretName,
MountPath: mountPath,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: spec.TLS.SecretName,
DefaultMode: &defaultMode,
},
},
})
assert.Equal(t, &fsGroup, sts.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")
volume := v1.Volume{
Name: "my-secret",
@ -1031,16 +1041,16 @@ func TestTLS(t *testing.T) {
},
},
}
assert.Contains(t, s.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume")
assert.Contains(t, sts.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume")
assert.Contains(t, s.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
assert.Contains(t, sts.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
MountPath: "/tls",
Name: "my-secret",
}, "the volume gets mounted in /tls")
assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
}
func TestAdditionalVolume(t *testing.T) {
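The rewritten TestTLS builds the cluster against newFakeK8sTestClient() instead of an empty k8sutil.KubernetesClient{}, which lets it exercise createStatefulSet end to end. A minimal, self-contained sketch of the underlying fake-clientset pattern from client-go (object names here are illustrative):

package example

import (
	"context"
	"testing"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeClientset(t *testing.T) {
	// fake.NewSimpleClientset serves the typed API from an in-memory object
	// tracker, so created objects can be read back without a real cluster.
	client := fake.NewSimpleClientset()

	sts := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: "acid-test-cluster", Namespace: "default"},
	}
	if _, err := client.AppsV1().StatefulSets("default").Create(context.TODO(), sts, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	got, err := client.AppsV1().StatefulSets("default").Get(context.TODO(), "acid-test-cluster", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got.Name != "acid-test-cluster" {
		t.Errorf("unexpected statefulset name %q", got.Name)
	}
}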

View File

@ -19,8 +19,8 @@ var VersionMap = map[string]int{
// IsBiggerPostgresVersion Compare two Postgres version numbers
func IsBiggerPostgresVersion(old string, new string) bool {
oldN, _ := VersionMap[old]
newN, _ := VersionMap[new]
oldN := VersionMap[old]
newN := VersionMap[new]
return newN > oldN
}
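IsBiggerPostgresVersion drops the comma-ok lookups: indexing a Go map with a missing key simply yields the value type's zero value, so discarding the second return value added nothing. A small sketch (the numeric values are illustrative, not the operator's VersionMap):

package main

import "fmt"

var versionMap = map[string]int{
	"12": 120000,
	"13": 130000,
}

func isBiggerPostgresVersion(old, new string) bool {
	// A missing key returns 0, exactly what the previous
	// `oldN, _ := VersionMap[old]` form produced as well.
	return versionMap[new] > versionMap[old]
}

func main() {
	fmt.Println(isBiggerPostgresVersion("12", "13")) // true
	fmt.Println(isBiggerPostgresVersion("13", "14")) // false: unknown "14" maps to 0
}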

View File

@ -147,25 +147,6 @@ func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error {
return nil
}
func (c *Cluster) updateStatefulSetAnnotations(annotations map[string]string) (*appsv1.StatefulSet, error) {
c.logger.Debugf("patching statefulset annotations")
patchData, err := metaAnnotationsPatch(annotations)
if err != nil {
return nil, fmt.Errorf("could not form patch for the statefulset metadata: %v", err)
}
result, err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Patch(
context.TODO(),
c.Statefulset.Name,
types.MergePatchType,
[]byte(patchData),
metav1.PatchOptions{},
"")
if err != nil {
return nil, fmt.Errorf("could not patch statefulset annotations %q: %v", patchData, err)
}
return result, nil
}
func (c *Cluster) updateStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
c.setProcessName("updating statefulset")
if c.Statefulset == nil {
@ -197,13 +178,6 @@ func (c *Cluster) updateStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
return fmt.Errorf("could not patch statefulset spec %q: %v", statefulSetName, err)
}
if newStatefulSet.Annotations != nil {
statefulSet, err = c.updateStatefulSetAnnotations(newStatefulSet.Annotations)
if err != nil {
return err
}
}
c.Statefulset = statefulSet
return nil

View File

@ -11,7 +11,6 @@ import (
"github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/constants"
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
appsv1 "k8s.io/api/apps/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
v1 "k8s.io/api/core/v1"
policybeta1 "k8s.io/api/policy/v1beta1"
@ -260,28 +259,6 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
return nil
}
func (c *Cluster) mustUpdatePodsAfterLazyUpdate(desiredSset *appsv1.StatefulSet) (bool, error) {
pods, err := c.listPods()
if err != nil {
return false, fmt.Errorf("could not list pods of the statefulset: %v", err)
}
for _, pod := range pods {
effectivePodImage := pod.Spec.Containers[0].Image
ssImage := desiredSset.Spec.Template.Spec.Containers[0].Image
if ssImage != effectivePodImage {
c.logger.Infof("not all pods were re-started when the lazy upgrade was enabled; forcing the rolling upgrade now")
return true, nil
}
}
return false, nil
}
func (c *Cluster) syncStatefulSet() error {
podsToRecreate := make([]v1.Pod, 0)
@ -373,8 +350,6 @@ func (c *Cluster) syncStatefulSet() error {
}
}
c.updateStatefulSetAnnotations(c.AnnotationsToPropagate(c.annotationsSet(c.Statefulset.Annotations)))
if len(podsToRecreate) == 0 && !c.OpConfig.EnableLazySpiloUpgrade {
// even if the desired and the running statefulsets match
// there still may be not up-to-date pods on condition

pkg/cluster/sync_test.go (new file, 115 lines)
View File

@ -0,0 +1,115 @@
package cluster
import (
"testing"
"time"
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/stretchr/testify/assert"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
"github.com/zalando/postgres-operator/pkg/util/config"
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
"k8s.io/client-go/kubernetes/fake"
)
func newFakeK8sSyncClient() (k8sutil.KubernetesClient, *fake.Clientset) {
acidClientSet := fakeacidv1.NewSimpleClientset()
clientSet := fake.NewSimpleClientset()
return k8sutil.KubernetesClient{
PodsGetter: clientSet.CoreV1(),
PostgresqlsGetter: acidClientSet.AcidV1(),
StatefulSetsGetter: clientSet.AppsV1(),
}, clientSet
}
func TestSyncStatefulSetsAnnotations(t *testing.T) {
testName := "test syncing statefulsets annotations"
client, _ := newFakeK8sSyncClient()
clusterName := "acid-test-cluster"
namespace := "default"
inheritedAnnotation := "environment"
pg := acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
Annotations: map[string]string{inheritedAnnotation: "test"},
},
Spec: acidv1.PostgresSpec{
Volume: acidv1.Volume{
Size: "1Gi",
},
},
}
var cluster = New(
Config{
OpConfig: config.Config{
PodManagementPolicy: "ordered_ready",
Resources: config.Resources{
ClusterLabels: map[string]string{"application": "spilo"},
ClusterNameLabel: "cluster-name",
DefaultCPURequest: "300m",
DefaultCPULimit: "300m",
DefaultMemoryRequest: "300Mi",
DefaultMemoryLimit: "300Mi",
InheritedAnnotations: []string{inheritedAnnotation},
PodRoleLabel: "spilo-role",
ResourceCheckInterval: time.Duration(3),
ResourceCheckTimeout: time.Duration(10),
},
},
}, client, pg, logger, eventRecorder)
cluster.Name = clusterName
cluster.Namespace = namespace
// create a statefulset
_, err := cluster.createStatefulSet()
assert.NoError(t, err)
// patch statefulset and add annotation
patchData, err := metaAnnotationsPatch(map[string]string{"test-anno": "true"})
assert.NoError(t, err)
newSts, err := cluster.KubeClient.StatefulSets(namespace).Patch(
context.TODO(),
clusterName,
types.MergePatchType,
[]byte(patchData),
metav1.PatchOptions{},
"")
assert.NoError(t, err)
cluster.Statefulset = newSts
// first compare running with desired statefulset - they should not match
// because no inherited annotations or downscaler annotations are configured
desiredSts, err := cluster.generateStatefulSet(&cluster.Postgresql.Spec)
assert.NoError(t, err)
cmp := cluster.compareStatefulSetWith(desiredSts)
if cmp.match {
t.Errorf("%s: match between current and desired statefulsets albeit differences: %#v", testName, cmp)
}
// now sync statefulset - the diff will trigger a replacement of the statefulset
cluster.syncStatefulSet()
// compare again after the SYNC - must be identical to the desired state
cmp = cluster.compareStatefulSetWith(desiredSts)
if !cmp.match {
t.Errorf("%s: current and desired statefulsets are not matching %#v", testName, cmp)
}
// check if inherited annotation exists
if _, exists := desiredSts.Annotations[inheritedAnnotation]; !exists {
t.Errorf("%s: inherited annotation not found in desired statefulset: %#v", testName, desiredSts.Annotations)
}
}
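The test patches the running StatefulSet's annotations via metaAnnotationsPatch and types.MergePatchType. Presumably the helper emits a JSON merge patch scoped to metadata.annotations; a sketch of that payload shape (the helper's exact output is an assumption here):

package main

import (
	"encoding/json"
	"fmt"
)

// annotationsPatch builds a JSON merge patch that only touches
// metadata.annotations, leaving the rest of the object untouched.
func annotationsPatch(annotations map[string]string) ([]byte, error) {
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": annotations,
		},
	}
	return json.Marshal(patch)
}

func main() {
	b, err := annotationsPatch(map[string]string{"test-anno": "true"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"metadata":{"annotations":{"test-anno":"true"}}}
}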

View File

@ -227,11 +227,6 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isU
}
}
func (c *Cluster) logVolumeChanges(old, new acidv1.Volume) {
c.logger.Infof("volume specification has been changed")
logNiceDiff(c.logger, old, new)
}
func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
if teamID == "" {
@ -251,9 +246,7 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
}
}
for _, member := range additionalMembers {
members = append(members, member)
}
members = append(members, additionalMembers...)
}
if !c.OpConfig.EnableTeamsAPI {
@ -292,12 +285,10 @@ func (c *Cluster) annotationsSet(annotations map[string]string) map[string]strin
pgCRDAnnotations := c.ObjectMeta.Annotations
// allow to inherit certain labels from the 'postgres' object
if pgCRDAnnotations != nil {
for k, v := range pgCRDAnnotations {
for _, match := range c.OpConfig.InheritedAnnotations {
if k == match {
annotations[k] = v
}
for k, v := range pgCRDAnnotations {
for _, match := range c.OpConfig.InheritedAnnotations {
if k == match {
annotations[k] = v
}
}
}
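getTeamMembers now appends the whole additionalMembers slice in one call instead of looping element by element. The variadic append form in isolation:

package main

import "fmt"

func main() {
	members := []string{"alice", "bob"}
	additionalMembers := []string{"carol", "dave"}

	// append(dst, src...) expands the source slice into individual
	// arguments, replacing the removed per-element loop.
	members = append(members, additionalMembers...)

	fmt.Println(members) // [alice bob carol dave]
}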

View File

@ -74,10 +74,15 @@ func (c *Cluster) syncVolumes() error {
func (c *Cluster) syncUnderlyingEBSVolume() error {
c.logger.Infof("starting to sync EBS volumes: type, iops, throughput, and size")
var err error
var (
err error
newSize resource.Quantity
)
targetValue := c.Spec.Volume
newSize, err := resource.ParseQuantity(targetValue.Size)
if newSize, err = resource.ParseQuantity(targetValue.Size); err != nil {
return fmt.Errorf("could not parse volume size: %v", err)
}
targetSize := quantityToGigabyte(newSize)
awsGp3 := aws.String("gp3")
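syncUnderlyingEBSVolume now declares err and newSize up front so the assignment can happen inside the if statement with `=` and newSize stays in scope for the later quantityToGigabyte call. The same scoping shape in plain Go (strconv stands in for resource.ParseQuantity):

package main

import (
	"fmt"
	"strconv"
)

func parseSize(raw string) (int64, error) {
	var (
		err  error
		size int64
	)
	// Using = (not :=) inside the if assigns the outer variables, so size
	// remains usable after the error check; `if size, err := ...` would
	// scope a new size to the if statement only.
	if size, err = strconv.ParseInt(raw, 10, 64); err != nil {
		return 0, fmt.Errorf("could not parse volume size: %v", err)
	}
	return size, nil
}

func main() {
	fmt.Println(parseSize("100"))
	fmt.Println(parseSize("not-a-number"))
}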

View File

@ -24,6 +24,20 @@ import (
"k8s.io/client-go/kubernetes/fake"
)
type testVolume struct {
size int64
iops int64
throughtput int64
volType string
}
var testVol = testVolume{
size: 100,
iops: 300,
throughtput: 125,
volType: "gp2",
}
func newFakeK8sPVCclient() (k8sutil.KubernetesClient, *fake.Clientset) {
clientSet := fake.NewSimpleClientset()
@ -189,14 +203,7 @@ func TestMigrateEBS(t *testing.T) {
cluster.Namespace = namespace
filterLabels := cluster.labelsSet(false)
testVolumes := []testVolume{
{
size: 100,
},
{
size: 100,
},
}
testVolumes := []testVolume{testVol, testVol}
initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes)
@ -220,13 +227,6 @@ func TestMigrateEBS(t *testing.T) {
cluster.executeEBSMigration()
}
type testVolume struct {
iops int64
throughtput int64
size int64
volType string
}
func initTestVolumesAndPods(client k8sutil.KubernetesClient, namespace, clustername string, labels labels.Set, volumes []testVolume) {
i := 0
for _, v := range volumes {
@ -305,17 +305,7 @@ func TestMigrateGp3Support(t *testing.T) {
cluster.Namespace = namespace
filterLabels := cluster.labelsSet(false)
testVolumes := []testVolume{
{
size: 100,
},
{
size: 100,
},
{
size: 100,
},
}
testVolumes := []testVolume{testVol, testVol, testVol}
initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes)
@ -371,14 +361,7 @@ func TestManualGp2Gp3Support(t *testing.T) {
cluster.Namespace = namespace
filterLabels := cluster.labelsSet(false)
testVolumes := []testVolume{
{
size: 100,
},
{
size: 100,
},
}
testVolumes := []testVolume{testVol, testVol}
initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes)
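volumes_test.go hoists the repeated volume literal into the shared testVol fixture. Because testVolume holds only value fields, each slice element gets an independent copy; a trimmed-down sketch of that property (field set reduced for brevity):

package main

import "fmt"

type testVolume struct {
	size    int64
	iops    int64
	volType string
}

var testVol = testVolume{size: 100, iops: 300, volType: "gp2"}

func main() {
	testVolumes := []testVolume{testVol, testVol}

	// Each element is a copy of the fixture, so mutating one leaves the
	// other (and the fixture itself) unchanged.
	testVolumes[0].size = 200
	fmt.Println(testVolumes) // [{200 300 gp2} {100 300 gp2}]
	fmt.Println(testVol)     // {100 300 gp2}
}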