check resize mode on update events (#1194)

* check resize mode on update events

* add unit test for PVC resizing

* set resize mode to pvc in charts and manifests

* add test for quantityToGigabyte

* just one debug line for syncing volumes

* extend test and update log msg
Felix Kunde 2020-11-11 13:22:43 +01:00 committed by GitHub
parent e779eab22f
commit 3fed565328
10 changed files with 199 additions and 13 deletions


@@ -105,6 +105,10 @@ rules:
- delete
- get
- list
{{- if toString .Values.configKubernetes.storage_resize_mode | eq "pvc" }}
- patch
- update
{{- end }}
# to read existing PVs. Creation should be done via dynamic provisioning
- apiGroups:
- ""
@@ -113,7 +117,9 @@
verbs:
- get
- list
{{- if toString .Values.configKubernetes.storage_resize_mode | eq "ebs" }}
- update # only for resizing AWS volumes
{{- end }}
# to watch Spilo pods and do rolling updates. Creation via StatefulSet
- apiGroups:
- ""

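The extra patch/update verbs are needed because in pvc mode the operator rewrites each claim's storage request through the Kubernetes API. A minimal client-go sketch of such an update (illustrative only, not the operator's own code; the helper name updatePVCStorage is hypothetical and the usual client-go/apimachinery imports are assumed):

func updatePVCStorage(ctx context.Context, client kubernetes.Interface, namespace, name, size string) error {
	// fetch the current claim
	pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// bump the requested storage and write the claim back;
	// this is the call that needs the update verb granted above
	pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(size)
	_, err = client.CoreV1().PersistentVolumeClaims(namespace).Update(ctx, pvc, metav1.UpdateOptions{})
	return err
}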

@@ -136,7 +136,7 @@ configKubernetes:
# whether the Spilo container should run in privileged mode
spilo_privileged: false
# storage resize strategy, available options are: ebs, pvc, off
storage_resize_mode: ebs
storage_resize_mode: pvc
# operator watches for postgres objects in the given namespace
watched_namespace: "*" # listen to all namespaces


@@ -130,7 +130,7 @@ configKubernetes:
# whether the Spilo container should run in privileged mode
spilo_privileged: "false"
# storage resize strategy, available options are: ebs, pvc, off
storage_resize_mode: ebs
storage_resize_mode: pvc
# operator watches for postgres objects in the given namespace
watched_namespace: "*" # listen to all namespaces


@@ -107,7 +107,7 @@ data:
# spilo_runasgroup: 103
# spilo_fsgroup: 103
spilo_privileged: "false"
# storage_resize_mode: "off"
storage_resize_mode: "pvc"
super_username: postgres
# team_admin_role: "admin"
# team_api_role_configuration: "log_statement:all"


@@ -106,6 +106,8 @@ rules:
- delete
- get
- list
- patch
- update
# to read existing PVs. Creation should be done via dynamic provisioning
- apiGroups:
- ""


@@ -72,7 +72,7 @@ configuration:
# spilo_runasgroup: 103
# spilo_fsgroup: 103
spilo_privileged: false
storage_resize_mode: ebs
storage_resize_mode: pvc
# toleration: {}
# watched_namespace: ""
postgres_pod_resources:


@@ -140,7 +140,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
Secrets: make(map[types.UID]*v1.Secret),
Services: make(map[PostgresRole]*v1.Service),
Endpoints: make(map[PostgresRole]*v1.Endpoints)},
userSyncStrategy: users.DefaultUserSyncStrategy{password_encryption},
userSyncStrategy: users.DefaultUserSyncStrategy{PasswordEncryption: password_encryption},
deleteOptions: metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy},
podEventsQueue: podEventsQueue,
KubeClient: kubeClient,
@@ -671,13 +671,21 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// Volume
if oldSpec.Spec.Size != newSpec.Spec.Size {
c.logger.Debugf("syncing persistent volumes")
c.logVolumeChanges(oldSpec.Spec.Volume, newSpec.Spec.Volume)
if err := c.syncVolumes(); err != nil {
c.logger.Errorf("could not sync persistent volumes: %v", err)
updateFailed = true
c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode)
if c.OpConfig.StorageResizeMode == "pvc" {
if err := c.syncVolumeClaims(); err != nil {
c.logger.Errorf("could not sync persistent volume claims: %v", err)
updateFailed = true
}
} else if c.OpConfig.StorageResizeMode == "ebs" {
if err := c.syncVolumes(); err != nil {
c.logger.Errorf("could not sync persistent volumes: %v", err)
updateFailed = true
}
}
} else {
c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.")
}
// Statefulset

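syncVolumeClaims is called here but defined outside this excerpt (in pkg/cluster/volumes.go). A rough sketch of what it presumably does, built only from the helpers that appear elsewhere in this commit; not the literal implementation:

// Sketch: in pvc mode, delegate to resizeVolumeClaims (see the volumes.go
// hunk further down) with the volume size from the current cluster manifest.
func (c *Cluster) syncVolumeClaims() error {
	c.logger.Debugf("syncing persistent volume claims")
	if err := c.resizeVolumeClaims(c.Spec.Volume); err != nil {
		return fmt.Errorf("could not resize persistent volume claims: %v", err)
	}
	return nil
}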

@@ -57,8 +57,8 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
return err
}
c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode)
if c.OpConfig.StorageResizeMode == "pvc" {
c.logger.Debugf("syncing persistent volume claims")
if err = c.syncVolumeClaims(); err != nil {
err = fmt.Errorf("could not sync persistent volume claims: %v", err)
return err
@@ -70,7 +70,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
// TODO: handle the case of the cluster that is downsized and enlarged again
// (there will be a volume from the old pod for which we can't act before the
// the statefulset modification is concluded)
c.logger.Debugf("syncing persistent volumes")
if err = c.syncVolumes(); err != nil {
err = fmt.Errorf("could not sync persistent volumes: %v", err)
return err


@@ -62,7 +62,7 @@ func (c *Cluster) resizeVolumeClaims(newVolume acidv1.Volume) error {
if err != nil {
return fmt.Errorf("could not parse volume size: %v", err)
}
_, newSize, err := c.listVolumesWithManifestSize(newVolume)
newSize := quantityToGigabyte(newQuantity)
for _, pvc := range pvcs {
volumeSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage])
if volumeSize >= newSize {

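quantityToGigabyte itself is not part of this diff. Judging from the new tests below, it presumably truncates a resource quantity to whole gibibytes (2^30 bytes), which is why 1.5Gi maps to 1 and 1000Mi maps to 0. A minimal sketch under that assumption (the real helper lives in pkg/cluster/volumes.go):

// convert a Kubernetes resource quantity to whole Gi, rounding down
func quantityToGigabyte(quantity resource.Quantity) int64 {
	return quantity.ScaledValue(0) / (1 << 30)
}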
pkg/cluster/volumes_test.go (new file, 171 lines)

@@ -0,0 +1,171 @@
package cluster

import (
	"testing"
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"

	"github.com/stretchr/testify/assert"
	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
	"github.com/zalando/postgres-operator/pkg/util/config"
	"github.com/zalando/postgres-operator/pkg/util/constants"
	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
	"k8s.io/client-go/kubernetes/fake"
)

func NewFakeKubernetesClient() (k8sutil.KubernetesClient, *fake.Clientset) {
	clientSet := fake.NewSimpleClientset()

	return k8sutil.KubernetesClient{
		PersistentVolumeClaimsGetter: clientSet.CoreV1(),
	}, clientSet
}

func TestResizeVolumeClaim(t *testing.T) {
	testName := "test resizing of persistent volume claims"
	client, _ := NewFakeKubernetesClient()
	clusterName := "acid-test-cluster"
	namespace := "default"
	newVolumeSize := "2Gi"

	// new cluster with pvc storage resize mode and configured labels
	var cluster = New(
		Config{
			OpConfig: config.Config{
				Resources: config.Resources{
					ClusterLabels:    map[string]string{"application": "spilo"},
					ClusterNameLabel: "cluster-name",
				},
				StorageResizeMode: "pvc",
			},
		}, client, acidv1.Postgresql{}, logger, eventRecorder)

	// set metadata, so that labels will get correct values
	cluster.Name = clusterName
	cluster.Namespace = namespace
	filterLabels := cluster.labelsSet(false)

	// define and create PVCs for 1Gi volumes
	storage1Gi, err := resource.ParseQuantity("1Gi")
	assert.NoError(t, err)

	pvcList := &v1.PersistentVolumeClaimList{
		Items: []v1.PersistentVolumeClaim{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name:      constants.DataVolumeName + "-" + clusterName + "-0",
					Namespace: namespace,
					Labels:    filterLabels,
				},
				Spec: v1.PersistentVolumeClaimSpec{
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							v1.ResourceStorage: storage1Gi,
						},
					},
				},
			},
			{
				ObjectMeta: metav1.ObjectMeta{
					Name:      constants.DataVolumeName + "-" + clusterName + "-1",
					Namespace: namespace,
					Labels:    filterLabels,
				},
				Spec: v1.PersistentVolumeClaimSpec{
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							v1.ResourceStorage: storage1Gi,
						},
					},
				},
			},
			{
				ObjectMeta: metav1.ObjectMeta{
					Name:      constants.DataVolumeName + "-" + clusterName + "-2-0",
					Namespace: namespace,
					Labels:    labels.Set{},
				},
				Spec: v1.PersistentVolumeClaimSpec{
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							v1.ResourceStorage: storage1Gi,
						},
					},
				},
			},
		},
	}

	for _, pvc := range pvcList.Items {
		cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{})
	}

	// test resizing
	cluster.resizeVolumeClaims(acidv1.Volume{Size: newVolumeSize})

	pvcs, err := cluster.listPersistentVolumeClaims()
	assert.NoError(t, err)

	// check if listPersistentVolumeClaims returns only the PVCs matching the filter
	if len(pvcs) != len(pvcList.Items)-1 {
		t.Errorf("%s: could not find all PVCs, got %v, expected %v", testName, len(pvcs), len(pvcList.Items)-1)
	}

	// check if PVCs were correctly resized
	for _, pvc := range pvcs {
		newStorageSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage])
		expectedQuantity, err := resource.ParseQuantity(newVolumeSize)
		assert.NoError(t, err)
		expectedSize := quantityToGigabyte(expectedQuantity)
		if newStorageSize != expectedSize {
			t.Errorf("%s: resizing failed, got %v, expected %v", testName, newStorageSize, expectedSize)
		}
	}

	// check if other PVC was not resized
	pvc2, err := cluster.KubeClient.PersistentVolumeClaims(namespace).Get(context.TODO(), constants.DataVolumeName+"-"+clusterName+"-2-0", metav1.GetOptions{})
	assert.NoError(t, err)
	unchangedSize := quantityToGigabyte(pvc2.Spec.Resources.Requests[v1.ResourceStorage])
	expectedSize := quantityToGigabyte(storage1Gi)
	if unchangedSize != expectedSize {
		t.Errorf("%s: volume size changed, got %v, expected %v", testName, unchangedSize, expectedSize)
	}
}

func TestQuantityToGigabyte(t *testing.T) {
	tests := []struct {
		name        string
		quantityStr string
		expected    int64
	}{
		{
			"test with 1Gi",
			"1Gi",
			1,
		},
		{
			"test with float",
			"1.5Gi",
			int64(1),
		},
		{
			"test with 1000Mi",
			"1000Mi",
			int64(0),
		},
	}

	for _, tt := range tests {
		quantity, err := resource.ParseQuantity(tt.quantityStr)
		assert.NoError(t, err)
		gigabyte := quantityToGigabyte(quantity)
		if gigabyte != tt.expected {
			t.Errorf("%s: got %v, expected %v", tt.name, gigabyte, tt.expected)
		}
	}
}
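
The new tests can be run on their own from the repository root with the standard Go tooling, for example:

go test ./pkg/cluster/ -run 'TestResizeVolumeClaim|TestQuantityToGigabyte' -v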