improve logical backup comparison unit test and improve container sync (#2686)

* improve logical backup comparison unit test and improve container sync
* add new comparison function for volume mounts + unit test
Felix Kunde 2024-07-08 14:06:14 +02:00 committed by GitHub
parent 37d6993439
commit e71891e2bd
3 changed files with 334 additions and 15 deletions
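
Before this change, compareContainers fell back on reflect.DeepEqual for the VolumeMounts slices (first hunk below), and DeepEqual is strict about both element order and the nil-versus-empty-slice distinction. A minimal standalone illustration of that strictness (not part of the commit, shown only for context):

package main

import (
	"fmt"
	"reflect"

	v1 "k8s.io/api/core/v1"
)

func main() {
	a := []v1.VolumeMount{{Name: "data", MountPath: "/data"}, {Name: "etc", MountPath: "/etc"}}
	b := []v1.VolumeMount{{Name: "etc", MountPath: "/etc"}, {Name: "data", MountPath: "/data"}}

	// Same mounts, different order: DeepEqual reports a difference,
	// which would be treated as a container spec change.
	fmt.Println(reflect.DeepEqual(a, b)) // false

	// A nil slice and an empty slice are also reported as different.
	fmt.Println(reflect.DeepEqual([]v1.VolumeMount{}, []v1.VolumeMount(nil))) // false
}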


@@ -597,7 +597,7 @@ func (c *Cluster) compareContainers(description string, setA, setB []v1.Containe
newCheck("new %s's %s (index %d) security context does not match the current one",
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.SecurityContext, b.SecurityContext) }),
newCheck("new %s's %s (index %d) volume mounts do not match the current one",
- func(a, b v1.Container) bool { return !reflect.DeepEqual(a.VolumeMounts, b.VolumeMounts) }),
+ func(a, b v1.Container) bool { return !compareVolumeMounts(a.VolumeMounts, b.VolumeMounts) }),
}
if !c.OpConfig.EnableLazySpiloUpgrade {
@@ -738,6 +738,27 @@ func comparePorts(a, b []v1.ContainerPort) bool {
return true
}
func compareVolumeMounts(old, new []v1.VolumeMount) bool {
if len(old) != len(new) {
return false
}
for _, mount := range old {
if !volumeMountExists(mount, new) {
return false
}
}
return true
}
func volumeMountExists(mount v1.VolumeMount, mounts []v1.VolumeMount) bool {
for _, m := range mounts {
if reflect.DeepEqual(mount, m) {
return true
}
}
return false
}
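
The new helper treats the two lists as sets instead: lengths must match, and every current mount must have a deep-equal counterpart somewhere in the desired list, so pure reordering no longer counts as a difference. A hypothetical usage sketch (assumes the package context above; values invented for illustration):

oldMounts := []v1.VolumeMount{
	{Name: "data", MountPath: "/data"},
	{Name: "etc", ReadOnly: true, MountPath: "/etc"},
}
newMounts := []v1.VolumeMount{
	{Name: "etc", ReadOnly: true, MountPath: "/etc"},
	{Name: "data", MountPath: "/data"},
}

compareVolumeMounts(oldMounts, newMounts) // true: same set, order ignored
compareVolumeMounts(oldMounts, nil)       // false: lengths differ (2 vs 0)
compareVolumeMounts(nil, nil)             // true: len of a nil slice is 0 on both sides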
func (c *Cluster) compareAnnotations(old, new map[string]string) (bool, string) {
reason := ""
ignoredAnnotations := make(map[string]bool)


@@ -18,9 +18,11 @@ import (
"github.com/zalando/postgres-operator/pkg/util/config"
"github.com/zalando/postgres-operator/pkg/util/constants"
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
"github.com/zalando/postgres-operator/pkg/util/patroni"
"github.com/zalando/postgres-operator/pkg/util/teams"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
@@ -1464,7 +1466,7 @@ func TestCompareServices(t *testing.T) {
}
}
- func newCronJob(image, schedule string, vars []v1.EnvVar) *batchv1.CronJob {
+ func newCronJob(image, schedule string, vars []v1.EnvVar, mounts []v1.VolumeMount) *batchv1.CronJob {
cron := &batchv1.CronJob{
Spec: batchv1.CronJobSpec{
Schedule: schedule,
@@ -1477,6 +1479,37 @@ func newCronJob(image, schedule string, vars []v1.EnvVar) *batchv1.CronJob {
Name: "logical-backup",
Image: image,
Env: vars,
Ports: []v1.ContainerPort{
{
ContainerPort: patroni.ApiPort,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: pgPort,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: operatorPort,
Protocol: v1.ProtocolTCP,
},
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
},
SecurityContext: &v1.SecurityContext{
AllowPrivilegeEscalation: nil,
Privileged: util.False(),
ReadOnlyRootFilesystem: util.False(),
Capabilities: nil,
},
VolumeMounts: mounts,
},
},
},
@@ -1493,37 +1526,110 @@ func TestCompareLogicalBackupJob(t *testing.T) {
img1 := "registry.opensource.zalan.do/acid/logical-backup:v1.0"
img2 := "registry.opensource.zalan.do/acid/logical-backup:v2.0"
clientSet := fake.NewSimpleClientset()
acidClientSet := fakeacidv1.NewSimpleClientset()
namespace := "default"
client := k8sutil.KubernetesClient{
CronJobsGetter: clientSet.BatchV1(),
PostgresqlsGetter: acidClientSet.AcidV1(),
}
pg := acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: "acid-cron-cluster",
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
Volume: acidv1.Volume{
Size: "1Gi",
},
EnableLogicalBackup: true,
LogicalBackupSchedule: "0 0 * * *",
LogicalBackupRetention: "3 months",
},
}
var cluster = New(
Config{
OpConfig: config.Config{
PodManagementPolicy: "ordered_ready",
Resources: config.Resources{
ClusterLabels: map[string]string{"application": "spilo"},
ClusterNameLabel: "cluster-name",
DefaultCPURequest: "300m",
DefaultCPULimit: "300m",
DefaultMemoryRequest: "300Mi",
DefaultMemoryLimit: "300Mi",
PodRoleLabel: "spilo-role",
},
LogicalBackup: config.LogicalBackup{
LogicalBackupSchedule: "30 00 * * *",
LogicalBackupDockerImage: img1,
LogicalBackupJobPrefix: "logical-backup-",
LogicalBackupCPURequest: "100m",
LogicalBackupCPULimit: "100m",
LogicalBackupMemoryRequest: "100Mi",
LogicalBackupMemoryLimit: "100Mi",
LogicalBackupProvider: "s3",
LogicalBackupS3Bucket: "testBucket",
LogicalBackupS3BucketPrefix: "spilo",
LogicalBackupS3Region: "eu-central-1",
LogicalBackupS3Endpoint: "https://s3.amazonaws.com",
LogicalBackupS3AccessKeyID: "access",
LogicalBackupS3SecretAccessKey: "secret",
LogicalBackupS3SSE: "aws:kms",
LogicalBackupS3RetentionTime: "3 months",
LogicalBackupCronjobEnvironmentSecret: "",
},
},
}, client, pg, logger, eventRecorder)
desiredCronJob, err := cluster.generateLogicalBackupJob()
if err != nil {
t.Errorf("Could not generate logical backup job with error: %v", err)
}
err = cluster.createLogicalBackupJob()
if err != nil {
t.Errorf("Could not create logical backup job with error: %v", err)
}
currentCronJob, err := cluster.KubeClient.CronJobs(namespace).Get(context.TODO(), cluster.getLogicalBackupJobName(), metav1.GetOptions{})
if err != nil {
t.Errorf("Could not create logical backup job with error: %v", err)
}
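The reworked test no longer hand-builds both sides of the comparison: it generates the desired CronJob from the cluster spec, creates it through the fake clientset, reads it back as the current job, and then mutates only the fields under test. The create-and-get round trip against client-go's fake clientset looks roughly like this (a standalone sketch with invented names, not the operator's code):

package main

import (
	"context"
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	clientset := fake.NewSimpleClientset()
	job := &batchv1.CronJob{
		ObjectMeta: metav1.ObjectMeta{Name: "logical-backup-demo", Namespace: "default"},
		Spec:       batchv1.CronJobSpec{Schedule: "0 0 * * *"},
	}
	// The fake clientset stores the object in memory, so a later Get
	// returns exactly what was created.
	if _, err := clientset.BatchV1().CronJobs("default").Create(context.TODO(), job, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
	current, err := clientset.BatchV1().CronJobs("default").Get(context.TODO(), "logical-backup-demo", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(current.Spec.Schedule) // "0 0 * * *"
}
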
tests := []struct {
about string
- current *batchv1.CronJob
- new *batchv1.CronJob
+ cronjob *batchv1.CronJob
match bool
reason string
}{
{
about: "two equal cronjobs",
- current: newCronJob(img1, "0 0 * * *", []v1.EnvVar{}),
- new: newCronJob(img1, "0 0 * * *", []v1.EnvVar{}),
+ cronjob: newCronJob(img1, "0 0 * * *", []v1.EnvVar{}, []v1.VolumeMount{}),
match: true,
},
{
about: "two cronjobs with different image",
- current: newCronJob(img1, "0 0 * * *", []v1.EnvVar{}),
- new: newCronJob(img2, "0 0 * * *", []v1.EnvVar{}),
+ cronjob: newCronJob(img2, "0 0 * * *", []v1.EnvVar{}, []v1.VolumeMount{}),
match: false,
reason: fmt.Sprintf("new job's image %q does not match the current one %q", img2, img1),
},
{
about: "two cronjobs with different schedule",
- current: newCronJob(img1, "0 0 * * *", []v1.EnvVar{}),
- new: newCronJob(img1, "0 * * * *", []v1.EnvVar{}),
+ cronjob: newCronJob(img1, "0 * * * *", []v1.EnvVar{}, []v1.VolumeMount{}),
match: false,
reason: fmt.Sprintf("new job's schedule %q does not match the current one %q", "0 * * * *", "0 0 * * *"),
},
{
about: "two cronjobs with empty and nil volume mounts",
cronjob: newCronJob(img1, "0 0 * * *", []v1.EnvVar{}, nil),
match: true,
},
{
about: "two cronjobs with different environment variables",
current: newCronJob(img1, "0 0 * * *", []v1.EnvVar{{Name: "LOGICAL_BACKUP_S3_BUCKET_PREFIX", Value: "spilo"}}),
new: newCronJob(img1, "0 0 * * *", []v1.EnvVar{{Name: "LOGICAL_BACKUP_S3_BUCKET_PREFIX", Value: "logical-backup"}}),
cronjob: newCronJob(img1, "0 0 * * *", []v1.EnvVar{{Name: "LOGICAL_BACKUP_S3_BUCKET_PREFIX", Value: "logical-backup"}}, []v1.VolumeMount{}),
match: false,
reason: "logical backup container specs do not match: new cronjob container's logical-backup (index 0) environment does not match the current one",
},
@@ -1531,9 +1637,21 @@ func TestCompareLogicalBackupJob(t *testing.T) {
for _, tt := range tests {
t.Run(tt.about, func(t *testing.T) {
- match, reason := cl.compareLogicalBackupJob(tt.current, tt.new)
+ desiredCronJob.Spec.Schedule = tt.cronjob.Spec.Schedule
+ desiredCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image = tt.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image
+ desiredCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts = tt.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts
+ for _, testEnv := range tt.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env {
+ for i, env := range desiredCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env {
+ if env.Name == testEnv.Name {
+ desiredCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env[i] = testEnv
+ }
+ }
+ }
+ match, reason := cluster.compareLogicalBackupJob(currentCronJob, desiredCronJob)
if match != tt.match {
t.Errorf("%s - unexpected match result %t when comparing cronjobs %q and %q", t.Name(), match, tt.current, tt.new)
t.Errorf("%s - unexpected match result %t when comparing cronjobs %#v and %#v", t.Name(), match, currentCronJob, desiredCronJob)
} else {
if !strings.HasPrefix(reason, tt.reason) {
t.Errorf("%s - expected reason prefix %s, found %s", t.Name(), tt.reason, reason)
@@ -1728,3 +1846,183 @@ func TestComparePorts(t *testing.T) {
})
}
}
func TestCompareVolumeMounts(t *testing.T) {
testCases := []struct {
name string
mountsA []v1.VolumeMount
mountsB []v1.VolumeMount
expected bool
}{
{
name: "empty vs nil",
mountsA: []v1.VolumeMount{},
mountsB: nil,
expected: true,
},
{
name: "both empty",
mountsA: []v1.VolumeMount{},
mountsB: []v1.VolumeMount{},
expected: true,
},
{
name: "same mounts",
mountsA: []v1.VolumeMount{
{
Name: "data",
ReadOnly: false,
MountPath: "/data",
SubPath: "subdir",
},
},
mountsB: []v1.VolumeMount{
{
Name: "data",
ReadOnly: false,
MountPath: "/data",
SubPath: "subdir",
},
},
expected: true,
},
{
name: "different mounts",
mountsA: []v1.VolumeMount{
{
Name: "data",
ReadOnly: false,
MountPath: "/data",
SubPathExpr: "$(POD_NAME)",
},
},
mountsB: []v1.VolumeMount{
{
Name: "data",
ReadOnly: false,
MountPath: "/data",
SubPath: "subdir",
},
},
expected: false,
},
{
name: "one equal mount one different",
mountsA: []v1.VolumeMount{
{
Name: "data",
ReadOnly: false,
MountPath: "/data",
SubPath: "subdir",
},
{
Name: "poddata",
ReadOnly: false,
MountPath: "/poddata",
SubPathExpr: "$(POD_NAME)",
},
},
mountsB: []v1.VolumeMount{
{
Name: "data",
ReadOnly: false,
MountPath: "/data",
SubPath: "subdir",
},
{
Name: "etc",
ReadOnly: true,
MountPath: "/etc",
},
},
expected: false,
},
{
name: "same mounts, different order",
mountsA: []v1.VolumeMount{
{
Name: "data",
ReadOnly: false,
MountPath: "/data",
SubPath: "subdir",
},
{
Name: "etc",
ReadOnly: true,
MountPath: "/etc",
},
},
mountsB: []v1.VolumeMount{
{
Name: "etc",
ReadOnly: true,
MountPath: "/etc",
},
{
Name: "data",
ReadOnly: false,
MountPath: "/data",
SubPath: "subdir",
},
},
expected: true,
},
{
name: "new mounts added",
mountsA: []v1.VolumeMount{
{
Name: "data",
ReadOnly: false,
MountPath: "/data",
SubPath: "subdir",
},
},
mountsB: []v1.VolumeMount{
{
Name: "etc",
ReadOnly: true,
MountPath: "/etc",
},
{
Name: "data",
ReadOnly: false,
MountPath: "/data",
SubPath: "subdir",
},
},
expected: false,
},
{
name: "one mount removed",
mountsA: []v1.VolumeMount{
{
Name: "data",
ReadOnly: false,
MountPath: "/data",
SubPath: "subdir",
},
{
Name: "etc",
ReadOnly: true,
MountPath: "/etc",
},
},
mountsB: []v1.VolumeMount{
{
Name: "data",
ReadOnly: false,
MountPath: "/data",
SubPath: "subdir",
},
},
expected: false,
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
got := compareVolumeMounts(tt.mountsA, tt.mountsB)
assert.Equal(t, tt.expected, got)
})
}
}
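
One subtlety these cases do not exercise: because compareVolumeMounts checks only length plus membership, lists that differ solely in duplicated entries can still compare as equal. A minimal illustration (same package context as above):

a := []v1.VolumeMount{
	{Name: "data", MountPath: "/data"},
	{Name: "data", MountPath: "/data"}, // duplicate of the first entry
}
b := []v1.VolumeMount{
	{Name: "data", MountPath: "/data"},
	{Name: "etc", MountPath: "/etc"},
}

// Lengths match (2 == 2) and every mount in a has a deep-equal match in b,
// so this returns true even though the lists differ.
compareVolumeMounts(a, b) // true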


@@ -892,7 +892,7 @@ func (c *Cluster) generatePodTemplate(
addSecretVolume(&podSpec, additionalSecretMount, additionalSecretMountPath)
}
- if additionalVolumes != nil {
+ if len(additionalVolumes) > 0 {
c.addAdditionalVolumes(&podSpec, additionalVolumes)
}
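
The last hunk tightens the guard around addAdditionalVolumes: in Go, len of a nil slice is 0, so len(additionalVolumes) > 0 skips the call for both nil and empty slices, while the old additionalVolumes != nil check still invoked it for an empty, non-nil slice. A quick standalone illustration of the distinction:

package main

import "fmt"

func main() {
	var nilSlice []string    // nil slice
	emptySlice := []string{} // non-nil, zero length

	fmt.Println(nilSlice == nil, len(nilSlice))     // true 0
	fmt.Println(emptySlice == nil, len(emptySlice)) // false 0

	// Old guard: (s != nil) lets an empty slice through.
	// New guard: (len(s) > 0) rejects both nil and empty.
}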