Merge branch 'master' into move-to-multiarch

idanovinda 2025-12-10 15:18:34 +01:00
commit 5d5bea020d
6 changed files with 296 additions and 26 deletions

View File

@@ -17,7 +17,7 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
* Live volume resize without pod restarts (AWS EBS, PVC)
* Database connection pooling with PGBouncer
* Support fast in place major version upgrade. Supports global upgrade of all clusters.
-* Pod protection during boostrap phase and configurable maintenance windows
+* Pod protection during bootstrap phase and configurable maintenance windows
* Restore and cloning Postgres clusters on AWS, GCS and Azure
* Additionally logical backups to S3 or GCS bucket can be configured
* Standby cluster from S3 or GCS WAL archive

View File

@@ -281,9 +281,23 @@ func findUsersFromRotation(rotatedUsers []string, db *sql.DB) (map[string]string
return extraUsers, nil
}
-func (c *Cluster) cleanupRotatedUsers(rotatedUsers []string, db *sql.DB) error {
+func (c *Cluster) cleanupRotatedUsers(rotatedUsers []string) error {
c.setProcessName("checking for rotated users to remove from the database due to configured retention")
-extraUsers, err := findUsersFromRotation(rotatedUsers, db)
+err := c.initDbConn()
+if err != nil {
+return fmt.Errorf("could not init db connection: %v", err)
+}
+defer func() {
+if c.connectionIsClosed() {
+return
+}
+if err := c.closeDbConn(); err != nil {
+c.logger.Errorf("could not close database connection after removing users exceeding configured retention interval: %v", err)
+}
+}()
+extraUsers, err := findUsersFromRotation(rotatedUsers, c.pgDb)
if err != nil {
return fmt.Errorf("error when querying for deprecated users from password rotation: %v", err)
}
@@ -304,7 +318,7 @@ func (c *Cluster) cleanupRotatedUsers(rotatedUsers []string, db *sql.DB) error {
}
if retentionDate.After(userCreationDate) {
c.logger.Infof("dropping user %q due to configured days in password_rotation_user_retention", rotatedUser)
-if err = users.DropPgUser(rotatedUser, db); err != nil {
+if err = users.DropPgUser(rotatedUser, c.pgDb); err != nil {
c.logger.Errorf("could not drop role %q: %v", rotatedUser, err)
continue
}
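
The refactored cleanupRotatedUsers now owns its database connection instead of receiving *sql.DB from the caller (the syncSecrets hunk further down shrinks accordingly), and the deferred close is skipped when the connection is already closed. A minimal, dependency-free sketch of that lifecycle pattern, with illustrative types and names rather than the operator's API:

package main

import (
    "errors"
    "fmt"
)

// conn stands in for the operator's wrapped *sql.DB.
type conn struct{ closed bool }

func (c *conn) close() error {
    if c.closed {
        return errors.New("already closed")
    }
    c.closed = true
    return nil
}

type cluster struct{ db *conn }

func (c *cluster) initDbConn() error        { c.db = &conn{}; return nil }
func (c *cluster) connectionIsClosed() bool { return c.db == nil || c.db.closed }

func (c *cluster) cleanupRotatedUsers(users []string) error {
    if err := c.initDbConn(); err != nil {
        return fmt.Errorf("could not init db connection: %v", err)
    }
    defer func() {
        // the guard keeps the defer idempotent if a helper closed the connection early
        if c.connectionIsClosed() {
            return
        }
        if err := c.db.close(); err != nil {
            fmt.Printf("could not close database connection: %v\n", err) // the operator logs instead
        }
    }()
    _ = users // the real method queries rotated users and drops those past retention
    return nil
}

func main() {
    fmt.Println((&cluster{}).cleanupRotatedUsers([]string{"foo_user_20250101"})) // <nil>
}

Pushing connection handling into the function gives every caller the same open/close discipline for free, at the cost of one connection setup per call.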

View File

@@ -1303,6 +1303,9 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
c.logger.Warningf("initContainers specified but disabled in configuration - next statefulset creation would fail")
}
initContainers = spec.InitContainers
+if err := c.validateContainers(initContainers); err != nil {
+return nil, fmt.Errorf("invalid init containers: %v", err)
+}
}
// backward compatible check for InitContainers
@@ -1455,6 +1458,10 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
sidecarContainers = patchSidecarContainers(sidecarContainers, volumeMounts, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername))
+if err := c.validateContainers(sidecarContainers); err != nil {
+return nil, fmt.Errorf("invalid sidecar containers: %v", err)
+}
tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)
@@ -2592,3 +2599,15 @@ func ensurePath(file string, defaultDir string, defaultFile string) string {
}
return file
}
+func (c *Cluster) validateContainers(containers []v1.Container) error {
+for i, container := range containers {
+if container.Name == "" {
+return fmt.Errorf("container[%d]: name is required", i)
+}
+if container.Image == "" {
+return fmt.Errorf("container '%v': image is required", container.Name)
+}
+}
+return nil
+}
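
The new validateContainers helper fails fast on the two fields a pod spec cannot do without, instead of letting the Kubernetes API reject the StatefulSet later. A self-contained sketch of the same checks, with a local struct standing in for v1.Container so it runs without Kubernetes imports:

package main

import "fmt"

// container mirrors the two fields the validation inspects.
type container struct {
    Name  string
    Image string
}

func validateContainers(containers []container) error {
    for i, c := range containers {
        if c.Name == "" {
            // no name to report, so the error falls back to the index
            return fmt.Errorf("container[%d]: name is required", i)
        }
        if c.Image == "" {
            return fmt.Errorf("container '%v': image is required", c.Name)
        }
    }
    return nil
}

func main() {
    fmt.Println(validateContainers([]container{{Name: "metrics"}}))                 // container 'metrics': image is required
    fmt.Println(validateContainers([]container{{Image: "busybox:latest"}}))         // container[0]: name is required
    fmt.Println(validateContainers([]container{{Name: "ok", Image: "busybox:latest"}})) // <nil>
}

Since the name check runs first, a container missing both fields reports "name is required", which the "empty container name and image" test case below relies on.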

View File

@@ -1935,7 +1935,8 @@ func TestAdditionalVolume(t *testing.T) {
AdditionalVolumes: additionalVolumes,
Sidecars: []acidv1.Sidecar{
{
-Name: sidecarName,
+Name:        sidecarName,
+DockerImage: "test-image",
},
},
},
@@ -2163,10 +2164,12 @@ func TestSidecars(t *testing.T) {
},
Sidecars: []acidv1.Sidecar{
{
Name: "cluster-specific-sidecar",
Name: "cluster-specific-sidecar",
DockerImage: "test-image",
},
{
Name: "cluster-specific-sidecar-with-resources",
Name: "cluster-specific-sidecar-with-resources",
DockerImage: "test-image",
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")},
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")},
@@ -2201,7 +2204,8 @@ func TestSidecars(t *testing.T) {
},
SidecarContainers: []v1.Container{
{
Name: "global-sidecar",
Name: "global-sidecar",
Image: "test-image",
},
// will be replaced by a cluster specific sidecar with the same name
{
@@ -2271,6 +2275,7 @@ func TestSidecars(t *testing.T) {
// cluster specific sidecar
assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{
Name: "cluster-specific-sidecar",
Image: "test-image",
Env: env,
Resources: generateKubernetesResources("200m", "500m", "0.7Gi", "1.3Gi"),
ImagePullPolicy: v1.PullIfNotPresent,
@@ -2297,6 +2302,7 @@ func TestSidecars(t *testing.T) {
// global sidecar
assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{
Name: "global-sidecar",
Image: "test-image",
Env: env,
VolumeMounts: mounts,
})
@@ -2325,6 +2331,180 @@ func TestSidecars(t *testing.T) {
}
+func TestContainerValidation(t *testing.T) {
+testCases := []struct {
+name string
+spec acidv1.PostgresSpec
+clusterConfig Config
+expectedError string
+}{
+{
+name: "init container without image",
+spec: acidv1.PostgresSpec{
+PostgresqlParam: acidv1.PostgresqlParam{
+PgVersion: "17",
+},
+TeamID: "myapp",
+NumberOfInstances: 1,
+Volume: acidv1.Volume{
+Size: "1G",
+},
+InitContainers: []v1.Container{
+{
+Name: "invalid-initcontainer",
+},
+},
+},
+clusterConfig: Config{
+OpConfig: config.Config{
+PodManagementPolicy: "ordered_ready",
+ProtectedRoles: []string{"admin"},
+Auth: config.Auth{
+SuperUsername: superUserName,
+ReplicationUsername: replicationUserName,
+},
+},
+},
+expectedError: "image is required",
+},
+{
+name: "sidecar without name",
+spec: acidv1.PostgresSpec{
+PostgresqlParam: acidv1.PostgresqlParam{
+PgVersion: "17",
+},
+TeamID: "myapp",
+NumberOfInstances: 1,
+Volume: acidv1.Volume{
+Size: "1G",
+},
+},
+clusterConfig: Config{
+OpConfig: config.Config{
+PodManagementPolicy: "ordered_ready",
+ProtectedRoles: []string{"admin"},
+Auth: config.Auth{
+SuperUsername: superUserName,
+ReplicationUsername: replicationUserName,
+},
+SidecarContainers: []v1.Container{
+{
+Image: "test-image",
+},
+},
+},
+},
+expectedError: "name is required",
+},
+{
+name: "sidecar without image",
+spec: acidv1.PostgresSpec{
+PostgresqlParam: acidv1.PostgresqlParam{
+PgVersion: "17",
+},
+TeamID: "myapp",
+NumberOfInstances: 1,
+Volume: acidv1.Volume{
+Size: "1G",
+},
+Sidecars: []acidv1.Sidecar{
+{
+Name: "invalid-sidecar",
+},
+},
+},
+clusterConfig: Config{
+OpConfig: config.Config{
+PodManagementPolicy: "ordered_ready",
+ProtectedRoles: []string{"admin"},
+Auth: config.Auth{
+SuperUsername: superUserName,
+ReplicationUsername: replicationUserName,
+},
+},
+},
+expectedError: "image is required",
+},
+{
+name: "valid containers pass validation",
+spec: acidv1.PostgresSpec{
+PostgresqlParam: acidv1.PostgresqlParam{
+PgVersion: "17",
+},
+TeamID: "myapp",
+NumberOfInstances: 1,
+Volume: acidv1.Volume{
+Size: "1G",
+},
+Sidecars: []acidv1.Sidecar{
+{
+Name: "valid-sidecar",
+DockerImage: "busybox:latest",
+},
+},
+InitContainers: []v1.Container{
+{
+Name: "valid-initcontainer",
+Image: "alpine:latest",
+},
+},
+},
+clusterConfig: Config{
+OpConfig: config.Config{
+PodManagementPolicy: "ordered_ready",
+ProtectedRoles: []string{"admin"},
+Auth: config.Auth{
+SuperUsername: superUserName,
+ReplicationUsername: replicationUserName,
+},
+},
+},
+expectedError: "",
+},
+{
+name: "multiple invalid sidecars",
+spec: acidv1.PostgresSpec{
+Sidecars: []acidv1.Sidecar{
+{
+Name: "sidecar1",
+},
+{
+Name: "sidecar2",
+},
+},
+},
+expectedError: "image is required",
+},
+{
+name: "empty container name and image",
+spec: acidv1.PostgresSpec{
+InitContainers: []v1.Container{
+{
+Name: "",
+Image: "",
+},
+},
+},
+expectedError: "name is required",
+},
+}
+for _, tc := range testCases {
+t.Run(tc.name, func(t *testing.T) {
+cluster := New(tc.clusterConfig, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+_, err := cluster.generateStatefulSet(&tc.spec)
+if tc.expectedError != "" {
+assert.Error(t, err)
+assert.Contains(t, err.Error(), tc.expectedError)
+} else {
+assert.NoError(t, err)
+}
+})
+}
+}
func TestGeneratePodDisruptionBudget(t *testing.T) {
testName := "Test PodDisruptionBudget spec generation"
@@ -2618,7 +2798,8 @@ func TestGenerateService(t *testing.T) {
Name: "cluster-specific-sidecar",
},
{
Name: "cluster-specific-sidecar-with-resources",
Name: "cluster-specific-sidecar-with-resources",
DockerImage: "test-image",
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")},
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")},
@@ -2928,6 +3109,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
namespace := "default"
clusterNameLabel := "cluster-name"
sidecarName := "postgres-exporter"
dockerImage := "test-image"
// enforceMinResourceLimits will be called 2 times emitting 4 events (2x cpu, 2x memory raise)
// enforceMaxResourceRequests will be called 4 times emitting 6 events (2x cpu, 4x memory cap)
@@ -2993,7 +3175,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
Spec: acidv1.PostgresSpec{
Sidecars: []acidv1.Sidecar{
{
-Name: sidecarName,
+Name:        sidecarName,
+DockerImage: dockerImage,
},
},
TeamID: "acid",
@@ -3232,7 +3415,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
Spec: acidv1.PostgresSpec{
Sidecars: []acidv1.Sidecar{
{
-Name: sidecarName,
+Name:        sidecarName,
+DockerImage: dockerImage,
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")},
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
@@ -3321,7 +3505,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
Spec: acidv1.PostgresSpec{
Sidecars: []acidv1.Sidecar{
{
-Name: sidecarName,
+Name:        sidecarName,
+DockerImage: dockerImage,
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")},
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},

View File

@@ -1078,7 +1078,7 @@ func (c *Cluster) syncSecrets() error {
c.Secrets[updatedSecret.UID] = updatedSecret
continue
}
errors = append(errors, fmt.Sprintf("syncing secret %s failed: %v", util.NameFromMeta(updatedSecret.ObjectMeta), err))
errors = append(errors, fmt.Sprintf("syncing secret %s failed: %v", util.NameFromMeta(generatedSecret.ObjectMeta), err))
pgUserDegraded = true
} else {
errors = append(errors, fmt.Sprintf("could not create secret for user %s: in namespace %s: %v", secretUsername, generatedSecret.Namespace, err))
@@ -1089,16 +1089,9 @@ func (c *Cluster) syncSecrets() error {
// remove rotation users that exceed the retention interval
if len(retentionUsers) > 0 {
-err := c.initDbConn()
-if err != nil {
-errors = append(errors, fmt.Sprintf("could not init db connection: %v", err))
-}
-if err = c.cleanupRotatedUsers(retentionUsers, c.pgDb); err != nil {
+if err := c.cleanupRotatedUsers(retentionUsers); err != nil {
errors = append(errors, fmt.Sprintf("error removing users exceeding configured retention interval: %v", err))
}
-if err := c.closeDbConn(); err != nil {
-errors = append(errors, fmt.Sprintf("could not close database connection after removing users exceeding configured retention interval: %v", err))
-}
}
if len(errors) > 0 {
@@ -1187,13 +1180,18 @@ func (c *Cluster) updateSecret(
}
} else {
// username might not match if password rotation has been disabled again
-if secretUsername != string(secret.Data["username"]) {
+usernameFromSecret := string(secret.Data["username"])
+if secretUsername != usernameFromSecret {
+// handle edge case when manifest user conflicts with a user from prepared databases
+if strings.Replace(usernameFromSecret, "-", "_", -1) == strings.Replace(secretUsername, "-", "_", -1) {
+return nil, fmt.Errorf("could not update secret because of user name mismatch: expected: %s, got: %s", secretUsername, usernameFromSecret)
+}
*retentionUsers = append(*retentionUsers, secretUsername)
secret.Data["username"] = []byte(secretUsername)
secret.Data["password"] = []byte(util.RandomPassword(constants.PasswordLength))
secret.Data["nextRotation"] = []byte{}
updateSecret = true
updateSecretMsg = fmt.Sprintf("secret %s does not contain the role %s - updating username and resetting password", secretName, secretUsername)
updateSecretMsg = fmt.Sprintf("secret does not contain the role %s - updating username and resetting password", secretUsername)
}
}
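
The dash/underscore comparison added above catches an edge case in which two distinct Postgres roles map to the same Kubernetes secret: underscores are not allowed in secret names, so a manifest user prepared-owner-user and a prepared-database owner prepared_owner_user end up targeting one secret object (exercised by TestUpdateSecretNameConflict at the end of this diff). A standalone sketch of the normalization; sameRoleModuloDashes is an illustrative name, not operator code:

package main

import (
    "fmt"
    "strings"
)

// sameRoleModuloDashes reports whether two role names differ only in
// dashes vs underscores, i.e. whether they would collide once mapped
// to a Kubernetes secret name.
func sameRoleModuloDashes(a, b string) bool {
    return strings.ReplaceAll(a, "-", "_") == strings.ReplaceAll(b, "-", "_")
}

func main() {
    fmt.Println(sameRoleModuloDashes("prepared-owner-user", "prepared_owner_user")) // true: conflict
    fmt.Println(sameRoleModuloDashes("app-user", "db_owner"))                       // false: distinct roles
}

Returning an error on this collision, rather than treating the mismatch as a disabled password rotation, keeps the two roles from silently overwriting each other's credentials in the shared secret.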
@@ -1223,18 +1221,18 @@ func (c *Cluster) updateSecret(
if updateSecret {
c.logger.Infof("%s", updateSecretMsg)
if secret, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil {
return secret, fmt.Errorf("could not update secret %s: %v", secretName, err)
return nil, fmt.Errorf("could not update secret: %v", err)
}
}
if changed, _ := c.compareAnnotations(secret.Annotations, generatedSecret.Annotations, nil); changed {
patchData, err := metaAnnotationsPatch(generatedSecret.Annotations)
if err != nil {
return secret, fmt.Errorf("could not form patch for secret %q annotations: %v", secret.Name, err)
return nil, fmt.Errorf("could not form patch for secret annotations: %v", err)
}
secret, err = c.KubeClient.Secrets(secret.Namespace).Patch(context.TODO(), secret.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
if err != nil {
return secret, fmt.Errorf("could not patch annotations for secret %q: %v", secret.Name, err)
return nil, fmt.Errorf("could not patch annotations for secret: %v", err)
}
}

View File

@@ -964,3 +964,57 @@ func TestUpdateSecret(t *testing.T) {
t.Errorf("%s: updated secret does not contain expected username: expected %s, got %s", testName, appUser, currentUsername)
}
}
+func TestUpdateSecretNameConflict(t *testing.T) {
+client, _ := newFakeK8sSyncSecretsClient()
+clusterName := "acid-test-cluster"
+namespace := "default"
+secretTemplate := config.StringTemplate("{username}.{cluster}.credentials")
+// define manifest user that has the same name as a prepared database owner user except for dashes vs underscores
+// because of this the operator cannot create both secrets because underscores are not allowed in k8s secret names
+pg := acidv1.Postgresql{
+ObjectMeta: metav1.ObjectMeta{
+Name: clusterName,
+Namespace: namespace,
+},
+Spec: acidv1.PostgresSpec{
+PreparedDatabases: map[string]acidv1.PreparedDatabase{"prepared": {DefaultUsers: true}},
+Users: map[string]acidv1.UserFlags{"prepared-owner-user": {}},
+Volume: acidv1.Volume{
+Size: "1Gi",
+},
+},
+}
+var cluster = New(
+Config{
+OpConfig: config.Config{
+Auth: config.Auth{
+SuperUsername: "postgres",
+ReplicationUsername: "standby",
+SecretNameTemplate: secretTemplate,
+},
+Resources: config.Resources{
+ClusterLabels: map[string]string{"application": "spilo"},
+ClusterNameLabel: "cluster-name",
+},
+},
+}, client, pg, logger, eventRecorder)
+cluster.Name = clusterName
+cluster.Namespace = namespace
+cluster.pgUsers = map[string]spec.PgUser{}
+// init all users
+cluster.initUsers()
+// create secrets and fail because of user name mismatch
+// prepared-owner-user from manifest vs prepared_owner_user from prepared database
+err := cluster.syncSecrets()
+assert.Error(t, err)
+// the order of secrets to sync is not deterministic, check only first part of the error message
+expectedError := fmt.Sprintf("syncing secret %s failed: could not update secret because of user name mismatch", "default/prepared-owner-user.acid-test-cluster.credentials")
+assert.Contains(t, err.Error(), expectedError)
+}