This commit is contained in:
Jan Breitkopf 2025-10-21 15:02:32 +02:00 committed by GitHub
commit fdbdae1764
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
12 changed files with 43 additions and 16 deletions

View File

@ -551,6 +551,9 @@ spec:
default: "30 00 * * *"
logical_backup_cronjob_environment_secret:
type: string
logical_backup_filename_date_format:
type: string
default: "+%s"
debug:
type: object
properties:

View File

@ -392,6 +392,8 @@ configLogicalBackup:
logical_backup_schedule: "30 00 * * *"
# secret to be used as reference for env variables in cronjob
logical_backup_cronjob_environment_secret: ""
# date format for the logical backup filename (passed to the `date` utility)
logical_backup_filename_date_format: "+%s"
# automate creation of human users with teams API service
configTeamsApi:

View File

@ -881,6 +881,9 @@ grouped under the `logical_backup` key.
* **logical_backup_cronjob_environment_secret**
Reference to a Kubernetes secret whose keys will be added as environment variables to the cronjob. Default: ""
* **logical_backup_filename_date_format**
Date format used for the logical backup filename, passed as a format argument to the Linux `date` utility (e.g. `"+%Y-%m-%d"`). Default: "+%s"
## Debugging the operator
Options to aid debugging of the operator itself. Grouped under the `debug` key.

View File

@ -10,6 +10,7 @@ ALL_DB_SIZE_QUERY="select sum(pg_database_size(datname)::numeric) from pg_databa
PG_BIN=$PG_DIR/$PG_VERSION/bin
DUMP_SIZE_COEFF=5
ERRORCOUNT=0
TIMESTAMP=$(eval date $LOGICAL_BACKUP_FILENAME_DATE_FORMAT)
TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
KUBERNETES_SERVICE_PORT=${KUBERNETES_SERVICE_PORT:-443}
@ -45,7 +46,7 @@ function compress {
}
function az_upload {
PATH_TO_BACKUP=$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz
PATH_TO_BACKUP=$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$TIMESTAMP.sql.gz
az storage blob upload --file "$1" --account-name "$LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_NAME" --account-key "$LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_KEY" -c "$LOGICAL_BACKUP_AZURE_STORAGE_CONTAINER" -n "$PATH_TO_BACKUP"
}
@ -107,7 +108,7 @@ function aws_upload {
# mimic bucket setup from Spilo
# to keep logical backups at the same path as WAL
# NB: $LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX already contains the leading "/" when set by the Postgres Operator
PATH_TO_BACKUP=s3://$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz
PATH_TO_BACKUP=s3://$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$TIMESTAMP.sql.gz
args=()
@ -120,7 +121,7 @@ function aws_upload {
}
function gcs_upload {
PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz
PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$TIMESTAMP.sql.gz
#Set local LOGICAL_GOOGLE_APPLICATION_CREDENTIALS to nothing or
#value of LOGICAL_GOOGLE_APPLICATION_CREDENTIALS env var. Needed

View File

@ -101,6 +101,7 @@ data:
logical_backup_s3_sse: "AES256"
logical_backup_s3_retention_time: ""
logical_backup_schedule: "30 00 * * *"
logical_backup_filename_date_format: "+%s"
major_version_upgrade_mode: "manual"
# major_version_upgrade_team_allow_list: ""
master_dns_name_format: "{cluster}.{namespace}.{hostedzone}"

View File

@ -1800,6 +1800,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"logical_backup_cronjob_environment_secret": {
Type: "string",
},
"logical_backup_filename_date_format": {
Type: "string",
},
},
},
"debug": {

View File

@ -244,6 +244,7 @@ type OperatorLogicalBackupConfiguration struct {
MemoryRequest string `json:"logical_backup_memory_request,omitempty"`
CPULimit string `json:"logical_backup_cpu_limit,omitempty"`
MemoryLimit string `json:"logical_backup_memory_limit,omitempty"`
FilenameDateFormat string `json:"logical_backup_filename_date_format,omitempty"`
}
// PatroniConfiguration defines configuration for Patroni

View File

@ -1660,6 +1660,7 @@ func TestCompareLogicalBackupJob(t *testing.T) {
LogicalBackupS3SSE: "aws:kms",
LogicalBackupS3RetentionTime: "3 months",
LogicalBackupCronjobEnvironmentSecret: "",
LogicalBackupFilenameDateFormat: "+%s",
},
},
}, client, pg, logger, eventRecorder)

View File

@ -2487,6 +2487,10 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
Name: "LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX",
Value: getBucketScopeSuffix(string(c.Postgresql.GetUID())),
},
{
Name: "LOGICAL_BACKUP_FILENAME_DATE_FORMAT",
Value: c.OpConfig.LogicalBackup.LogicalBackupFilenameDateFormat,
},
}
switch backupProvider {

View File

@ -3784,21 +3784,26 @@ func TestGenerateLogicalBackupPodEnvVars(t *testing.T) {
},
{
envIndex: 13,
envVarConstant: "LOGICAL_BACKUP_FILENAME_DATE_FORMAT",
envVarValue: "+%s",
},
{
envIndex: 14,
envVarConstant: "LOGICAL_BACKUP_S3_REGION",
envVarValue: "eu-central-1",
},
{
envIndex: 14,
envIndex: 15,
envVarConstant: "LOGICAL_BACKUP_S3_ENDPOINT",
envVarValue: "",
},
{
envIndex: 15,
envIndex: 16,
envVarConstant: "LOGICAL_BACKUP_S3_SSE",
envVarValue: "",
},
{
envIndex: 16,
envIndex: 17,
envVarConstant: "LOGICAL_BACKUP_S3_RETENTION_TIME",
envVarValue: "1 month",
},
@ -3811,7 +3816,7 @@ func TestGenerateLogicalBackupPodEnvVars(t *testing.T) {
envVarValue: "gcs",
},
{
envIndex: 13,
envIndex: 14,
envVarConstant: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS",
envVarValue: "some-path-to-credentials",
},
@ -3824,17 +3829,17 @@ func TestGenerateLogicalBackupPodEnvVars(t *testing.T) {
envVarValue: "az",
},
{
envIndex: 13,
envIndex: 14,
envVarConstant: "LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_NAME",
envVarValue: "some-azure-storage-account-name",
},
{
envIndex: 14,
envIndex: 15,
envVarConstant: "LOGICAL_BACKUP_AZURE_STORAGE_CONTAINER",
envVarValue: "some-azure-storage-container",
},
{
envIndex: 15,
envIndex: 16,
envVarConstant: "LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_KEY",
envVarValue: "some-azure-storage-account-key",
},
@ -3842,7 +3847,7 @@ func TestGenerateLogicalBackupPodEnvVars(t *testing.T) {
expectedLogicalBackupRetentionTime := []ExpectedValue{
{
envIndex: 16,
envIndex: 17,
envVarConstant: "LOGICAL_BACKUP_S3_RETENTION_TIME",
envVarValue: "3 months",
},
@ -3861,6 +3866,7 @@ func TestGenerateLogicalBackupPodEnvVars(t *testing.T) {
LogicalBackupProvider: "s3",
LogicalBackupS3Bucket: dummyBucket,
LogicalBackupS3BucketPrefix: "spilo",
LogicalBackupFilenameDateFormat: "+%s",
LogicalBackupS3Region: "eu-central-1",
LogicalBackupS3RetentionTime: "1 month",
},

View File

@ -200,6 +200,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.LogicalBackupMemoryRequest = fromCRD.LogicalBackup.MemoryRequest
result.LogicalBackupCPULimit = fromCRD.LogicalBackup.CPULimit
result.LogicalBackupMemoryLimit = fromCRD.LogicalBackup.MemoryLimit
result.LogicalBackupFilenameDateFormat = fromCRD.LogicalBackup.FilenameDateFormat
// debug config
result.DebugLogging = fromCRD.OperatorDebug.DebugLogging

View File

@ -129,6 +129,7 @@ type LogicalBackup struct {
LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"`
LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"ghcr.io/zalando/postgres-operator/logical-backup:v1.15.0"`
LogicalBackupProvider string `name:"logical_backup_provider" default:"s3"`
LogicalBackupFilenameDateFormat string `name:"logical_backup_filename_date_format"`
LogicalBackupAzureStorageAccountName string `name:"logical_backup_azure_storage_account_name" default:""`
LogicalBackupAzureStorageContainer string `name:"logical_backup_azure_storage_container" default:""`
LogicalBackupAzureStorageAccountKey string `name:"logical_backup_azure_storage_account_key" default:""`