add new options to all places

Felix Kunde 2020-12-11 11:52:29 +01:00
parent 64060cad84
commit 182369819d
13 changed files with 86 additions and 79 deletions

@ -292,6 +292,10 @@ spec:
type: string
aws_region:
type: string
enable_ebs_gp3_migration:
type: boolean
enable_ebs_gp3_migration_max_size:
type: integer
gcp_credentials:
type: string
kube_iam_role:

@ -219,6 +219,11 @@ configAwsOrGcp:
# AWS region used to store EBS volumes
aws_region: eu-central-1
# enable automatic migration on AWS from gp2 to gp3 volumes
enable_ebs_gp3_migration: false
# defines the maximum volume size in GB up to which auto migration happens
# enable_ebs_gp3_migration_max_size: 1000
# GCP credentials that will be used by the operator / pods
# gcp_credentials: ""

@ -211,6 +211,14 @@ configAwsOrGcp:
# AWS region used to store EBS volumes
aws_region: eu-central-1
# enable automatic migration on AWS from gp2 to gp3 volumes
enable_ebs_gp3_migration: "false"
# defines the maximum volume size in GB up to which auto migration happens
# enable_ebs_gp3_migration_max_size: 1000
# GCP credentials for setting the GOOGLE_APPLICATION_CREDENTIALS environment variable
# gcp_credentials: ""
# AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods
# kube_iam_role: ""
@ -223,9 +231,6 @@ configAwsOrGcp:
# GCS bucket to use for shipping WAL segments with WAL-E
# wal_gs_bucket: ""
# GCP credentials for setting the GOOGLE_APPLICATION_CREDENTIALS environment variable
# gcp_credentials: ""
# configure K8s cron job managed by the operator
configLogicalBackup:
# image for pods of the logical backup job (example runs pg_dumpall)

@ -368,13 +368,6 @@ configuration they are grouped under the `kubernetes` key.
changes PVC definition, off - disables resize of the volumes. Default is "ebs".
When using OpenShift please use one of the other available options.
* **enable_ebs_gp3_migration**
enable automatic migration on AWS from gp2 volumes to gp3 volumes, smaller than configured max size.
it ignored that ebs gp3 is by default only 125mb/sec vs 250mb/sec for gp2 >= 333gb.
* **enable_ebs_gp3_migration_max_size**
defines the maximum volume size until which auto migration happens, default 1tb which matches 3000iops default
## Kubernetes resource requests
This group allows you to configure resource requests for the Postgres pods.
@ -525,10 +518,22 @@ yet officially supported.
AWS region used to store EBS volumes. The default is `eu-central-1`.
* **additional_secret_mount**
Additional Secret (aws or gcp credentials) to mount in the pod. The default is empty.
Additional Secret (aws or gcp credentials) to mount in the pod.
The default is empty.
* **additional_secret_mount_path**
Path to mount the above Secret in the filesystem of the container(s). The default is empty.
Path to mount the above Secret in the filesystem of the container(s).
The default is empty.
* **enable_ebs_gp3_migration**
enable automatic migration on AWS from gp2 to gp3 volumes that are smaller
than the configured max size (see below). This ignores that EBS gp3 is by
default only 125 MB/sec vs 250 MB/sec for gp2 >= 333GB.
The default is `false`.
* **enable_ebs_gp3_migration_max_size**
defines the maximum volume size in GB up to which auto migration happens.
Default is 1000 (1 TB), which matches the 3000 IOPS default; a sketch of the
underlying AWS call follows.
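For illustration only, the gp2-to-gp3 switch amounts to a single EBS `ModifyVolume` call per eligible volume. A minimal sketch with aws-sdk-go, assuming a hypothetical helper `migrateIfEligible` (the volume ID and sizes are placeholders, and this is not the operator's actual implementation):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// migrateIfEligible mirrors the enable_ebs_gp3_migration_max_size semantics:
// volumes larger than maxSizeGiB stay on gp2.
func migrateIfEligible(svc *ec2.EC2, volumeID string, sizeGiB, maxSizeGiB int64) error {
	if sizeGiB > maxSizeGiB {
		return nil
	}
	_, err := svc.ModifyVolume(&ec2.ModifyVolumeInput{
		VolumeId:   aws.String(volumeID),
		VolumeType: aws.String("gp3"),
	})
	return err
}

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("eu-central-1")))
	// placeholder volume ID: a 500 GB volume against the 1000 GB threshold
	if err := migrateIfEligible(ec2.New(sess), "vol-0123456789abcdef0", 500, 1000); err != nil {
		fmt.Println("gp3 migration failed:", err)
	}
}
```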
## Logical backup

@ -36,6 +36,8 @@ data:
# enable_admin_role_for_users: "true"
# enable_crd_validation: "true"
# enable_database_access: "true"
enable_ebs_gp3_migration: "false"
# enable_ebs_gp3_migration_max_size: 1000
# enable_init_containers: "true"
# enable_lazy_spilo_upgrade: "false"
enable_master_load_balancer: "false"

@ -290,6 +290,10 @@ spec:
type: string
aws_region:
type: string
enable_ebs_gp3_migration:
type: boolean
enable_ebs_gp3_migration_max_size:
type: integer
gcp_credentials:
type: string
kube_iam_role:

@ -103,6 +103,8 @@ configuration:
# additional_secret_mount: "some-secret-name"
# additional_secret_mount_path: "/some/dir"
aws_region: eu-central-1
enable_ebs_gp3_migration: false
# enable_ebs_gp3_migration_max_size: 1000
# gcp_credentials: ""
# kube_iam_role: ""
# log_s3_bucket: ""

@ -1171,6 +1171,15 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"aws_region": {
Type: "string",
},
"enable_ebs_gp3_migration": {
Type: "boolean",
},
"enable_ebs_gp3_migration_max_size": {
Type: "integer",
},
"gcp_credentials": {
Type: "string",
},
"kube_iam_role": {
Type: "string",
},

@ -117,14 +117,16 @@ type LoadBalancerConfiguration struct {
// AWSGCPConfiguration defines the configuration for AWS
// TODO complete Google Cloud Platform (GCP) configuration
type AWSGCPConfiguration struct {
WALES3Bucket string `json:"wal_s3_bucket,omitempty"`
AWSRegion string `json:"aws_region,omitempty"`
WALGSBucket string `json:"wal_gs_bucket,omitempty"`
GCPCredentials string `json:"gcp_credentials,omitempty"`
LogS3Bucket string `json:"log_s3_bucket,omitempty"`
KubeIAMRole string `json:"kube_iam_role,omitempty"`
AdditionalSecretMount string `json:"additional_secret_mount,omitempty"`
AdditionalSecretMountPath string `json:"additional_secret_mount_path" default:"/meta/credentials"`
WALES3Bucket string `json:"wal_s3_bucket,omitempty"`
AWSRegion string `json:"aws_region,omitempty"`
WALGSBucket string `json:"wal_gs_bucket,omitempty"`
GCPCredentials string `json:"gcp_credentials,omitempty"`
LogS3Bucket string `json:"log_s3_bucket,omitempty"`
KubeIAMRole string `json:"kube_iam_role,omitempty"`
AdditionalSecretMount string `json:"additional_secret_mount,omitempty"`
AdditionalSecretMountPath string `json:"additional_secret_mount_path" default:"/meta/credentials"`
EnableEBSGp3Migration bool `json:"enable_ebs_gp3_migration" default:"false"`
EnableEBSGp3MigrationMaxSize int64 `json:"enable_ebs_gp3_migration_max_size" default:"1000"`
}
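As a hedged aside on the `json` tags above: the two new fields deserialize straight from the OperatorConfiguration payload, while the `default` tags are honored by the operator's own config handling, not by encoding/json. A self-contained sketch using a trimmed-down copy of the struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// awsGCPConfig is a trimmed-down stand-in for AWSGCPConfiguration,
// keeping only the two fields added in this commit.
type awsGCPConfig struct {
	EnableEBSGp3Migration        bool  `json:"enable_ebs_gp3_migration"`
	EnableEBSGp3MigrationMaxSize int64 `json:"enable_ebs_gp3_migration_max_size"`
}

func main() {
	raw := []byte(`{"enable_ebs_gp3_migration": true, "enable_ebs_gp3_migration_max_size": 500}`)
	var cfg awsGCPConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {EnableEBSGp3Migration:true EnableEBSGp3MigrationMaxSize:500}
}
```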
// OperatorDebugConfiguration defines options for the debug mode

@ -524,7 +524,7 @@ func (in *PostgresPodResourcesDefaults) DeepCopy() *PostgresPodResourcesDefaults
func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
*out = *in
in.PostgresqlParam.DeepCopyInto(&out.PostgresqlParam)
out.Volume = in.Volume
in.Volume.DeepCopyInto(&out.Volume)
in.Patroni.DeepCopyInto(&out.Patroni)
out.Resources = in.Resources
if in.EnableConnectionPooler != nil {
@ -623,6 +623,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
(*out)[key] = *val.DeepCopy()
}
}
if in.SchedulerName != nil {
in, out := &in.SchedulerName, &out.SchedulerName
*out = new(string)
**out = **in
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]corev1.Toleration, len(*in))
@ -687,11 +692,6 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.SchedulerName != nil {
in, out := &in.SchedulerName, &out.SchedulerName
*out = new(string)
**out = **in
}
return
}
@ -1160,6 +1160,16 @@ func (in UserFlags) DeepCopy() UserFlags {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Volume) DeepCopyInto(out *Volume) {
*out = *in
if in.Iops != nil {
in, out := &in.Iops, &out.Iops
*out = new(int64)
**out = **in
}
if in.Throughput != nil {
in, out := &in.Throughput, &out.Throughput
*out = new(int64)
**out = **in
}
return
}
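The explicit allocations matter: copying the `Iops` and `Throughput` pointers themselves would let a mutation on the copy leak back into the original spec. A self-contained sketch with a toy `volume` type holding only the two new fields:

```go
package main

import "fmt"

// volume is a toy stand-in for the Volume type, pointer fields only.
type volume struct {
	Iops       *int64
	Throughput *int64
}

// deepCopyInto allocates fresh int64s, as the generated code above does.
func (in *volume) deepCopyInto(out *volume) {
	*out = *in
	if in.Iops != nil {
		out.Iops = new(int64)
		*out.Iops = *in.Iops
	}
	if in.Throughput != nil {
		out.Throughput = new(int64)
		*out.Throughput = *in.Throughput
	}
}

func main() {
	iops := int64(3000)
	in := volume{Iops: &iops}
	var out volume
	in.deepCopyInto(&out)
	*out.Iops = 16000
	fmt.Println(*in.Iops) // still 3000; a shallow copy would print 16000
}
```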

@ -39,6 +39,9 @@ func TestResizeVolumeClaim(t *testing.T) {
namespace := "default"
newVolumeSize := "2Gi"
storage1Gi, err := resource.ParseQuantity("1Gi")
assert.NoError(t, err)
// new cluster with pvc storage resize mode and configured labels
var cluster = New(
Config{
@ -57,55 +60,9 @@ func TestResizeVolumeClaim(t *testing.T) {
filterLabels := cluster.labelsSet(false)
// define and create PVCs for 1Gi volumes
storage1Gi, err := resource.ParseQuantity("1Gi")
assert.NoError(t, err)
pvcList := &v1.PersistentVolumeClaimList{
Items: []v1.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: constants.DataVolumeName + "-" + clusterName + "-0",
Namespace: namespace,
Labels: filterLabels,
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: storage1Gi,
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: constants.DataVolumeName + "-" + clusterName + "-1",
Namespace: namespace,
Labels: filterLabels,
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: storage1Gi,
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: constants.DataVolumeName + "-" + clusterName + "-2-0",
Namespace: namespace,
Labels: labels.Set{},
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: storage1Gi,
},
},
},
},
},
}
pvcList := CreatePVCs(namespace, clusterName, filterLabels, 2, "1Gi")
// add another PVC with different cluster name
pvcList.Items = append(pvcList.Items, CreatePVCs(namespace, clusterName+"-2", labels.Set{}, 1, "1Gi").Items[0])
for _, pvc := range pvcList.Items {
cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{})
@ -178,12 +135,12 @@ func TestQuantityToGigabyte(t *testing.T) {
func CreatePVCs(namespace string, clusterName string, labels labels.Set, n int, size string) v1.PersistentVolumeClaimList {
// define and create PVCs of the requested size
storage1Gi, _ := resource.ParseQuantity("1Gi")
storage1Gi, _ := resource.ParseQuantity(size)
pvcList := v1.PersistentVolumeClaimList{
Items: []v1.PersistentVolumeClaim{},
}
for i := 0; i <= n; i++ {
for i := 0; i < n; i++ {
pvc := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s-%d", constants.DataVolumeName, clusterName, i),

@ -138,6 +138,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.GCPCredentials = fromCRD.AWSGCP.GCPCredentials
result.AdditionalSecretMount = fromCRD.AWSGCP.AdditionalSecretMount
result.AdditionalSecretMountPath = util.Coalesce(fromCRD.AWSGCP.AdditionalSecretMountPath, "/meta/credentials")
result.EnableEBSGp3Migration = fromCRD.AWSGCP.EnableEBSGp3Migration
result.EnableEBSGp3MigrationMaxSize = fromCRD.AWSGCP.EnableEBSGp3MigrationMaxSize
// logical backup config
result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
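`util.Coalesce` presumably follows the usual first-non-empty-wins pattern, which is how an unset CRD string falls back to its default here. A minimal sketch of that assumed semantics (not the operator's actual util package):

```go
package main

import "fmt"

// coalesce returns value unless it is empty, else fallback.
func coalesce(value, fallback string) string {
	if value == "" {
		return fallback
	}
	return value
}

func main() {
	fmt.Println(coalesce("", "30 00 * * *"))          // unset in the CRD -> cron default
	fmt.Println(coalesce("0 3 * * *", "30 00 * * *")) // explicit value wins
}
```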

@ -163,6 +163,8 @@ type Config struct {
GCPCredentials string `name:"gcp_credentials"`
AdditionalSecretMount string `name:"additional_secret_mount"`
AdditionalSecretMountPath string `name:"additional_secret_mount_path" default:"/meta/credentials"`
EnableEBSGp3Migration bool `name:"enable_ebs_gp3_migration" default:"false"`
EnableEBSGp3MigrationMaxSize int64 `name:"enable_ebs_gp3_migration_max_size" default:"1000"`
DebugLogging bool `name:"debug_logging" default:"true"`
EnableDBAccess bool `name:"enable_database_access" default:"true"`
EnableTeamsAPI bool `name:"enable_teams_api" default:"true"`
@ -198,8 +200,6 @@ type Config struct {
SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"`
EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"`
EnablePgVersionEnvVar bool `name:"enable_pgversion_env_var" default:"false"`
EnableEBSGp3Migration bool `name:"enable_ebs_gp3_migration" default:"false"`
EnableEBSGp3MigrationMaxSize int64 `name:"enable_ebs_gp3_migration_max_size" default:"1000"`
}
// MustMarshal marshals the config or panics
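A hedged sketch of how the `name`/`default` struct tags above can be consumed: reflection walks the fields and parses each `default` tag into its typed field. This is an illustrative loader under that assumption, not the operator's actual config package:

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// config lifts the two new fields from the Config struct above.
type config struct {
	EnableEBSGp3Migration        bool  `name:"enable_ebs_gp3_migration" default:"false"`
	EnableEBSGp3MigrationMaxSize int64 `name:"enable_ebs_gp3_migration_max_size" default:"1000"`
}

// applyDefaults parses each `default` tag into the matching field.
func applyDefaults(c *config) error {
	v := reflect.ValueOf(c).Elem()
	for i := 0; i < v.NumField(); i++ {
		def, ok := v.Type().Field(i).Tag.Lookup("default")
		if !ok {
			continue
		}
		switch f := v.Field(i); f.Kind() {
		case reflect.Bool:
			b, err := strconv.ParseBool(def)
			if err != nil {
				return err
			}
			f.SetBool(b)
		case reflect.Int64:
			n, err := strconv.ParseInt(def, 10, 64)
			if err != nil {
				return err
			}
			f.SetInt(n)
		}
	}
	return nil
}

func main() {
	var c config
	if err := applyDefaults(&c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {EnableEBSGp3Migration:false EnableEBSGp3MigrationMaxSize:1000}
}
```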