From 629feac98f275f01e6cdbfa2ed350cb6ef1f4a20 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Mon, 16 Dec 2019 17:07:36 +0100 Subject: [PATCH 01/31] Remove bind verb and explain privileges (#765) Closes #256 --- manifests/operator-service-account-rbac.yaml | 28 +++++++++++++------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml index e95fe320b..a37abe476 100644 --- a/manifests/operator-service-account-rbac.yaml +++ b/manifests/operator-service-account-rbac.yaml @@ -10,6 +10,7 @@ kind: ClusterRole metadata: name: zalando-postgres-operator rules: +# all verbs allowed for custom operator resources - apiGroups: - acid.zalan.do resources: @@ -18,6 +19,7 @@ rules: - operatorconfigurations verbs: - "*" +# to create or get/update CRDs when starting up - apiGroups: - apiextensions.k8s.io resources: @@ -27,12 +29,14 @@ rules: - get - patch - update +# to read configuration from ConfigMaps - apiGroups: - "" resources: - configmaps verbs: - get +# to manage endpoints which are also used by Patroni - apiGroups: - "" resources: @@ -45,6 +49,7 @@ rules: - list - patch - watch # needed if zalando-postgres-operator account is used for pods as well +# to CRUD secrets for database access - apiGroups: - "" resources: @@ -54,6 +59,7 @@ rules: - update - delete - get +# to check nodes for node readiness label - apiGroups: - "" resources: @@ -62,6 +68,7 @@ rules: - get - list - watch +# to read or delete existing PVCs. Creation via StatefulSet - apiGroups: - "" resources: @@ -70,6 +77,7 @@ rules: - delete - get - list + # to read existing PVs. Creation should be done via dynamic provisioning - apiGroups: - "" resources: @@ -78,6 +86,7 @@ rules: - get - list - update # only for resizing AWS volumes +# to watch Spilo pods and do rolling updates. 
Creation via StatefulSet - apiGroups: - "" resources: @@ -88,12 +97,14 @@ rules: - list - watch - patch +# to resize the filesystem in Spilo pods when increasing volume size - apiGroups: - "" resources: - pods/exec verbs: - create +# to CRUD services to point to Postgres cluster instances - apiGroups: - "" resources: @@ -103,6 +114,7 @@ rules: - delete - get - patch +# to CRUD the StatefulSet which controls the Postgres cluster instances - apiGroups: - apps resources: @@ -113,12 +125,14 @@ rules: - get - list - patch +# to get namespaces operator resources can run in - apiGroups: - "" resources: - namespaces verbs: - get +# to define PDBs. Update happens via delete/create - apiGroups: - policy resources: @@ -127,6 +141,7 @@ rules: - create - delete - get +# to create ServiceAccounts in each namespace the operator watches - apiGroups: - "" resources: @@ -134,6 +149,7 @@ rules: verbs: - get - create +# to create role bindings to the operator service account - apiGroups: - "rbac.authorization.k8s.io" resources: @@ -141,18 +157,11 @@ rules: verbs: - get - create -- apiGroups: - - "rbac.authorization.k8s.io" - resources: - - clusterroles - verbs: - - bind - resourceNames: - - zalando-postgres-operator +# to CRUD cron jobs for logical backups - apiGroups: - batch resources: - - cronjobs # enables logical backups + - cronjobs verbs: - create - delete @@ -160,6 +169,7 @@ rules: - list - patch - update + --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding From 182e3bc7db0fd6c7b2617286c324167c7f0c158e Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Mon, 16 Dec 2019 17:08:09 +0100 Subject: [PATCH 02/31] add missing fields to OperatorConfiguration CRD validation (#767) --- .../crds/operatorconfigurations.yaml | 30 +++++++----- manifests/complete-postgres-manifest.yaml | 35 ++++++++------ manifests/operatorconfiguration.crd.yaml | 30 +++++++----- ...gresql-operator-default-configuration.yaml | 12 +++-- manifests/standby-manifest.yaml | 4 -- 
pkg/apis/acid.zalan.do/v1/crds.go | 46 ++++++++++++------- .../v1/operator_configuration_type.go | 32 +++++-------- .../acid.zalan.do/v1/zz_generated.deepcopy.go | 28 ----------- pkg/util/config/config.go | 2 +- 9 files changed, 109 insertions(+), 110 deletions(-) diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index d50a2b431..c97e246ab 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -121,6 +121,8 @@ spec: type: array items: type: string + master_pod_move_timeout: + type: string node_readiness_label: type: object additionalProperties: @@ -138,10 +140,16 @@ spec: enum: - "ordered_ready" - "parallel" + pod_priority_class_name: + type: string pod_role_label: type: string + pod_service_account_definition: + type: string pod_service_account_name: type: string + pod_service_account_role_binding_definition: + type: string pod_terminate_grace_period: type: string secret_name_template: @@ -189,16 +197,16 @@ spec: load_balancer: type: object properties: + custom_service_annotations: + type: object + additionalProperties: + type: string db_hosted_zone: type: string enable_master_load_balancer: type: boolean enable_replica_load_balancer: type: boolean - custom_service_annotations: - type: object - additionalProperties: - type: string master_dns_name_format: type: string replica_dns_name_format: @@ -221,21 +229,21 @@ spec: logical_backup: type: object properties: - logical_backup_schedule: - type: string - pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' logical_backup_docker_image: type: string + logical_backup_s3_access_key_id: + type: string logical_backup_s3_bucket: type: string logical_backup_s3_endpoint: type: string - logical_backup_s3_sse: - type: string - logical_backup_s3_access_key_id: - type: string logical_backup_s3_secret_access_key: type: string + logical_backup_s3_sse: + type: string + 
logical_backup_schedule: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' debug: type: object properties: diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index ba2315753..23dd40638 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -6,10 +6,6 @@ metadata: # environment: demo spec: dockerImage: registry.opensource.zalan.do/acid/spilo-11:1.6-p1 - initContainers: - - name: date - image: busybox - command: [ "/bin/date" ] teamId: "acid" volume: size: 1Gi @@ -25,18 +21,22 @@ spec: - 127.0.0.1/32 databases: foo: zalando -# podAnnotations: -# annotation.key: value -# Expert section - - enableShmVolume: true -# spiloFSGroup: 103 postgresql: version: "11" - parameters: + parameters: # Expert section shared_buffers: "32MB" max_connections: "10" log_statement: "all" + + enableShmVolume: true +# spiloFSGroup: 103 +# podAnnotations: +# annotation.key: value +# podPriorityClassName: "spilo-pod-priority" +# tolerations: +# - key: postgres +# operator: Exists +# effect: NoSchedule resources: requests: cpu: 10m @@ -63,6 +63,7 @@ spec: loop_wait: &loop_wait 10 retry_timeout: 10 maximum_lag_on_failover: 33554432 + # restore a Postgres DB with point-in-time-recovery # with a non-empty timestamp, clone from an S3 bucket using the latest backup before the timestamp # with an empty/absent timestamp, clone from an existing alive cluster using pg_basebackup @@ -75,9 +76,15 @@ spec: # run periodic backups with k8s cron jobs # enableLogicalBackup: true # logicalBackupSchedule: "30 00 * * *" - maintenanceWindows: - - 01:00-06:00 #UTC - - Sat:00:00-04:00 + +# maintenanceWindows: +# - 01:00-06:00 #UTC +# - Sat:00:00-04:00 + + initContainers: + - name: date + image: busybox + command: [ "/bin/date" ] # sidecars: # - name: "telegraf-sidecar" # image: "telegraf:latest" diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index 
bed892dc8..810624bc4 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -97,6 +97,8 @@ spec: type: array items: type: string + master_pod_move_timeout: + type: string node_readiness_label: type: object additionalProperties: @@ -114,10 +116,16 @@ spec: enum: - "ordered_ready" - "parallel" + pod_priority_class_name: + type: string pod_role_label: type: string + pod_service_account_definition: + type: string pod_service_account_name: type: string + pod_service_account_role_binding_definition: + type: string pod_terminate_grace_period: type: string secret_name_template: @@ -165,16 +173,16 @@ spec: load_balancer: type: object properties: + custom_service_annotations: + type: object + additionalProperties: + type: string db_hosted_zone: type: string enable_master_load_balancer: type: boolean enable_replica_load_balancer: type: boolean - custom_service_annotations: - type: object - additionalProperties: - type: string master_dns_name_format: type: string replica_dns_name_format: @@ -197,21 +205,21 @@ spec: logical_backup: type: object properties: - logical_backup_schedule: - type: string - pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' logical_backup_docker_image: type: string + logical_backup_s3_access_key_id: + type: string logical_backup_s3_bucket: type: string logical_backup_s3_endpoint: type: string - logical_backup_s3_sse: - type: string - logical_backup_s3_access_key_id: - type: string logical_backup_s3_secret_access_key: type: string + logical_backup_s3_sse: + type: string + logical_backup_schedule: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' debug: type: object properties: diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index 84e12b4ee..cdfe0f573 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -21,7 +21,7 @@ 
configuration: kubernetes: cluster_domain: cluster.local cluster_labels: - application: spilo + application: spilo cluster_name_label: cluster-name # custom_pod_annotations: # keya: valuea @@ -34,6 +34,7 @@ configuration: # inherited_labels: # - application # - environment + master_pod_move_timeout: 20m # node_readiness_label: # status: ready oauth_token_secret_name: postgresql-operator @@ -41,8 +42,11 @@ configuration: pod_antiaffinity_topology_key: "kubernetes.io/hostname" # pod_environment_configmap: "" pod_management_policy: "ordered_ready" + # pod_priority_class_name: "" pod_role_label: spilo-role + # pod_service_account_definition: "" pod_service_account_name: zalando-postgres-operator + # pod_service_account_role_binding_definition: "" pod_terminate_grace_period: 5m secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" # spilo_fsgroup: 103 @@ -79,10 +83,10 @@ configuration: # wal_s3_bucket: "" logical_backup: logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" - logical_backup_s3_access_key_id: "" + # logical_backup_s3_access_key_id: "" logical_backup_s3_bucket: "my-bucket-url" - logical_backup_s3_endpoint: "" - logical_backup_s3_secret_access_key: "" + # logical_backup_s3_endpoint: "" + # logical_backup_s3_secret_access_key: "" logical_backup_s3_sse: "AES256" logical_backup_schedule: "30 00 * * *" debug: diff --git a/manifests/standby-manifest.yaml b/manifests/standby-manifest.yaml index e5299bc9b..2b621bd10 100644 --- a/manifests/standby-manifest.yaml +++ b/manifests/standby-manifest.yaml @@ -13,7 +13,3 @@ spec: # Make this a standby cluster and provide the s3 bucket path of source cluster for continuous streaming. 
standby: s3_wal_path: "s3://path/to/bucket/containing/wal/of/source/cluster/" - - maintenanceWindows: - - 01:00-06:00 #UTC - - Sat:00:00-04:00 diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 75704afde..20fa37138 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -717,6 +717,9 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, }, }, + "master_pod_move_timeout": { + Type: "string", + }, "node_readiness_label": { Type: "object", AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ @@ -748,12 +751,21 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, }, }, + "pod_priority_class_name": { + Type: "string", + }, "pod_role_label": { Type: "string", }, + "pod_service_account_definition": { + Type: "string", + }, "pod_service_account_name": { Type: "string", }, + "pod_service_account_role_binding_definition": { + Type: "string", + }, "pod_terminate_grace_period": { Type: "string", }, @@ -826,6 +838,14 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "load_balancer": { Type: "object", Properties: map[string]apiextv1beta1.JSONSchemaProps{ + "custom_service_annotations": { + Type: "object", + AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ + Schema: &apiextv1beta1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "db_hosted_zone": { Type: "string", }, @@ -835,14 +855,6 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "enable_replica_load_balancer": { Type: "boolean", }, - "custom_service_annotations": { - Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ - Type: "string", - }, - }, - }, "master_dns_name_format": { Type: "string", }, @@ -877,27 +889,27 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "logical_backup": { Type: "object", 
Properties: map[string]apiextv1beta1.JSONSchemaProps{ - "logical_backup_schedule": { - Type: "string", - Pattern: "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$", - }, "logical_backup_docker_image": { Type: "string", }, + "logical_backup_s3_access_key_id": { + Type: "string", + }, "logical_backup_s3_bucket": { Type: "string", }, "logical_backup_s3_endpoint": { Type: "string", }, + "logical_backup_s3_secret_access_key": { + Type: "string", + }, "logical_backup_s3_sse": { Type: "string", }, - "logical_backup_s3_access_key_id": { - Type: "string", - }, - "logical_backup_s3_secret_access_key": { - Type: "string", + "logical_backup_schedule": { + Type: "string", + Pattern: "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$", }, }, }, diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index f76790ad5..948c7cbbf 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -118,7 +118,7 @@ type OperatorDebugConfiguration struct { EnableDBAccess bool `json:"enable_database_access,omitempty"` } -// TeamsAPIConfiguration defines the configration of TeamsAPI +// TeamsAPIConfiguration defines the configuration of TeamsAPI type TeamsAPIConfiguration struct { EnableTeamsAPI bool `json:"enable_teams_api,omitempty"` TeamsAPIUrl string `json:"teams_api_url,omitempty"` @@ -150,6 +150,17 @@ type ScalyrConfiguration struct { ScalyrMemoryLimit string `json:"scalyr_memory_limit,omitempty"` } +// OperatorLogicalBackupConfiguration defines configuration for logical backup +type OperatorLogicalBackupConfiguration struct { + Schedule string `json:"logical_backup_schedule,omitempty"` + DockerImage string `json:"logical_backup_docker_image,omitempty"` + S3Bucket string `json:"logical_backup_s3_bucket,omitempty"` + S3Endpoint string `json:"logical_backup_s3_endpoint,omitempty"` + S3AccessKeyID string 
`json:"logical_backup_s3_access_key_id,omitempty"` + S3SecretAccessKey string `json:"logical_backup_s3_secret_access_key,omitempty"` + S3SSE string `json:"logical_backup_s3_sse,omitempty"` +} + // OperatorConfigurationData defines the operation config type OperatorConfigurationData struct { EnableCRDValidation *bool `json:"enable_crd_validation,omitempty"` @@ -176,24 +187,5 @@ type OperatorConfigurationData struct { LogicalBackup OperatorLogicalBackupConfiguration `json:"logical_backup"` } -// OperatorConfigurationUsers defines configration for super user -type OperatorConfigurationUsers struct { - SuperUserName string `json:"superuser_name,omitempty"` - Replication string `json:"replication_user_name,omitempty"` - ProtectedRoles []string `json:"protected_roles,omitempty"` - TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"` -} - //Duration shortens this frequently used name type Duration time.Duration - -// OperatorLogicalBackupConfiguration defines configration for logical backup -type OperatorLogicalBackupConfiguration struct { - Schedule string `json:"logical_backup_schedule,omitempty"` - DockerImage string `json:"logical_backup_docker_image,omitempty"` - S3Bucket string `json:"logical_backup_s3_bucket,omitempty"` - S3Endpoint string `json:"logical_backup_s3_endpoint,omitempty"` - S3AccessKeyID string `json:"logical_backup_s3_access_key_id,omitempty"` - S3SecretAccessKey string `json:"logical_backup_s3_secret_access_key,omitempty"` - S3SSE string `json:"logical_backup_s3_sse,omitempty"` -} diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index 16f5a9d67..b68a72d1f 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -300,34 +300,6 @@ func (in *OperatorConfigurationList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the 
receiver, writing into out. in must be non-nil. -func (in *OperatorConfigurationUsers) DeepCopyInto(out *OperatorConfigurationUsers) { - *out = *in - if in.ProtectedRoles != nil { - in, out := &in.ProtectedRoles, &out.ProtectedRoles - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.TeamAPIRoleConfiguration != nil { - in, out := &in.TeamAPIRoleConfiguration, &out.TeamAPIRoleConfiguration - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigurationUsers. -func (in *OperatorConfigurationUsers) DeepCopy() *OperatorConfigurationUsers { - if in == nil { - return nil - } - out := new(OperatorConfigurationUsers) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OperatorDebugConfiguration) DeepCopyInto(out *OperatorDebugConfiguration) { *out = *in diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index d46cba2b2..b2a135fad 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -69,7 +69,7 @@ type Scalyr struct { ScalyrMemoryLimit string `name:"scalyr_memory_limit" default:"1Gi"` } -// LogicalBackup defines configration for logical backup +// LogicalBackup defines configuration for logical backup type LogicalBackup struct { LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"` LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup"` From 7af1de890cbadb02e92d167492d9a02a45ab0a14 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Tue, 17 Dec 2019 17:13:56 +0100 Subject: [PATCH 03/31] bump operator v1.3.0 with Spilo 12 image (#770) --- charts/postgres-operator/Chart.yaml | 6 ++-- charts/postgres-operator/index.yaml | 27 ++++++++++++++++-- 
.../postgres-operator-1.3.0.tgz | Bin 0 -> 19063 bytes charts/postgres-operator/values-crd.yaml | 4 +-- charts/postgres-operator/values.yaml | 4 +-- manifests/complete-postgres-manifest.yaml | 2 +- manifests/configmap.yaml | 2 +- manifests/postgres-operator.yaml | 2 +- ...gresql-operator-default-configuration.yaml | 2 +- pkg/util/config/config.go | 2 +- 10 files changed, 35 insertions(+), 16 deletions(-) create mode 100644 charts/postgres-operator/postgres-operator-1.3.0.tgz diff --git a/charts/postgres-operator/Chart.yaml b/charts/postgres-operator/Chart.yaml index ae9bb855e..08e242a53 100644 --- a/charts/postgres-operator/Chart.yaml +++ b/charts/postgres-operator/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: postgres-operator -version: 1.2.0 -appVersion: 1.2.0 +version: 1.3.0 +appVersion: 1.3.0 home: https://github.com/zalando/postgres-operator description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes keywords: @@ -13,8 +13,6 @@ keywords: maintainers: - name: Zalando email: opensource@zalando.de -- name: kimxogus - email: kgyoo8232@gmail.com sources: - https://github.com/zalando/postgres-operator engine: gotpl diff --git a/charts/postgres-operator/index.yaml b/charts/postgres-operator/index.yaml index b549f1220..84502f6a6 100644 --- a/charts/postgres-operator/index.yaml +++ b/charts/postgres-operator/index.yaml @@ -1,13 +1,34 @@ apiVersion: v1 entries: postgres-operator: + - apiVersion: v1 + appVersion: 1.3.0 + created: "2019-12-17T12:58:49.477140129+01:00" + description: Postgres Operator creates and manages PostgreSQL clusters running + in Kubernetes + digest: 7e788fd37daec76a01f6d6f9fe5be5b54f5035e4eba0041e80a760d656537325 + home: https://github.com/zalando/postgres-operator + keywords: + - postgres + - operator + - cloud-native + - patroni + - spilo + maintainers: + - email: opensource@zalando.de + name: Zalando + name: postgres-operator + sources: + - https://github.com/zalando/postgres-operator + urls: + - 
postgres-operator-1.3.0.tgz + version: 1.3.0 - apiVersion: v1 appVersion: 1.2.0 - created: "2019-08-13T17:33:32.735021423+02:00" + created: "2019-12-17T12:58:49.475844233+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes digest: d10710c7cf19f4e266e7704f5d1e98dcfc61bee3919522326c35c22ca7d2f2bf - engine: gotpl home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -26,4 +47,4 @@ entries: urls: - postgres-operator-1.2.0.tgz version: 1.2.0 -generated: "2019-08-13T17:33:32.734335398+02:00" +generated: "2019-12-17T12:58:49.474719294+01:00" diff --git a/charts/postgres-operator/postgres-operator-1.3.0.tgz b/charts/postgres-operator/postgres-operator-1.3.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..460fed53286e21075290ed1d7cdcc1778100f95c GIT binary patch literal 19063 zcmV)XK&`(YiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH;nd=y3ZD4=p62ndL@A)$njyX107AV)JK0Rp6ugc|VJyPdmR zvbTHe?j<=A2wgfz?;usAO9zoIO`3=(y*DWWf(SzXzn$H^+iM{S$oGA}KdV2z?9RM- zGxO%ndvD&nXQf%r#6VU_TOosSG!qNKO{ok z$nhJ!SBFJt!i#~B*M&fSkB#LprdY@;x8>fs|B(l?5}hE!5;Ua+8EOS)wL1P(t2C+* z1rAvQLs&WVDoLakkSJ0MFfb6~APX=G2NsONOpujIrF3Wy45W?aAj1O2Mo|Q10tBT1 zpq)(*847ZcRhVfD)B-cdSy^pxu!-QzHoeL~TY~#wBu3$Mum^!E(xE+*W^h)kPy*?z zLJ1t-mB2vKHe5+z9FYO}LKw%;6roT8)=H4H!h#VLhY=KH5DaL+2vW<}r&!v?7~p4O z+bSF?0H81)!*0SWh47arQbJ7xuTY)Ia2@S8KL4KUKLaCekbRvD;3exnEId5iZT*L< zLqguJ|2OeqMk7HHoLvjD`zj1HWh6{dnQS7{+F;cx0KhU?9Rp2>LZMA*fE34Iq*5&w zje&Zbi7$uqKXfE*G7*$X3;a072K^KOKnl~7P={k2rpH*Q!wd$_cbI>LgPpce2gvm9erIoGBNEhQCB2V({T4@L`GX~3;Yb*NUYicnhB5zb}}46e(- zNCL+M_T`QiYqsbzXwt?nY3Dc2fa}aO%V~ih|F;Fp(h(HPVHCf)KPV?MKEB&n^X#+;e zzLS_9lB^bBRx3#ugnkNK&RK@%SwcZA5c!RB4EWjC8E6MX^cW;7jq5OqBb=1tXT(U> zy$DXQj16rQI=u}yLC(u?6F4+rE^SzGy?~yPW-J&dlldPcCi>kla6r`2#bFs ze&iryAt;Q4IunB#pz9p7(7-@W=m`fcRubc&7SzwSv5?_k_EkB$qcSiMhkQp!wmx5# zZ%Ailhzx>+CWy<2HxuIwW{4xWtQ9lxFazt0^wOTj@Rs~yFhB+^9X+C2i0k^$6x0G- 
zuYy^a#Y#ec3v$ka1rrE|#&Dfps_!|c3`7LwyPOCIr&!k+tnchB1Yy9)mY5;khL|BKywK^<8(m>}l~`RVRQH6(YDD(PQ?!o2 zMgHSiN^diy^XtoER|-FsHiqmL8CMeD)D3NfT?fNdsLM@1jO$iN;@baA}QyU3+ z_69;iKp_8*IE<3am_*~r&><%ATZR#{k(|z8wdqL0Lhwjx{Mojp=*6;x1=2PS zbpmldNJ7N-GBJYFal%)=S&LWzu@tXg5%&?k5EKWQ42;wQ4eR>FQAZOZd=UmwXD~xU zx@SF?uP*Hg=yzn1-0N6|2z7~3X#u~b;d-7WvNkJZWM3Szwt>OX7KKEAE#P^%LRbZS zhixR8Br@4JV*<@3F_486hZ+=?NdAjOz>H=Ie#vS9S~{L`GXNZB;#C$THd8bXQy>Wq z9L*p}#!BN+6h(8WcdYcPy|`KOe=!8j5S%@l#8@@~_0P{w!RRrAN7on!)$$ct$Yc-( z7-cZfHj48oEa?lBjU*KuP4egpD5Ujx^V0pxA5ZyT+P~P~gv8X?6cv}ny};S)h9tyTs7jFDg1&u$3G#4p7AQ&Jhgq#ES6PjRz*m;&4)}Q>q2kN<0Tt-m*YCe^ z5&aK8`K^DQ8Io4Wuqw{_N~VC9tp8Ar$NFy+8rtaX`hOEoc6M+e$RI3Qz(Nif2@-O4 zD{N-rhs9uqS`ZlAx37XPrHIY4ViXQIGemnFppEFa0ojWcVmT!*h7nXgt61qQ$BSz~ zB^aD}mH||q1mhE5GBE=(41q(C!|$w=0YqrfPr{O7Ga893;HPu~gA|T_3z$Zu9T{Mb zHjFlsw1eI@j3kT%#CdHQ^+BcR3x6J#^%7~ued zGLSa>CFG!L;23^am}oXYe(1|6l;;4V4T+~tM5Q9h3#E{YfoPIx5-Kc^ z!*Gnlv<|hHZ_YQf^2Y0Up(;RX4;Mw_(92M}^dI%QVBmOVlVCYWal)ybfy4-l8+YdV<0U%JgqGBNH6mn&BIll}C0$;Y$%E0}n*?;<|B1iJ_@pw4UYHvT(q|N{3iL zIK+AtVwADc>};fI{I4h)1(g3qqA8jh@Dll79Tw*1{}-wbYZUrc{(lqCYmxswMgNup zy}Z=`UWW!ilQ0++cn1puCk*eeb`Yt#3F1`eqq0`L@9>J^1H&*3QJ=e{90*}lV< z#w)kakEhD4)Bn$zyfkc98rO+jODA$Y-79fAzQc4^%tE?Nu;_FPasWZ$km5jSex;Gq zPyx%zPRj~bL2{@nU~#$QV*#s3F3kmQLFV~az&gkky5R6-&Wi;dCDBFAqqy=_HmQ91 zKZC!PdjK!t{~@6v5$^s!;p(^N|8L}RmH!P4E))NG(qb^m1VJ4u#+ida9lf1j)!Jr2 z)rue?t7=Wsdf>;?NpK*d6hD!I>qrG4Csandx>6}8*H@(6x6kW%@>~Bnw34*l0;yNL z4*0V3zpxMw|DQ%-Z_oeU$Rn5NFsqeyYV$FU*>{oa{@+SyMH6XRD>TTQtywKlqx&MF zuQI~Kf^i0Od)c_Yu8}QDCmq*U1k_|RkKoCis>N4@(;$HOAoq0EW)YQ47x6X#xq^T? 
zs#FmdRZEP8N&GJ2=cbhUH2^A*16VT_8Xm#gEY2nzem}3HQLo3O1wlaA-ekihjghN^ zx_7zHt&BtNYb=K;Yr=JLzD_bK3L$y#v_R%Q`zwC-?5am+rGHJ#;=B(h zzEL_?zuYrPHbUY(AV0o@*dg>vC0%v^eftJ``6yj=0Db#vJw9`o2{}ExG?FB{V&PH4 zdH+I=9HcT{ZM{nUk4{Wz71vr9AC;s_h>DMOdjztGUP`v04GUbY)Zfj*+;G!UH}<6#c=b8$!u z&QSY_dW2U9Z%xHTS43Qvqc`4~pMe(un{fZLfR^4cp;Ed_Z~_2m%*L5@9Gwm+o$T^^ zQMTPTX7ToBmlnGxK}V4c^4;=%-)Z&$$~o={7u@hIR&~8}Ur0;12m{Mxbu^EytQM>#9M})k+|NBNBSNp%)#USsVKn@F`n-j=e zU!Pas+uyjyiRb!P@k2$JD4Kz-1WuTGBf;h+|I5Vi-Km`&h(hg_})#OnER>%O7prA^jib>I>a5MuI z{vevRSZE4#icSGI!LSOIiQt0Ke+BdvDt#X&82wjzVKxQx|B?P=GpJxEAVJ1Kcb{2B zpo-13Dgsq{EL{<(;w)B0U_XUF=!7u@ZDT=POf0KVSsA()G;j(PfkP}BuaVbM9%7voD3xqCkByJ#R5Cl!75*EmEn8k{8Ynrd;@U+HJ ztTDaIg&hEdOR6a3762JC-2VDIXHg!+Bs`F^S>(m|CVaCTfipVT zbkV@OTW6hT;;z<2ci=9==d0U@4&~7hUB39l(^s5UueJ3wO+t+FR$Ugxwcu49QC$7vWUxt?h2By#i+|3&Q@=} zOFqIx`^3lFDwe_Vh$j6ifIMSP7ZsW#Gp#4l)%p=+`6ByQdW$?ND`8s z1$ZPSXC9q_#8|e7ljs`ey(rb)bv9>Io7V!a$lY=Wgt{r-zkO9CeFVQREGVyG@(T?F z1=Kmzm75NL-SLJES~w7B*r0{@ zDlIlCwRL>E9z;Q~d2K^c6nr>YsC>_s3r$)+g6S89qj?Y7Usy=q;`hG-jH09AJte>^ z!YK+mZk|itY875N6jUO|2KV2f7P)0xDeOq+my4ySI*j*>Ero{WBd<&0X~}z%m%_l2 zyRdnyQfNwYPQIiHSfnfth3cFSI65oFnF|lhnPaQ4Rnu*Hs3R~767~vRo1{&$bTWml zoym8;NVVOa3%yk<^wj2;oTmr_lW$_4SBF=`G0!K%&#Qy-%mwrsV6v}1`ciJ8SC(LM zx(7B02o@xl?rcN1R}Hem`nriPxRl<`v&|xi#qtb!Mdk7CiS!BuN9I}kTDm0Sgy9x~ z64Xs2Em)L`ZXe`v)FIO}59gJ@c_SYnPU`$-n=xcrJG*#&F6Y5(D0r>*XNXt+$#9A6r&KGWfbnkJeI=C*_jLc|Fn1F+vneYeE9#$(gnPF1hD-5e>Li` zFpv0;>bLiQ-^}yx`TxpE=zqfL*NHHC>Gbq_iY4?U6n%F^htKHtUL8L9F;eluX&f$VQSJblf1@J&ln(7>nZ-R@ za1MsbSN_8Q6PU-)VSabn`n&l;>y|yxeXSeqbWomN5(rnZKH58 z_k-6Kg-ug{~{;m<84ws~2HqHPQVDGso9${-3){Ny^x)NvHz(Ir$^C3RkOad$NQ zKj_=cPm=iOYj$aNV}amb^<$RDgYq##-bYD*>y1gszdLy3OFHOV)5J)U&V+ahB7|g& z^6ALx&7eZrND|YN&MedN$A_imB{nL#%94|&Rz~*dj_0Vm1za;s>d(P$z; zijbBJgsUPN2D9J1ZXrqyZob&cBM}v;3(Oxl$&B82Jo)@Vj zBO{dRP^CIDRUM(#L~6quDMK1-Lqfb(OAu2;5Uh|uS6Y&CWIQU`ywkaOm<((-0xt|5 zNarVS@D72q>yocM>zD@|m?48^Ktc@AnY<(gEH;)Cl4Tf3jDa{HC^X&ZbOoeCyCjVA 
zp!mO4uKh&EpK(RFjN=l~#d}@fS8?Kz?|?ikO<^G<3vL6i0wTq1=~T$uK<&l5P0ezZ^D{Qo|DNjZ(B_&DFqXFvW4>5V-z7cid++>CAfU&7GOD zB<<;B`7dnxF^X)E%fEW$A)mP@pY}D0Q;YJXh|O7SPNcR(xqRMQkUUo=@6UAXP7cPEfX(A zTX^!l`x2?r_`LBFsS3;ITX;U-B7A;Rhxq=a_Wdc;%dH!X!7Px=*PrL#WTlK3VXy$C z=}p2b$}NZds7tbCdijJ7wcS7D>$-?46k&*cBPd`Zn0>u{aads8pZxITAA{9aB(5$z z{vZY#2n$963gc`HMgk)(n8kqJ4lF@{W_XRqR|%g`ahh|1WV7=6D`#dPbil(26Tz`6 zUod>aMj@{V7U-b?v#~;gLRl!$=%kJg;sR!j1r~xN36!RrGkfZ@5>#>906ZZRJl9bH zq~T)*&W4df5OWq_3>1!eAlpRH?rBQI+H-YCU{N}4 z9#he1Au6*bA_G#Qd|CLJGIN}j)dmN$w>pJBy6`b!`H4zOkPf}A9HncR zPWO+{X!O=n{t}cASA~}{SP7FNRnE__$M+<*w+$nGf6XGi8Hl6|Wko?q7`lI@F9~#P z7Btv6UxE@}LOQDRStN;pM#$t_&`G%|&_GjozBWN`tK7!I*I^JWY(T!kSlYcG@8ru& z-5eDCl4%kGBZ-+joCO@a1izm#7)}^C05O9Z(3}}Eo<>}sXG%r1X)A%=ZpB!ZA-&PI z6141^6qS;a+&;zYSBlQRc0ywQ^~G4gdaoa&lVhV&V`EzSXgS}y35ogE7V0vzSDKbu zPEKqen{V4b>r1Wsgdj$R^ZG3=p-pUZT&jyDx)~koYnfjtjS5zov9dicRjA5}WL!{rqbuBzmu( zU);V|pfFMY|Lm*js@hlRW4e^Q4v3JH08|LaXWZi$^_m$^MCL%5UVRsje%xE!z4 zs!)~S*(2_@THqHNqK;ICG*+rLsp>{rjYg{pRW;IthlHxbLNyITLbV~{iM6+@MXVz- zx(Ja)MmRysXXO`=M4xOHFgA*Eu6KfxnH_~@2oq#EEx^?wjnpBAMh2rg(ijFqBO+j< zMj>Iwa9j-|8{-C}AwsQ(P!p++3=a*}ghm)N;fByq1J(!+HHPYq2pFVH1O>IgL~~X_ z7ckQnsC5`FOay1P=~V{W5{#}b;B>IJn{7zL$)p+Fc|S}nCI0DBRtXFwZNrrm^3|0p z2rg9OUr2>Uzmd0_+%-@Mpamm@VryTxKgMIWyj2qKEpJc za7|=mi9Y|+3-w=mJo$e<16jm2^5_5R@KALl_xr!$jo#+}dm~S+kso}Fe&jVM)(~rx z9Aan_AJN_(8LroK3c*vtiEb&85s95TMZ|X|dtnrAPwhZAYXeD3vD?iEw%UgAe>dGW zZ%Ut)`s_pPzPJNB_L>u9?{V(Q(Y6W6&(^PBw7u%Mn6TpQiIeE$sYsHwwSKAWS5>?@BG>0nor8Sccfg|8|QcQ zT=mbr3x8YhZMk@E$K4k@cD%Uo_k}Gxta*R;d{U##-O>YopM9pwqHWdAY+8Qx@bd{x zgDSRQErZAVPc2t`GQ2ux*Ex#*X!n_zZusQSXJt_X%FRv#Kh5k)_22(Va=qUs&Y1V} zZ@F8C1neJu^;G}E3qNKQE#E8F;+kPa-3@0-bWa#^#eZtxjJiA8FU%>SN!Ms@{k3@K z;Rn5glZiY{yQ!l(ZrihBN2IQMK#dt4jt;q$essu#>?-qOuJ-O6+VEQS((0v?K8~u= zzFvh&3);6iIQYu+o_|anjt_pcu*0S?PtNC@-a7UDV&iKM&COi-&86$dha?AQ{FRXx zv8T!~w(FDb3+DVB)@Z=9HD^nnEW57X(iKN$_&w*K^LDogzjGAy2L$$==Ejc}JpVGd`|!Vlu11Yp@+7G(tXg5`s0$Aix~{PWI^rD1DI@B6UPAD2sC-lM5rYu2NWz8ihXx~Rs;Bm9vT{$K 
zqcwJx1C#vYi;YuMo!qh2(VJy!tY0!Nv)F;=%S%k}txPRrpn@AuX`|aUf9AeL_8$hn zcPRJZrqw?jO0LJ zy?y68%h&<6hkkXW((`o(mXG_u_(Qv0#oGU%sx#p5+7TPo@ZOiI4lBZ^L_`_4B|WXa zy!egV$8(k%D>we#JnBr3vLy9Qm zU4E{4@#D$&vyPRZ9*jy4ik`UZ+-Q4)N_$Ixl>BQ}_5KaJ4C-_$q2|K_t=JoTb9O%1 zz4QJv&9F+J#3>#Qc&y529qMu6t?NFlAE+<2!_+%yX3hSCOVkQ6 z>`r~~Ns|Hoo!8F)VcUaYS!?@G&x+gs?f3+L)0x`%(^LIBJz8F@Q~eIJ+VvU@rgd-U z|HtGi12**1#&4{BZ2y8v+v`zkU^-o#71UD_Z^wGg8dzRXhzUagJQ$ZbZ z=2DA$gPYFO4Cu1G*u#?R(qE)D=0bMWYw>(CH)F__)L#!Bi}_^aCx4gUpfSen z92NON6rhyB`w zXeQNH|Fm!Jh5Kt#<31*)@0}Yt^6c&(F4cZ9{M{cv{-^VSjp?mFKHsnLk9Wu_%e2?W z<2Bej7aJYy+Uvp}S3ca49JFE0IK_$@`Qnyt zAb5DKT202ZoDouUOM~zZTDb4)g*EIq-+%hej6H_Eo4-z8JFLT87}()>$(avJt~)V3 zjOS0Q5_H@HV!*7nu>%d%(QK2it4}m1e2Q3*}E$ zXxZeaTiyFSn^bjN+l+O|>sM)OX4m^@xT5~hxnt*Qs^7Lh-Z-^)6aT>83iZH0 zN=}D*RfXnP)e91+$gfX4TQs6U_@k0*j*Y6;V%9%LD~&joot&b+{_TUsM+4_|>)qx1 zYEjqY7LU7?aB5YZCAU6VJ4;ckq@|N-erIEq_2)ZHUD4|P-IzU#zbiAfXL{@UEh3L^ zdN(O!-h(Sg)2x--e**1<{cdD(OxMrnzVmV3#*zU#y2P#l8@}9HHK*Z%?|R?9|LOOY zZEbV|yB$AO@AkP;)!%7x{OFl@^T4_GuYQ_-^@n}w4FmFyO{o<=@z$PE?Kbw@S$Ry4 zw0@13SAIA+t>&}g@ws2w2X<_z)c!d!t#pl&{v)dVv|vb$3*7QA+UG8Nr{RLDeGZSG zG^X5|fa1%#_F8$=Zsq!%37fwknC|Zk@3g(D$}> z_}6=jhHvUP`23@0s}%F9jy*hU{~*JN$9DCx?>e{M79F^u=HHWc&FFBnp5LQ=ZSSo- zSEA{qtwg!Q%j_dsZNHo`a`b9q-yrpz)ti#8tgbs^Pe`lt&A!Q5vSGxY;O9;Dj=C{2 zr_)dI!v{9}qi%!Vx1x(zcv=Sz>b$8J=KEbLEvS8S zGTgtj>-7b7(v;)7TD}`_x6HtRMx7RZwX*y#V6*BS?(pB2jt^V(^!noKIXlkKow}?$ zrw%x^cI2qwN8|qttJ*(ddFNIX_G{|a$A<@Ppvxz8*-ktut2?^vuWl!n-IPG3$r z+OtN>*u0fHz8(ozv~Ktr{CxGoVYRPa&KXg+^nuRQrtqfIo;0wH+S6+EJG0qlorX5~ zVBFVBmQ-Ig{lnNfyJ9-{H;i0J_8xz5pZ)QTk0+R}EcgGq%;KRf8@As(B5_2cuaE@ z+daqs#&@55_X#(-WkUCE$}FN;rEiIPqv{SBvvKsp{ew^XmwAzXqv3?Ajm-lRhCOR~ z;Zsm0p(TM&S}}ch`IuG}#}93|;8s~~afAMGRU4nZH!3ZxW9Pwbht(*X8(Dne#Ye}6 zFTQqh$lr5yt*6buj8!_+djOTSTUF+lsjaI1JnGioVZFa$h>@yFD^e3e%yKFj#>;=@<`mJ=2L-7NiGfyg)nKQ3Nuj(n!nJ4R?9k55VIbUzPwn?(N zL&H+rzFD0);`HI}krQHfkDn5xJ<_cBn468B^eh`(-|t;6G@{t;3cr-@adB0^sD3+& zy;nSIVBV&`TOYf+^W*T6zh@PTO&@mCpAMVb*$8F~?LFHV6jyV@>2#HjqdwpE$D=JP 
z`Yn#n-J4mn+M)Q_Ni(@kbHe8>9=YP#*+aIL{@auQ4us&DTMT|KHs zrOHtkl^32R&(tp&w`a+?-2<()$CogYhqwMQ{qW2a+7s=&)nZ1awry3v?X(Fe;i@C* zuR6b9sZHwCkN2#&Ik;`yBF(KC~w z7^=40^~3a@>;7qQCFaDT)6HY^GM=SYYw==B%34*;C&jM}95_DiX>_gl>gz7Vez0ML z--TblZCr2hwpLh+WkX(Y_bV!f>ATLXy+-Z7ypvyi@!toI?f2Ip`u6g%Rhr!0GIhAQ z)0p{J0SIsbd*mjO#SH^^GADV?M)&UF`et=cRhv-rj#H?R|)DsvRr$ zR;s7()@aqOgmUF=_L=DyG#59o+C8AgPyUm;Dw4-^8UMui<^Hsk#UH6kcA2ikR#e=3 zV1B>cormpHn%lb8>(l(7W15FwpX`n+{+t@?zisM|{_3b_%Z9C(^?lQ(-&DVQB(dA8&~_(qzn4K zOI9ts_bIe1%f8pVU--agist1XKJ9tl7(TE@7jJ)5xp$jQE^ zp5-a9o=@+!$jPz&vS!Zh8mBiBdH3$FIlJ(A?vcmuZ#n%ux7_`l)R60^mbVB!eQu^g zV?1M2*K83x$2j=Zr$gG}6>nr5JTa@5D)_6+0~HUKxwkd=+|Cn~+HbNYq%;~(_Taal zr_`xkVp;be4r`Tl`dqmhU2eq+ZQ5{dt-q@0TzCZtA@uuywTqeeP~AHKNRo=eHx~81|RjGkNU!V^b%UZ+d$E9KU7<>sP#A zN_$rE$rSbTjHxvvHB%>9Hr{z}+tr|`XH8qxANurY>3$c>YA0>@^}WY?w@+jSp?V zpSiL6eD!^0LRy|PYS4@d|TH1#Hx$D~Hv|Vc! z3?KB<(fXBEjNY)JRmky{r%G`zdTq`PpHK8XfdtCw&UMc?N()0?w!x--#*i8`wR2%XJa3AU!2$XpO~eC&);l(^uwpQxw-Vs ze~#SknVJ3K9ya^n!Oi`S8Y(}^`RL}Yz^`kSU62^GL9=j=Nzvxd!@6kerA zT22}BdH7g(B4dv&z1q|tADr)Nn0Dyw@AGm;Cw<&+I@|2=Ty3JZ;q)UV^6*VD*Ft7j zJk@36S=F9)$tCr%rN>;paeZRi7r|?ejp}!`>!@03pB&vW_v+&jchyyir|R)O~~u4(mt1#EJ5%O>>*^_+)O_cp%&&x2MQ?qpUOIxyr!WcdxVe%W3! 
zar~jDmpANMac6Y>yUBYa>P<;i>!NP8%3E@B#(-$kg!O4%Hjp*zwqMZhk10okW;~tu z9<}rAmtRcYgn#>o$u{cADC6^s!KSOXUKmGjfq#GaH2KTr`nex|ar(|L=LTob&{+1A zIP&9&_x@?lu{Bb9oLrv9En46E$+i8rBetxp)5jL@!>r-IeRBWwxHE|-!^>PfZ7$w@ zX~)iM2WR}+WnY(lH&2p_jE|JzgGQe`kTUK_-@jX~JJ*uCJ*{8Wy34Pu>_vX_C~M8y zN0(GTc9}3}%9Jbha-u)qIrQsx}^;?%0VFP=R+GUayO#T_qxd67Hr`scUv*5Azg z>HfiAif=4e?z6i8OiXM=oxGWz+0uN+TB4Yx_3bgc8*MuMbT~Dv__;IVKf2vNb?uO^ z)}NfTeo5A*sLHg>5BIAxc2JFP7oV|jN)7Wbrq|UcWb3jsO^Z*hUTACB`TRib z#!fG)ocp;(@Ssg`FIv>P8@w{FX|>yyt9j8)oBwe*x1sH`=ihw!G~WN9;;RbJV!u4` z(+lmiZX*+NFSq<2q%FF1%Q~)N+QnIQYGibI-l*M#h3`Dxuw(l@fd9B~$l2f99&c8; z+T`WSCtEsw)fn4|-G3Un>cgcUe^D`l`fB~0oLyROqLF>FZbJPT$@}juqbl7y7aP~3 zVP20vSJe1^%fjXxiRZa_f3KGNBk59fEG@(P~@>6$STWL-R zu87?p+pgBR%@^MPZ1R=qb>}af^lmZ#D`AOKsVI$R;hksS>{sE(i*1hyyzC$F;q|Gi zDxWrL*|->WXa`+w_DOOWwV_&0vwGdPe!u$a-uExEBoY!M|nH$K6skfGi%j}ymI%$Vrk4zf^1otLN55HON>5TF{elw2nuRJ*J02!8@Hvjj&9UiyScKv+Vm87Mk zo9;;arE;+iBSHV%8xbE(SloQy&sMtDu}&dR+TEMKci~^-t0wH-(xhF~lEa1@JMQ%~ zl7C&8w*UAy!bS$-2>aY^BYnZ{rT+kpRr%2n-dN{!{-NM z3r@_+AirO~?FGAK>GH#sD~{~9VPLJy56V=W|Mji5*1r~|9eVNf@a|8Cj4+((vNLno z=Z{9PXRWpqy8Ej)##ddjqSf*vljCn5jh_GIgj2Qy%RfEQiL0e|tdR%^+ja_H{nVo1n-8rzG2qyE?2AF`&TPwx5gPSBEs% z(3N=b!ITY|M?0=N+2H=8zecX_f9gTtkB6pwn{(n?$;=JiPd^Cxbx5g6Wfyl?U-Pr< zg-c`R_Iv-R?UQBQpFK#@XJTiDj_g_Tw*%=EwR&U6ubNe2Mns!ezQ3C4aHx1_`F530 zCcPN-eQ?KT4`-A*c{A_YFTVtjedpZVy!Vehp8P29!f!h|<+a#%{7l{kF2wpMVS8xH zjcZGhTTb4)yZQN!%$e)1=a!4ByC|9HmuNW?mRD}*`^*2)4Z8Dp-uw0&r`jJ}x8>r` zA8y&QH*4m|!H+3pkk*uvFnmMv!zHdx-9K^tdq0k?z+&v zs(9w@wS>SM=t+W@@Cm>jkoqR+4}6Lr_Igjwxe?A zoU;oCs@5H>H7EIxJs0*|SX$@f>SfH}#Ku zo4MS(%X0lM*JAeXs`Jn3ceX9*{K;q4?A^u=>b9^#qhYz3%dSFl2)KFUyIM;h;4H|fBKx2pJ^IjM?HZ1?4~%EfeBYuZ1}X!}v0 zj@)(aCwImj-_mU#5&hXx|Ac!hMm9cM?3_5)V)OpfhhhH@AJ6k& z;R=<~z~HaT2`K;b-_WoyO{m-XZ+Jvl=-cz(H}N>4_%qN%upDF8I)15~?jiEH$qcm8 zEWy!yQOrQ#UVSq-%%lYwYLx~#16ge(nIz(-9@jf>AaEKy*5iqNO>~WAc?4Xw(+;K9T4z z6D%a05B3)kBPf#+z2bW$#8PCHG^NCpRvK6Md*I3stiK~Xt;L=!4sAPV7fK{x7N}6T zg?|Crefd}Ig&^7dUkc%hQ!2)oAg2X>l1o&y{N_`1ats^xb1(^hA28mL9+D?wkjS5E= 
z9s`5xWC3j@zjuGYuQ?82lpRno(;55DO5+GBZR3RdOTvHvngXoZ;=Ft-CG9p_bfW*P z6Ex%uINxDP41Os1a0(HN1qUe=h_zTbJFu~k1??KMq8Am2iB3`Rz&Qhsa{Fn4pC1C5 zVkJmg45=qfu|kTlCuYIOcg+N&Rf7Dnlm^_YREKKSstBc39ib3!w2C)35w%M(wRjSC z4{xmkN;PN(C|VdD1&XGnb5zj+rCI<Cz;f0T}~r zu@D?jxdOF8_iU&-juvx|03$}SP$ABQl?NnVlM|Q+;9O^(6(9;lT(ZmwUjUSXWBQ4| z1PT%V;4qV)(3o?67}~}Wlo-xa;-P@GVwn_b-pbGh$g+;{;YZFO1h6Lg)sRUu=_qJn z7{B_Rt6OxZ)br%;n09pSMO^nCr7JN04(~Zfj7t6oj#iD#uii0tge*$3?7L2?t(ETL zxdL~%$fL34`=*BumaVsoVcSs1Y%64B&>X&yBIZtqP62%}=PigyNC7x);pu`l@@rcn zbC8J~${nLi$h}0Rj7CCDtz&7Bv;kf3@|Ruy5mOfcjAaQE1##iNJan`p`0TSXsBXCq zq_NUEJT(0Um@Xh?7v)8W@^O+AimWA+W>ZLPF1=Io@7%i}%c8)yh2f}4h_0*4d9kqO zgygG2_Ij5PW&VQGDy9{j_c7-yz4%Hmkh;LGS+_8ie4C7o0+>O-L=TNL14U*_u)-dJ zE_8|P-Xln-gXeR!$;3Cn?;jY&5kShZ&+i=kU^&R&D&!gvA-bpJm$-o(W>QQ`9#a-_ z{Qu$!Dk=jb_(29CfM$T&6&g;kj19>%I=u}?o3(Ri5{E(JF9y${AZg0Pf0jl;z@Ft7 zi!dM3&L`dEbwWogN{R`KWDBVQe*F`ak-=Dwu^BiU0|7(xhzJZ0qi|rwSQa@SD};;V z^1>*O8mp7#ZkH#wP`VaYqQb(olLU(b6o@MZ1t8`*xE@NknRpJv&=!{kWWgxHD5NFu zcb1c_B7fwBF2C!;zkyT+!O#@q`NI4O0sRmQV{Itjwb4cb3try2u7;il0>m;zCAfVNz)v>i9qT{eczNwz$ruBmzR|0?+{eHZjV^nL`0br$Y+u zEtr4+{|)7+15rtF2o{amII|8F^f~tq9waeg$2KNq_ZQ@@n3f=&Q8idO;%NH+hC%Cme01N!nyb5s~?}JXnB2h;k@j<~pes z3V;s$Ty+6n^y4w{^B7V=+K$Jpg6JzFIgo)dgs{5#L8YxW)Onf`xvIoWc}!&0#O#C? 
z%!*`9fs#ZONQkZ=U35T=Sdj{aL=+C=AWzOLuLYruD6V21D|XviTW6INFq%PRhUm)f z=VJr%;F=(SCu1=OHybQPv1c2IEun0S;^rgBy5eO=e>XT7!;=SNAtr4!0fbN2&3$ZyiFeGP)IvQ}e`+M0wWz?;cKxu(rwj_S^RXHe7 zWndrA@}*A6;u`$1V~r;B=#>b zeT)SZO(_|g<|NUtfk+tyN$ZTPn9oQZq7f)2(+oK?8aQd+aO^A+tKp#&yzES55F|7~ z99RV2Cct#Qm+ycMVJTA#H!rL&aV{mXMBJmq2sDzu&5Na|7Or!=(*nOhKWSqS*bhdM z&N?hFzDlYpbOr}<$h~~~p1ZkFLwc7)-Qb=h_^&5#0_7{N)(wTJt2JN+yC?V{|B4qT353nfnRt83T)lD&uX9^ta)}A9%P;_#Pt0seVA`G_+jY+Vz zK*M?nCPLU2q!`NPi-#zsIdTQ&z+4l;x?;}J2*3P$z|lf{DM2XW=ag5G91(I9&M>Ju zRL(c@UZ>ZJjCuyNUt$ggUETx;fGU#;q;PByFBLTKY|&+IT=;<=nz0OmMk+H&V>r-@ z5g;T&bk7;!kJLew;)2Ieq6+>c+SO4XTp;mkhoeDUUW;EidSn%XxQ+*@1-M=%D)r(8 zIDZet%4#L;g7ScNO^+6lWX_60y^Fdf*6^WnWr_M#A8%Tii}oV5%-OBbt*>bNMslIB z+dG9}N#1exT~s?nIhco|(>vmCcAgXf;pc&9Q zI!PSIs7yAIX>G6yBN*j0I|(NqAOOo``5GpJ70ATt20XqN+D36wha{!RX}OXVQB+I| z{P-CuM_c%P zTaw6m5;EH?7$xT7Ld5K_ok_Bfq#W_@9oq?CN{8VVfab|w+ z5Raw;94A-&CXeiv0u#4l7M;rhxRYf$EgbwrU^tEp60!ji=Rn%*1-lM1=kjI6G(Tt? z=Cw?bzlwHJF?5A6QSC)rxIibFX2M{0j5!7Qy}}G-(}JjjpoF+?UXmO;&(xd~A&jDA zA@}?)TID57#GZ*pc`?WmodKClhTu3z@w0_da!Y%VFfzQr=y{r;K%u5MXy6qj$0&$- ztPy>p)=|f#!#{x-3q^u%D@ZE?EjM=<36oq9lNZqH{0oh6j$h#5@I=e&L9*1$r znyTd~1vSf4r3a-;Oh8)nh`4n&h7=r7w86nfEFCJ*CnaVjRH9Wz#by{(uE&oGe~@Cp zNIOzad2PWYHwkkzO$w7JMrskf8c>X2@sLR5L;?^|C`*cS7|fB?P-sA^fL0%Rh0vCm zv!GJT&cX{&U)mzY+74Tso2kkJfLL8_ic|YJKrr6SUHQTAw86#8Y;XkxK=g(KS=qsI(d(c?tQ( Date: Thu, 2 Jan 2020 13:41:58 +0100 Subject: [PATCH 04/31] update operator release image (#777) --- charts/postgres-operator/values-crd.yaml | 2 +- charts/postgres-operator/values.yaml | 2 +- manifests/postgres-operator.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index 594533747..bcf37f693 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -1,7 +1,7 @@ image: registry: registry.opensource.zalan.do repository: acid/postgres-operator - tag: v1.3.0 + tag: v1.3.0-1-gf2695c7f 
pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index ab0785a0c..237c58008 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -1,7 +1,7 @@ image: registry: registry.opensource.zalan.do repository: acid/postgres-operator - tag: v1.3.0 + tag: v1.3.0-1-gf2695c7f pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index 498796b61..1f61fe6d4 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: zalando-postgres-operator containers: - name: postgres-operator - image: registry.opensource.zalan.do/acid/postgres-operator:v1.3.0 + image: registry.opensource.zalan.do/acid/postgres-operator:v1.3.0-1-gf2695c7f imagePullPolicy: IfNotPresent resources: requests: From 9d7604ecf0f4cd43ec7bb18d40cc3411849a9d38 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Thu, 2 Jan 2020 14:06:23 +0100 Subject: [PATCH 05/31] use v1.3.0-dirty tag (#778) --- charts/postgres-operator/values-crd.yaml | 2 +- charts/postgres-operator/values.yaml | 2 +- manifests/postgres-operator.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index bcf37f693..4ba6266b2 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -1,7 +1,7 @@ image: registry: registry.opensource.zalan.do repository: acid/postgres-operator - tag: v1.3.0-1-gf2695c7f + tag: v1.3.0-dirty pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. 
diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 237c58008..b32405690 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -1,7 +1,7 @@ image: registry: registry.opensource.zalan.do repository: acid/postgres-operator - tag: v1.3.0-1-gf2695c7f + tag: v1.3.0-dirty pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index 1f61fe6d4..5ebfa1e8f 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: zalando-postgres-operator containers: - name: postgres-operator - image: registry.opensource.zalan.do/acid/postgres-operator:v1.3.0-1-gf2695c7f + image: registry.opensource.zalan.do/acid/postgres-operator:v1.3.0-dirty imagePullPolicy: IfNotPresent resources: requests: From b54458ee3c49dee39d6e365402e455d0717d6faf Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Thu, 2 Jan 2020 17:34:49 +0100 Subject: [PATCH 06/31] update copyright in generated code (#779) * update year in copyright of generated code and LICENSE file --- LICENSE | 2 +- pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go | 2 +- pkg/generated/clientset/versioned/clientset.go | 2 +- pkg/generated/clientset/versioned/doc.go | 2 +- pkg/generated/clientset/versioned/fake/clientset_generated.go | 2 +- pkg/generated/clientset/versioned/fake/doc.go | 2 +- pkg/generated/clientset/versioned/fake/register.go | 2 +- pkg/generated/clientset/versioned/scheme/doc.go | 2 +- pkg/generated/clientset/versioned/scheme/register.go | 2 +- .../versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go | 2 +- pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go | 2 +- .../clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go | 2 +- .../typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go | 2 +- .../typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go | 
2 +- .../versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go | 2 +- .../versioned/typed/acid.zalan.do/v1/generated_expansion.go | 2 +- .../versioned/typed/acid.zalan.do/v1/operatorconfiguration.go | 2 +- .../clientset/versioned/typed/acid.zalan.do/v1/postgresql.go | 2 +- .../informers/externalversions/acid.zalan.do/interface.go | 2 +- .../informers/externalversions/acid.zalan.do/v1/interface.go | 2 +- .../informers/externalversions/acid.zalan.do/v1/postgresql.go | 2 +- pkg/generated/informers/externalversions/factory.go | 2 +- pkg/generated/informers/externalversions/generic.go | 2 +- .../externalversions/internalinterfaces/factory_interfaces.go | 2 +- pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go | 2 +- pkg/generated/listers/acid.zalan.do/v1/postgresql.go | 2 +- 26 files changed, 26 insertions(+), 26 deletions(-) diff --git a/LICENSE b/LICENSE index 72dcd53e1..da62089ec 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2019 Zalando SE +Copyright (c) 2020 Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index b68a72d1f..dc07aa2cf 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/clientset.go b/pkg/generated/clientset/versioned/clientset.go index 45c1893a8..cb72ec50f 100644 --- a/pkg/generated/clientset/versioned/clientset.go +++ b/pkg/generated/clientset/versioned/clientset.go @@ -1,5 +1,5 
@@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/doc.go b/pkg/generated/clientset/versioned/doc.go index d514b90a4..9ec677ac7 100644 --- a/pkg/generated/clientset/versioned/doc.go +++ b/pkg/generated/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/fake/clientset_generated.go b/pkg/generated/clientset/versioned/fake/clientset_generated.go index fd63d0ba4..55771905f 100644 --- a/pkg/generated/clientset/versioned/fake/clientset_generated.go +++ b/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/fake/doc.go b/pkg/generated/clientset/versioned/fake/doc.go index 960df4951..7c9574952 100644 --- a/pkg/generated/clientset/versioned/fake/doc.go +++ b/pkg/generated/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/fake/register.go b/pkg/generated/clientset/versioned/fake/register.go index 2883940ca..5363e8cc4 100644 --- a/pkg/generated/clientset/versioned/fake/register.go +++ 
b/pkg/generated/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/scheme/doc.go b/pkg/generated/clientset/versioned/scheme/doc.go index ea0df2783..02fd3d592 100644 --- a/pkg/generated/clientset/versioned/scheme/doc.go +++ b/pkg/generated/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/scheme/register.go b/pkg/generated/clientset/versioned/scheme/register.go index acdd09c25..381948a4a 100644 --- a/pkg/generated/clientset/versioned/scheme/register.go +++ b/pkg/generated/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go index 388ec6178..1879b9514 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git 
a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go index 8bff3bf2d..55338c4de 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go index c9373b6d8..1ae436a9b 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go index 31f2ec817..8cd4dc9da 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go 
b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go index 8023d3b07..732b48250 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go index 8cc50e598..1ab20dbfc 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go index e1d824486..fd5707c75 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go 
b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go index 43a915ca3..e9cc0de77 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go index 88645dab6..78c0fc390 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/interface.go b/pkg/generated/informers/externalversions/acid.zalan.do/interface.go index edf03dce7..4ff4a3d06 100644 --- a/pkg/generated/informers/externalversions/acid.zalan.do/interface.go +++ b/pkg/generated/informers/externalversions/acid.zalan.do/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go index 77e7c9b34..30090afee 100644 --- a/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go +++ 
b/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go index 16a055f50..da7f91669 100644 --- a/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go +++ b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/factory.go b/pkg/generated/informers/externalversions/factory.go index c36d22a29..4e6b36614 100644 --- a/pkg/generated/informers/externalversions/factory.go +++ b/pkg/generated/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go index 3af7bb7ec..562dec419 100644 --- a/pkg/generated/informers/externalversions/generic.go +++ b/pkg/generated/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git 
a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go index 712305087..9f4e14a1a 100644 --- a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go b/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go index 4c353bec3..1b96a7c76 100644 --- a/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go +++ b/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/listers/acid.zalan.do/v1/postgresql.go b/pkg/generated/listers/acid.zalan.do/v1/postgresql.go index da41e1358..9a60c8281 100644 --- a/pkg/generated/listers/acid.zalan.do/v1/postgresql.go +++ b/pkg/generated/listers/acid.zalan.do/v1/postgresql.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Compose, Zalando SE +Copyright 2020 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal From 077f9af4e399d7d50177c0c5074672911d88fc1c Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Mon, 6 Jan 2020 14:08:47 +0100 Subject: [PATCH 07/31] bump to v1.3.1 (#780) --- charts/postgres-operator/values-crd.yaml | 2 +- charts/postgres-operator/values.yaml | 2 +- manifests/postgres-operator.yaml | 
2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index 4ba6266b2..5b67258fa 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -1,7 +1,7 @@ image: registry: registry.opensource.zalan.do repository: acid/postgres-operator - tag: v1.3.0-dirty + tag: v1.3.1 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index b32405690..60190f49a 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -1,7 +1,7 @@ image: registry: registry.opensource.zalan.do repository: acid/postgres-operator - tag: v1.3.0-dirty + tag: v1.3.1 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index 5ebfa1e8f..fa8682809 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: zalando-postgres-operator containers: - name: postgres-operator - image: registry.opensource.zalan.do/acid/postgres-operator:v1.3.0-dirty + image: registry.opensource.zalan.do/acid/postgres-operator:v1.3.1 imagePullPolicy: IfNotPresent resources: requests: From e6ce00050f84dd2d894da1e7718e54e80f9945f4 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Mon, 13 Jan 2020 18:23:04 +0100 Subject: [PATCH 08/31] reduce tracing of dumps and include Pg12 (#791) --- docker/logical-backup/Dockerfile | 3 ++- docker/logical-backup/dump.sh | 13 ++++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/docker/logical-backup/Dockerfile b/docker/logical-backup/Dockerfile index 1da6f7386..94c524381 100644 --- a/docker/logical-backup/Dockerfile +++ b/docker/logical-backup/Dockerfile @@ -19,6 +19,7 @@ RUN apt-get update \ && curl --silent 
https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ && apt-get update \ && apt-get install --no-install-recommends -y \ + postgresql-client-12 \ postgresql-client-11 \ postgresql-client-10 \ postgresql-client-9.6 \ @@ -28,6 +29,6 @@ RUN apt-get update \ COPY dump.sh ./ -ENV PG_DIR=/usr/lib/postgresql/ +ENV PG_DIR=/usr/lib/postgresql ENTRYPOINT ["/dump.sh"] diff --git a/docker/logical-backup/dump.sh b/docker/logical-backup/dump.sh index 78217322b..673f09038 100755 --- a/docker/logical-backup/dump.sh +++ b/docker/logical-backup/dump.sh @@ -6,12 +6,10 @@ set -o nounset set -o pipefail IFS=$'\n\t' -# make script trace visible via `kubectl logs` -set -o xtrace - ALL_DB_SIZE_QUERY="select sum(pg_database_size(datname)::numeric) from pg_database;" PG_BIN=$PG_DIR/$PG_VERSION/bin DUMP_SIZE_COEFF=5 +ERRORCOUNT=0 TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) K8S_API_URL=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/api/v1 @@ -42,9 +40,9 @@ function aws_upload { [[ ! -z "$EXPECTED_SIZE" ]] && args+=("--expected-size=$EXPECTED_SIZE") [[ ! -z "$LOGICAL_BACKUP_S3_ENDPOINT" ]] && args+=("--endpoint-url=$LOGICAL_BACKUP_S3_ENDPOINT") - [[ ! "$LOGICAL_BACKUP_S3_SSE" == "" ]] && args+=("--sse=$LOGICAL_BACKUP_S3_SSE") + [[ ! 
-z "$LOGICAL_BACKUP_S3_SSE" ]] && args+=("--sse=$LOGICAL_BACKUP_S3_SSE") - aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}" --debug + aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}" } function get_pods { @@ -93,4 +91,9 @@ for search in "${search_strategy[@]}"; do done +set -x dump | compress | aws_upload $(($(estimate_size) / DUMP_SIZE_COEFF)) +[[ ${PIPESTATUS[0]} != 0 || ${PIPESTATUS[1]} != 0 || ${PIPESTATUS[2]} != 0 ]] && (( ERRORCOUNT += 1 )) +set +x + +exit $ERRORCOUNT From 7fb163252c0f7477d1d432ceaabbc075c619236c Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Thu, 16 Jan 2020 10:47:34 +0100 Subject: [PATCH 09/31] standby clusters can only have 1 pod for now (#797) --- pkg/cluster/k8sres.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index c69c7a076..aed0c6e83 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1051,6 +1051,7 @@ func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 { /* Limit the max number of pods to one, if this is standby-cluster */ if spec.StandbyCluster != nil { c.logger.Info("Standby cluster can have maximum of 1 pod") + min = 1 max = 1 } if max >= 0 && newcur > max { From fddaf0fb73731a6234b223596a4cb87eb33e5299 Mon Sep 17 00:00:00 2001 From: Jonathan Juares Beber Date: Mon, 27 Jan 2020 14:43:32 +0100 Subject: [PATCH 10/31] Change error computation on JSON Unmarshal and create subtests on table test scenarios (#801) * Change error computation on JSON Unmarshall The [Unmarshall function][1] on the encoding/JSON default library returns different errors for different go versions. On Go 1.12, the version used currently on the CI system it returns `json: cannot unmarshal number into Go struct field PostgresSpec.teamId of type string`. On Go 1.13.5 it returns `json: cannot unmarshal number into Go struct field PostgresSpec.spec.teamId of type string`. The new version includes more details of the whole structure being unmarshelled. 
This commit introduces the same error but one level deeper on the JSON structure. It creates consistency across different Go versions. [1]: https://godoc.org/encoding/json#Unmarshal * Create subtests on table test scenarios The Run method of T allows defining subtests creating hierarchical tests. It provides better visibility of tests in case of failure. More details on https://golang.org/pkg/testing/. This commit converts each test scenario on pkg/apis/acid.zalan.do/v1/util_test.go to subtests, providing a better visibility and the debugging environment when working with tests. The following code snippet shows an error during test execution with subtests: ``` --- FAIL: TestUnmarshalMaintenanceWindow (0.00s) --- FAIL: TestUnmarshalMaintenanceWindow/expect_error_as_'From'_is_later_than_'To' (0.00s) ``` It included a `about` field on test scenarios describing the test purpose and/or it expected output. When a description was provided with comments it was moved to the about field. --- pkg/apis/acid.zalan.do/v1/util_test.go | 479 ++++++++++++++----------- 1 file changed, 263 insertions(+), 216 deletions(-) diff --git a/pkg/apis/acid.zalan.do/v1/util_test.go b/pkg/apis/acid.zalan.do/v1/util_test.go index fc068b322..a1e01825f 100644 --- a/pkg/apis/acid.zalan.do/v1/util_test.go +++ b/pkg/apis/acid.zalan.do/v1/util_test.go @@ -13,127 +13,139 @@ import ( ) var parseTimeTests = []struct { - in string - out metav1.Time - err error + about string + in string + out metav1.Time + err error }{ - {"16:08", mustParseTime("16:08"), nil}, - {"11:00", mustParseTime("11:00"), nil}, - {"23:59", mustParseTime("23:59"), nil}, + {"parse common time with minutes", "16:08", mustParseTime("16:08"), nil}, + {"parse time with zeroed minutes", "11:00", mustParseTime("11:00"), nil}, + {"parse corner case last minute of the day", "23:59", mustParseTime("23:59"), nil}, - {"26:09", metav1.Now(), errors.New(`parsing time "26:09": hour out of range`)}, - {"23:69", metav1.Now(), errors.New(`parsing 
time "23:69": minute out of range`)}, + {"expect error as hour is out of range", "26:09", metav1.Now(), errors.New(`parsing time "26:09": hour out of range`)}, + {"expect error as minute is out of range", "23:69", metav1.Now(), errors.New(`parsing time "23:69": minute out of range`)}, } var parseWeekdayTests = []struct { - in string - out time.Weekday - err error + about string + in string + out time.Weekday + err error }{ - {"Wed", time.Wednesday, nil}, - {"Sunday", time.Weekday(0), errors.New("incorrect weekday")}, - {"", time.Weekday(0), errors.New("incorrect weekday")}, + {"parse common weekday", "Wed", time.Wednesday, nil}, + {"expect error as weekday is invalid", "Sunday", time.Weekday(0), errors.New("incorrect weekday")}, + {"expect error as weekday is empty", "", time.Weekday(0), errors.New("incorrect weekday")}, } var clusterNames = []struct { + about string in string inTeam string clusterName string err error }{ - {"acid-test", "acid", "test", nil}, - {"test-my-name", "test", "my-name", nil}, - {"my-team-another-test", "my-team", "another-test", nil}, - {"------strange-team-cluster", "-----", "strange-team-cluster", + {"common team and cluster name", "acid-test", "acid", "test", nil}, + {"cluster name with hyphen", "test-my-name", "test", "my-name", nil}, + {"cluster and team name with hyphen", "my-team-another-test", "my-team", "another-test", nil}, + {"expect error as cluster name is just hyphens", "------strange-team-cluster", "-----", "strange-team-cluster", errors.New(`name must confirm to DNS-1035, regex used for validation is "^[a-z]([-a-z0-9]*[a-z0-9])?$"`)}, - {"fooobar-fooobarfooobarfooobarfooobarfooobarfooobarfooobarfooobar", "fooobar", "", + {"expect error as cluster name is too long", "fooobar-fooobarfooobarfooobarfooobarfooobarfooobarfooobarfooobar", "fooobar", "", errors.New("name cannot be longer than 58 characters")}, - {"acid-test", "test", "", errors.New("name must match {TEAM}-{NAME} format")}, - {"-test", "", "", errors.New("team name 
is empty")}, - {"-test", "-", "", errors.New("name must match {TEAM}-{NAME} format")}, - {"", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '', team name '-'")}, - {"-", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '-', team name '-'")}, + {"expect error as cluster name does not match {TEAM}-{NAME} format", "acid-test", "test", "", errors.New("name must match {TEAM}-{NAME} format")}, + {"expect error as team and cluster name are empty", "-test", "", "", errors.New("team name is empty")}, + {"expect error as cluster name is empty and team name is a hyphen", "-test", "-", "", errors.New("name must match {TEAM}-{NAME} format")}, + {"expect error as cluster name is empty, team name is a hyphen and cluster name is empty", "", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '', team name '-'")}, + {"expect error as cluster and team name are hyphens", "-", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '-', team name '-'")}, // user may specify the team part of the full cluster name differently from the team name returned by the Teams API // in the case the actual Teams API name is long enough, this will fail the check - {"foo-bar", "qwerty", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name 'foo-bar', team name 'qwerty'")}, + {"expect error as team name does not match", "foo-bar", "qwerty", "", errors.New("cluster name must match {TEAM}-{NAME} format. 
Got cluster name 'foo-bar', team name 'qwerty'")}, } var cloneClusterDescriptions = []struct { - in *CloneDescription - err error + about string + in *CloneDescription + err error }{ - {&CloneDescription{"foo+bar", "", "NotEmpty", "", "", "", "", nil}, nil}, - {&CloneDescription{"foo+bar", "", "", "", "", "", "", nil}, + {"cluster name invalid but EndTimeSet is not empty", &CloneDescription{"foo+bar", "", "NotEmpty", "", "", "", "", nil}, nil}, + {"expect error as cluster name does not match DNS-1035", &CloneDescription{"foo+bar", "", "", "", "", "", "", nil}, errors.New(`clone cluster name must confirm to DNS-1035, regex used for validation is "^[a-z]([-a-z0-9]*[a-z0-9])?$"`)}, - {&CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", "", "", "", "", "", nil}, + {"expect error as cluster name is too long", &CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", "", "", "", "", "", nil}, errors.New("clone cluster name must be no longer than 63 characters")}, - {&CloneDescription{"foobar", "", "", "", "", "", "", nil}, nil}, + {"common cluster name", &CloneDescription{"foobar", "", "", "", "", "", "", nil}, nil}, } var maintenanceWindows = []struct { - in []byte - out MaintenanceWindow - err error -}{{[]byte(`"Tue:10:00-20:00"`), + about string + in []byte + out MaintenanceWindow + err error +}{{"regular scenario", + []byte(`"Tue:10:00-20:00"`), MaintenanceWindow{ Everyday: false, Weekday: time.Tuesday, StartTime: mustParseTime("10:00"), EndTime: mustParseTime("20:00"), }, nil}, - {[]byte(`"Mon:10:00-10:00"`), + {"starts and ends at the same time", + []byte(`"Mon:10:00-10:00"`), MaintenanceWindow{ Everyday: false, Weekday: time.Monday, StartTime: mustParseTime("10:00"), EndTime: mustParseTime("10:00"), }, nil}, - {[]byte(`"Sun:00:00-00:00"`), + {"starts and ends 00:00 on sunday", + []byte(`"Sun:00:00-00:00"`), MaintenanceWindow{ Everyday: false, Weekday: time.Sunday, StartTime: 
mustParseTime("00:00"), EndTime: mustParseTime("00:00"), }, nil}, - {[]byte(`"01:00-10:00"`), + {"without day indication should define to sunday", + []byte(`"01:00-10:00"`), MaintenanceWindow{ Everyday: true, Weekday: time.Sunday, StartTime: mustParseTime("01:00"), EndTime: mustParseTime("10:00"), }, nil}, - {[]byte(`"Mon:12:00-11:00"`), MaintenanceWindow{}, errors.New(`'From' time must be prior to the 'To' time`)}, - {[]byte(`"Wed:33:00-00:00"`), MaintenanceWindow{}, errors.New(`could not parse start time: parsing time "33:00": hour out of range`)}, - {[]byte(`"Wed:00:00-26:00"`), MaintenanceWindow{}, errors.New(`could not parse end time: parsing time "26:00": hour out of range`)}, - {[]byte(`"Sunday:00:00-00:00"`), MaintenanceWindow{}, errors.New(`could not parse weekday: incorrect weekday`)}, - {[]byte(`":00:00-10:00"`), MaintenanceWindow{}, errors.New(`could not parse weekday: incorrect weekday`)}, - {[]byte(`"Mon:10:00-00:00"`), MaintenanceWindow{}, errors.New(`'From' time must be prior to the 'To' time`)}, - {[]byte(`"Mon:00:00:00-10:00:00"`), MaintenanceWindow{}, errors.New(`incorrect maintenance window format`)}, - {[]byte(`"Mon:00:00"`), MaintenanceWindow{}, errors.New("incorrect maintenance window format")}, - {[]byte(`"Mon:00:00-00:00:00"`), MaintenanceWindow{}, errors.New("could not parse end time: incorrect time format")}} + {"expect error as 'From' is later than 'To'", []byte(`"Mon:12:00-11:00"`), MaintenanceWindow{}, errors.New(`'From' time must be prior to the 'To' time`)}, + {"expect error as 'From' is later than 'To' with 00:00 corner case", []byte(`"Mon:10:00-00:00"`), MaintenanceWindow{}, errors.New(`'From' time must be prior to the 'To' time`)}, + {"expect error as 'From' time is not valid", []byte(`"Wed:33:00-00:00"`), MaintenanceWindow{}, errors.New(`could not parse start time: parsing time "33:00": hour out of range`)}, + {"expect error as 'To' time is not valid", []byte(`"Wed:00:00-26:00"`), MaintenanceWindow{}, errors.New(`could not parse 
end time: parsing time "26:00": hour out of range`)}, + {"expect error as weekday is not valid", []byte(`"Sunday:00:00-00:00"`), MaintenanceWindow{}, errors.New(`could not parse weekday: incorrect weekday`)}, + {"expect error as weekday is empty", []byte(`":00:00-10:00"`), MaintenanceWindow{}, errors.New(`could not parse weekday: incorrect weekday`)}, + {"expect error as maintenance window set seconds", []byte(`"Mon:00:00:00-10:00:00"`), MaintenanceWindow{}, errors.New(`incorrect maintenance window format`)}, + {"expect error as 'To' time set seconds", []byte(`"Mon:00:00-00:00:00"`), MaintenanceWindow{}, errors.New("could not parse end time: incorrect time format")}, + {"expect error as 'To' time is missing", []byte(`"Mon:00:00"`), MaintenanceWindow{}, errors.New("incorrect maintenance window format")}} var postgresStatus = []struct { - in []byte - out PostgresStatus - err error + about string + in []byte + out PostgresStatus + err error }{ - {[]byte(`{"PostgresClusterStatus":"Running"}`), + {"cluster running", []byte(`{"PostgresClusterStatus":"Running"}`), PostgresStatus{PostgresClusterStatus: ClusterStatusRunning}, nil}, - {[]byte(`{"PostgresClusterStatus":""}`), + {"cluster status undefined", []byte(`{"PostgresClusterStatus":""}`), PostgresStatus{PostgresClusterStatus: ClusterStatusUnknown}, nil}, - {[]byte(`"Running"`), + {"cluster running without full JSON format", []byte(`"Running"`), PostgresStatus{PostgresClusterStatus: ClusterStatusRunning}, nil}, - {[]byte(`""`), + {"cluster status empty", []byte(`""`), PostgresStatus{PostgresClusterStatus: ClusterStatusUnknown}, nil}} +var tmp postgresqlCopy var unmarshalCluster = []struct { + about string in []byte out Postgresql marshal []byte err error }{ - // example with simple status field { + about: "example with simple status field", in: []byte(`{ "kind": "Postgresql","apiVersion": "acid.zalan.do/v1", "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), @@ -147,12 +159,14 @@ var unmarshalCluster 
= []struct { }, Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid}, // This error message can vary between Go versions, so compute it for the current version. - Error: json.Unmarshal([]byte(`{"teamId": 0}`), &PostgresSpec{}).Error(), + Error: json.Unmarshal([]byte(`{ + "kind": "Postgresql","apiVersion": "acid.zalan.do/v1", + "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(), }, marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), err: nil}, - // example with /status subresource { + about: "example with /status subresource", in: []byte(`{ "kind": "Postgresql","apiVersion": "acid.zalan.do/v1", "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), @@ -166,13 +180,14 @@ var unmarshalCluster = []struct { }, Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid}, // This error message can vary between Go versions, so compute it for the current version. 
- Error: json.Unmarshal([]byte(`{"teamId": 0}`), &PostgresSpec{}).Error(), + Error: json.Unmarshal([]byte(`{ + "kind": "Postgresql","apiVersion": "acid.zalan.do/v1", + "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(), }, marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), err: nil}, - // example with detailed input manifest - // and deprecated pod_priority_class_name -> podPriorityClassName { + about: "example with detailed input manifest and deprecated pod_priority_class_name -> podPriorityClassName", in: []byte(`{ "kind": "Postgresql", "apiVersion": "acid.zalan.do/v1", @@ -321,9 +336,9 @@ var unmarshalCluster = []struct { }, marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 
md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`), err: nil}, - // example with teamId set in input { - in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "teapot-testcluster1"}, "spec": {"teamId": "acid"}}`), + about: "example with teamId set in input", + in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "teapot-testcluster1"}, "spec": {"teamId": "acid"}}`), out: Postgresql{ TypeMeta: metav1.TypeMeta{ Kind: "Postgresql", @@ -338,9 +353,9 @@ var unmarshalCluster = []struct { }, marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null} ,"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), err: nil}, - // clone example { - in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "clone": {"cluster": "team-batman"}}}`), + about: "example with clone", + in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 
"acid", "clone": {"cluster": "team-batman"}}}`), out: Postgresql{ TypeMeta: metav1.TypeMeta{ Kind: "Postgresql", @@ -360,9 +375,9 @@ var unmarshalCluster = []struct { }, marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{"cluster":"team-batman"}},"status":{"PostgresClusterStatus":""}}`), err: nil}, - // standby example { - in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "standby": {"s3_wal_path": "s3://custom/path/to/bucket/"}}}`), + about: "standby example", + in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "standby": {"s3_wal_path": "s3://custom/path/to/bucket/"}}}`), out: Postgresql{ TypeMeta: metav1.TypeMeta{ Kind: "Postgresql", @@ -382,24 +397,28 @@ var unmarshalCluster = []struct { }, marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"standby":{"s3_wal_path":"s3://custom/path/to/bucket/"}},"status":{"PostgresClusterStatus":""}}`), err: nil}, - // erroneous examples { + about: 
"expect error on malformatted JSON", in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1"`), out: Postgresql{}, marshal: []byte{}, err: errors.New("unexpected end of JSON input")}, { + about: "expect error on JSON with field's value malformatted", in: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), out: Postgresql{}, marshal: []byte{}, - err: errors.New("invalid character 'q' looking for beginning of value")}} + err: errors.New("invalid character 'q' looking for beginning of value"), + }, +} var postgresqlList = []struct { - in []byte - out PostgresqlList - err error + about string + in []byte + out PostgresqlList + err error }{ - {[]byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"9.6"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`), + {"expect success", 
[]byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"9.6"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`), PostgresqlList{ TypeMeta: metav1.TypeMeta{ Kind: "List", @@ -433,15 +452,17 @@ var postgresqlList = []struct { }}, }, nil}, - {[]byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace"`), + {"expect error on malformatted JSON", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace"`), PostgresqlList{}, errors.New("unexpected end of JSON input")}} var annotations = []struct { + about string in []byte annotations map[string]string err error }{{ + about: "common annotations", in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"podAnnotations": {"foo": "bar"},"teamId": "acid", "clone": {"cluster": "team-batman"}}}`), annotations: map[string]string{"foo": "bar"}, err: nil}, @@ -458,230 +479,256 @@ func mustParseTime(s string) metav1.Time { func TestParseTime(t *testing.T) { for _, tt := range parseTimeTests { - aTime, err := parseTime(tt.in) - if err != nil { - if tt.err == nil || err.Error() != tt.err.Error() { - t.Errorf("ParseTime expected error: %v, got: %v", tt.err, err) + t.Run(tt.about, func(t *testing.T) { + aTime, err := parseTime(tt.in) + if err != nil { + if tt.err == nil || err.Error() != 
tt.err.Error() { + t.Errorf("ParseTime expected error: %v, got: %v", tt.err, err) + } + return + } else if tt.err != nil { + t.Errorf("Expected error: %v", tt.err) } - continue - } else if tt.err != nil { - t.Errorf("Expected error: %v", tt.err) - } - if aTime != tt.out { - t.Errorf("Expected time: %v, got: %v", tt.out, aTime) - } + if aTime != tt.out { + t.Errorf("Expected time: %v, got: %v", tt.out, aTime) + } + }) } } func TestWeekdayTime(t *testing.T) { for _, tt := range parseWeekdayTests { - aTime, err := parseWeekday(tt.in) - if err != nil { - if tt.err == nil || err.Error() != tt.err.Error() { - t.Errorf("ParseWeekday expected error: %v, got: %v", tt.err, err) + t.Run(tt.about, func(t *testing.T) { + aTime, err := parseWeekday(tt.in) + if err != nil { + if tt.err == nil || err.Error() != tt.err.Error() { + t.Errorf("ParseWeekday expected error: %v, got: %v", tt.err, err) + } + return + } else if tt.err != nil { + t.Errorf("Expected error: %v", tt.err) } - continue - } else if tt.err != nil { - t.Errorf("Expected error: %v", tt.err) - } - if aTime != tt.out { - t.Errorf("Expected weekday: %v, got: %v", tt.out, aTime) - } + if aTime != tt.out { + t.Errorf("Expected weekday: %v, got: %v", tt.out, aTime) + } + }) } } func TestClusterAnnotations(t *testing.T) { for _, tt := range annotations { - var cluster Postgresql - err := cluster.UnmarshalJSON(tt.in) - if err != nil { - if tt.err == nil || err.Error() != tt.err.Error() { - t.Errorf("Unable to marshal cluster with annotations: expected %v got %v", tt.err, err) + t.Run(tt.about, func(t *testing.T) { + var cluster Postgresql + err := cluster.UnmarshalJSON(tt.in) + if err != nil { + if tt.err == nil || err.Error() != tt.err.Error() { + t.Errorf("Unable to marshal cluster with annotations: expected %v got %v", tt.err, err) + } + return } - continue - } - for k, v := range cluster.Spec.PodAnnotations { - found, expected := v, tt.annotations[k] - if found != expected { - t.Errorf("Didn't find correct value for key 
%v in for podAnnotations: Expected %v found %v", k, expected, found) + for k, v := range cluster.Spec.PodAnnotations { + found, expected := v, tt.annotations[k] + if found != expected { + t.Errorf("Didn't find correct value for key %v in for podAnnotations: Expected %v found %v", k, expected, found) + } } - } + }) } } func TestClusterName(t *testing.T) { for _, tt := range clusterNames { - name, err := extractClusterName(tt.in, tt.inTeam) - if err != nil { - if tt.err == nil || err.Error() != tt.err.Error() { - t.Errorf("extractClusterName expected error: %v, got: %v", tt.err, err) + t.Run(tt.about, func(t *testing.T) { + name, err := extractClusterName(tt.in, tt.inTeam) + if err != nil { + if tt.err == nil || err.Error() != tt.err.Error() { + t.Errorf("extractClusterName expected error: %v, got: %v", tt.err, err) + } + return + } else if tt.err != nil { + t.Errorf("Expected error: %v", tt.err) } - continue - } else if tt.err != nil { - t.Errorf("Expected error: %v", tt.err) - } - if name != tt.clusterName { - t.Errorf("Expected cluserName: %q, got: %q", tt.clusterName, name) - } + if name != tt.clusterName { + t.Errorf("Expected cluserName: %q, got: %q", tt.clusterName, name) + } + }) } } func TestCloneClusterDescription(t *testing.T) { for _, tt := range cloneClusterDescriptions { - if err := validateCloneClusterDescription(tt.in); err != nil { - if tt.err == nil || err.Error() != tt.err.Error() { - t.Errorf("testCloneClusterDescription expected error: %v, got: %v", tt.err, err) + t.Run(tt.about, func(t *testing.T) { + if err := validateCloneClusterDescription(tt.in); err != nil { + if tt.err == nil || err.Error() != tt.err.Error() { + t.Errorf("testCloneClusterDescription expected error: %v, got: %v", tt.err, err) + } + } else if tt.err != nil { + t.Errorf("Expected error: %v", tt.err) } - } else if tt.err != nil { - t.Errorf("Expected error: %v", tt.err) - } + }) } } func TestUnmarshalMaintenanceWindow(t *testing.T) { for _, tt := range maintenanceWindows { - 
var m MaintenanceWindow - err := m.UnmarshalJSON(tt.in) - if err != nil { - if tt.err == nil || err.Error() != tt.err.Error() { - t.Errorf("MaintenanceWindow unmarshal expected error: %v, got %v", tt.err, err) + t.Run(tt.about, func(t *testing.T) { + var m MaintenanceWindow + err := m.UnmarshalJSON(tt.in) + if err != nil { + if tt.err == nil || err.Error() != tt.err.Error() { + t.Errorf("MaintenanceWindow unmarshal expected error: %v, got %v", tt.err, err) + } + return + } else if tt.err != nil { + t.Errorf("Expected error: %v", tt.err) } - continue - } else if tt.err != nil { - t.Errorf("Expected error: %v", tt.err) - } - if !reflect.DeepEqual(m, tt.out) { - t.Errorf("Expected maintenance window: %#v, got: %#v", tt.out, m) - } + if !reflect.DeepEqual(m, tt.out) { + t.Errorf("Expected maintenance window: %#v, got: %#v", tt.out, m) + } + }) } } func TestMarshalMaintenanceWindow(t *testing.T) { for _, tt := range maintenanceWindows { - if tt.err != nil { - continue - } + t.Run(tt.about, func(t *testing.T) { + if tt.err != nil { + return + } - s, err := tt.out.MarshalJSON() - if err != nil { - t.Errorf("Marshal Error: %v", err) - } + s, err := tt.out.MarshalJSON() + if err != nil { + t.Errorf("Marshal Error: %v", err) + } - if !bytes.Equal(s, tt.in) { - t.Errorf("Expected Marshal: %q, got: %q", string(tt.in), string(s)) - } + if !bytes.Equal(s, tt.in) { + t.Errorf("Expected Marshal: %q, got: %q", string(tt.in), string(s)) + } + }) } } func TestUnmarshalPostgresStatus(t *testing.T) { for _, tt := range postgresStatus { - var ps PostgresStatus - err := ps.UnmarshalJSON(tt.in) - if err != nil { - if tt.err == nil || err.Error() != tt.err.Error() { - t.Errorf("CR status unmarshal expected error: %v, got %v", tt.err, err) - } - continue - //} else if tt.err != nil { - //t.Errorf("Expected error: %v", tt.err) - } + t.Run(tt.about, func(t *testing.T) { - if !reflect.DeepEqual(ps, tt.out) { - t.Errorf("Expected status: %#v, got: %#v", tt.out, ps) - } + var ps PostgresStatus + 
err := ps.UnmarshalJSON(tt.in) + if err != nil { + if tt.err == nil || err.Error() != tt.err.Error() { + t.Errorf("CR status unmarshal expected error: %v, got %v", tt.err, err) + } + return + } + + if !reflect.DeepEqual(ps, tt.out) { + t.Errorf("Expected status: %#v, got: %#v", tt.out, ps) + } + }) } } func TestPostgresUnmarshal(t *testing.T) { for _, tt := range unmarshalCluster { - var cluster Postgresql - err := cluster.UnmarshalJSON(tt.in) - if err != nil { - if tt.err == nil || err.Error() != tt.err.Error() { - t.Errorf("Unmarshal expected error: %v, got: %v", tt.err, err) + t.Run(tt.about, func(t *testing.T) { + var cluster Postgresql + err := cluster.UnmarshalJSON(tt.in) + if err != nil { + if tt.err == nil || err.Error() != tt.err.Error() { + t.Errorf("Unmarshal expected error: %v, got: %v", tt.err, err) + } + return + } else if tt.err != nil { + t.Errorf("Expected error: %v", tt.err) } - continue - } else if tt.err != nil { - t.Errorf("Expected error: %v", tt.err) - } - if !reflect.DeepEqual(cluster, tt.out) { - t.Errorf("Expected Postgresql: %#v, got %#v", tt.out, cluster) - } + if !reflect.DeepEqual(cluster, tt.out) { + t.Errorf("Expected Postgresql: %#v, got %#v", tt.out, cluster) + } + }) } } func TestMarshal(t *testing.T) { for _, tt := range unmarshalCluster { - if tt.err != nil { - continue - } + t.Run(tt.about, func(t *testing.T) { - // Unmarshal and marshal example to capture api changes - var cluster Postgresql - err := cluster.UnmarshalJSON(tt.marshal) - if err != nil { - if tt.err == nil || err.Error() != tt.err.Error() { - t.Errorf("Backwards compatibility unmarshal expected error: %v, got: %v", tt.err, err) + if tt.err != nil { + return } - continue - } - expected, err := json.Marshal(cluster) - if err != nil { - t.Errorf("Backwards compatibility marshal error: %v", err) - } - m, err := json.Marshal(tt.out) - if err != nil { - t.Errorf("Marshal error: %v", err) - } - if !bytes.Equal(m, expected) { - t.Errorf("Marshal Postgresql \nexpected: 
%q, \ngot: %q", string(expected), string(m)) - } + // Unmarshal and marshal example to capture api changes + var cluster Postgresql + err := cluster.UnmarshalJSON(tt.marshal) + if err != nil { + if tt.err == nil || err.Error() != tt.err.Error() { + t.Errorf("Backwards compatibility unmarshal expected error: %v, got: %v", tt.err, err) + } + return + } + expected, err := json.Marshal(cluster) + if err != nil { + t.Errorf("Backwards compatibility marshal error: %v", err) + } + + m, err := json.Marshal(tt.out) + if err != nil { + t.Errorf("Marshal error: %v", err) + } + if !bytes.Equal(m, expected) { + t.Errorf("Marshal Postgresql \nexpected: %q, \ngot: %q", string(expected), string(m)) + } + }) } } func TestPostgresMeta(t *testing.T) { for _, tt := range unmarshalCluster { - if a := tt.out.GetObjectKind(); a != &tt.out.TypeMeta { - t.Errorf("GetObjectKindMeta \nexpected: %v, \ngot: %v", tt.out.TypeMeta, a) - } + t.Run(tt.about, func(t *testing.T) { - if a := tt.out.GetObjectMeta(); reflect.DeepEqual(a, tt.out.ObjectMeta) { - t.Errorf("GetObjectMeta \nexpected: %v, \ngot: %v", tt.out.ObjectMeta, a) - } + if a := tt.out.GetObjectKind(); a != &tt.out.TypeMeta { + t.Errorf("GetObjectKindMeta \nexpected: %v, \ngot: %v", tt.out.TypeMeta, a) + } + + if a := tt.out.GetObjectMeta(); reflect.DeepEqual(a, tt.out.ObjectMeta) { + t.Errorf("GetObjectMeta \nexpected: %v, \ngot: %v", tt.out.ObjectMeta, a) + } + }) } } func TestPostgresListMeta(t *testing.T) { for _, tt := range postgresqlList { - if tt.err != nil { - continue - } + t.Run(tt.about, func(t *testing.T) { + if tt.err != nil { + return + } - if a := tt.out.GetObjectKind(); a != &tt.out.TypeMeta { - t.Errorf("GetObjectKindMeta expected: %v, got: %v", tt.out.TypeMeta, a) - } + if a := tt.out.GetObjectKind(); a != &tt.out.TypeMeta { + t.Errorf("GetObjectKindMeta expected: %v, got: %v", tt.out.TypeMeta, a) + } - if a := tt.out.GetListMeta(); reflect.DeepEqual(a, tt.out.ListMeta) { - t.Errorf("GetObjectMeta expected: %v, got: 
%v", tt.out.ListMeta, a) - } + if a := tt.out.GetListMeta(); reflect.DeepEqual(a, tt.out.ListMeta) { + t.Errorf("GetObjectMeta expected: %v, got: %v", tt.out.ListMeta, a) + } - return + return + }) } } func TestPostgresqlClone(t *testing.T) { for _, tt := range unmarshalCluster { - cp := &tt.out - cp.Error = "" - clone := cp.Clone() - if !reflect.DeepEqual(clone, cp) { - t.Errorf("TestPostgresqlClone expected: \n%#v\n, got \n%#v", cp, clone) - } - + t.Run(tt.about, func(t *testing.T) { + cp := &tt.out + cp.Error = "" + clone := cp.Clone() + if !reflect.DeepEqual(clone, cp) { + t.Errorf("TestPostgresqlClone expected: \n%#v\n, got \n%#v", cp, clone) + } + }) } } From 1f0312a01451b544231089dd6b703b93e68bd2c5 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Mon, 3 Feb 2020 11:43:18 +0100 Subject: [PATCH 11/31] make minimum limits boundaries configurable (#808) * make minimum limits boundaries configurable * add e2e test --- .../crds/operatorconfigurations.yaml | 6 ++ charts/postgres-operator/values-crd.yaml | 26 ++++--- charts/postgres-operator/values.yaml | 24 ++++--- docs/reference/operator_parameters.md | 14 +++- docs/user.md | 10 +-- e2e/tests/test_e2e.py | 68 ++++++++++++++++--- manifests/complete-postgres-manifest.yaml | 4 +- manifests/configmap.yaml | 6 +- manifests/operatorconfiguration.crd.yaml | 6 ++ manifests/postgres-operator.yaml | 4 +- ...gresql-operator-default-configuration.yaml | 8 ++- pkg/apis/acid.zalan.do/v1/crds.go | 8 +++ .../v1/operator_configuration_type.go | 2 + pkg/cluster/cluster.go | 56 +++++++-------- pkg/cluster/sync.go | 16 ++--- pkg/controller/operator_config.go | 2 + pkg/util/config/config.go | 8 ++- 17 files changed, 175 insertions(+), 93 deletions(-) diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index c97e246ab..52d03df9c 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ 
b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -179,6 +179,12 @@ spec: default_memory_request: type: string pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + min_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + min_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' timeouts: type: object properties: diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index 5b67258fa..61cab3d06 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -115,13 +115,17 @@ configKubernetes: # configure resource requests for the Postgres pods configPostgresPodResources: # CPU limits for the postgres containers - default_cpu_limit: "3" - # cpu request value for the postgres containers + default_cpu_limit: "1" + # CPU request value for the postgres containers default_cpu_request: 100m # memory limits for the postgres containers - default_memory_limit: 1Gi + default_memory_limit: 500Mi # memory request value for the postgres containers default_memory_request: 100Mi + # hard CPU minimum required to properly run a Postgres cluster + min_cpu_limit: 250m + # hard memory minimum required to properly run a Postgres cluster + min_memory_limit: 250Mi # timeouts related to some operator actions configTimeouts: @@ -251,7 +255,7 @@ configScalyr: # CPU rquest value for the Scalyr sidecar scalyr_cpu_request: 100m # Memory limit value for the Scalyr sidecar - scalyr_memory_limit: 1Gi + scalyr_memory_limit: 500Mi # Memory request value for the Scalyr sidecar scalyr_memory_request: 50Mi @@ -272,13 +276,13 @@ serviceAccount: priorityClassName: "" -resources: {} - # limits: - # cpu: 100m - # memory: 300Mi - # requests: - # cpu: 100m - # memory: 300Mi +resources: + limits: + cpu: 500m + memory: 500Mi + requests: + cpu: 100m + memory: 250Mi # Affinity for pod assignment # Ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 60190f49a..deb506329 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -108,13 +108,17 @@ configKubernetes: # configure resource requests for the Postgres pods configPostgresPodResources: # CPU limits for the postgres containers - default_cpu_limit: "3" - # cpu request value for the postgres containers + default_cpu_limit: "1" + # CPU request value for the postgres containers default_cpu_request: 100m # memory limits for the postgres containers - default_memory_limit: 1Gi + default_memory_limit: 500Mi # memory request value for the postgres containers default_memory_request: 100Mi + # hard CPU minimum required to properly run a Postgres cluster + min_cpu_limit: 250m + # hard memory minimum required to properly run a Postgres cluster + min_memory_limit: 250Mi # timeouts related to some operator actions configTimeouts: @@ -248,13 +252,13 @@ serviceAccount: priorityClassName: "" -resources: {} - # limits: - # cpu: 100m - # memory: 300Mi - # requests: - # cpu: 100m - # memory: 300Mi +resources: + limits: + cpu: 500m + memory: 500Mi + requests: + cpu: 100m + memory: 250Mi # Affinity for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 1055d89b6..d6dde8c0e 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -318,11 +318,19 @@ CRD-based configuration. * **default_cpu_limit** CPU limits for the Postgres containers, unless overridden by cluster-specific - settings. The default is `3`. + settings. The default is `1`. * **default_memory_limit** memory limits for the Postgres containers, unless overridden by cluster-specific - settings. 
The default is `1Gi`. + settings. The default is `500Mi`. + +* **min_cpu_limit** + hard CPU minimum what we consider to be required to properly run Postgres + clusters with Patroni on Kubernetes. The default is `250m`. + +* **min_memory_limit** + hard memory minimum what we consider to be required to properly run Postgres + clusters with Patroni on Kubernetes. The default is `250Mi`. ## Operator timeouts @@ -579,4 +587,4 @@ scalyr sidecar. In the CRD-based configuration they are grouped under the CPU limit value for the Scalyr sidecar. The default is `1`. * **scalyr_memory_limit** - Memory limit value for the Scalyr sidecar. The default is `1Gi`. + Memory limit value for the Scalyr sidecar. The default is `500Mi`. diff --git a/docs/user.md b/docs/user.md index 45f345c87..f81e11ede 100644 --- a/docs/user.md +++ b/docs/user.md @@ -232,11 +232,11 @@ spec: memory: 300Mi ``` -The minimum limit to properly run the `postgresql` resource is `256m` for `cpu` -and `256Mi` for `memory`. If a lower value is set in the manifest the operator -will cancel ADD or UPDATE events on this resource with an error. If no -resources are defined in the manifest the operator will obtain the configured -[default requests](reference/operator_parameters.md#kubernetes-resource-requests). +The minimum limits to properly run the `postgresql` resource are configured to +`250m` for `cpu` and `250Mi` for `memory`. If a lower value is set in the +manifest the operator will raise the limits to the configured minimum values. +If no resources are defined in the manifest they will be obtained from the +configured [default requests](reference/operator_parameters.md#kubernetes-resource-requests). 
## Use taints and tolerations for dedicated PostgreSQL nodes diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 88a7f1f34..fc87c9887 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -58,6 +58,57 @@ class EndToEndTestCase(unittest.TestCase): k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml") k8s.wait_for_pod_start('spilo-role=master') + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_min_resource_limits(self): + ''' + Lower resource limits below configured minimum and let operator fix it + ''' + k8s = self.k8s + cluster_label = 'version=acid-minimal-cluster' + _, failover_targets = k8s.get_pg_nodes(cluster_label) + + # configure minimum boundaries for CPU and memory limits + minCPULimit = '250m' + minMemoryLimit = '250Mi' + patch_min_resource_limits = { + "data": { + "min_cpu_limit": minCPULimit, + "min_memory_limit": minMemoryLimit + } + } + k8s.update_config(patch_min_resource_limits) + + # lower resource limits below minimum + pg_patch_resources = { + "spec": { + "resources": { + "requests": { + "cpu": "10m", + "memory": "50Mi" + }, + "limits": { + "cpu": "200m", + "memory": "200Mi" + } + } + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources) + k8s.wait_for_master_failover(failover_targets) + + pods = k8s.api.core_v1.list_namespaced_pod( + 'default', label_selector='spilo-role=master,' + cluster_label).items + self.assert_master_is_unique() + masterPod = pods[0] + + self.assertEqual(masterPod.spec.containers[0].resources.limits['cpu'], minCPULimit, + "Expected CPU limit {}, found {}" + .format(minCPULimit, masterPod.spec.containers[0].resources.limits['cpu'])) + self.assertEqual(masterPod.spec.containers[0].resources.limits['memory'], minMemoryLimit, + "Expected memory limit {}, found {}" + .format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory'])) + 
@timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_multi_namespace_support(self): ''' @@ -76,10 +127,9 @@ class EndToEndTestCase(unittest.TestCase): @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_scaling(self): - """ + ''' Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime. - """ - + ''' k8s = self.k8s labels = "version=acid-minimal-cluster" @@ -93,9 +143,9 @@ class EndToEndTestCase(unittest.TestCase): @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_taint_based_eviction(self): - """ + ''' Add taint "postgres=:NoExecute" to node with master. This must cause a failover. - """ + ''' k8s = self.k8s cluster_label = 'version=acid-minimal-cluster' @@ -145,7 +195,7 @@ class EndToEndTestCase(unittest.TestCase): @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_logical_backup_cron_job(self): - """ + ''' Ensure we can (a) create the cron job at user request for a specific PG cluster (b) update the cluster-wide image for the logical backup pod (c) delete the job at user request @@ -153,7 +203,7 @@ class EndToEndTestCase(unittest.TestCase): Limitations: (a) Does not run the actual batch job because there is no S3 mock to upload backups to (b) Assumes 'acid-minimal-cluster' exists as defined in setUp - """ + ''' k8s = self.k8s @@ -208,10 +258,10 @@ class EndToEndTestCase(unittest.TestCase): "Expected 0 logical backup jobs, found {}".format(len(jobs))) def assert_master_is_unique(self, namespace='default', version="acid-minimal-cluster"): - """ + ''' Check that there is a single pod in the k8s cluster with the label "spilo-role=master" To be called manually after operations that affect pods - """ + ''' k8s = self.k8s labels = 'spilo-role=master,version=' + version diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index cf450ef94..2478156d6 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -42,8 +42,8 @@ spec: cpu: 10m 
memory: 100Mi limits: - cpu: 300m - memory: 300Mi + cpu: 500m + memory: 500Mi patroni: initdb: encoding: "UTF8" diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index afb5957da..7d11198da 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -15,9 +15,9 @@ data: # custom_pod_annotations: "keya:valuea,keyb:valueb" db_hosted_zone: db.example.com debug_logging: "true" - # default_cpu_limit: "3" + # default_cpu_limit: "1" # default_cpu_request: 100m - # default_memory_limit: 1Gi + # default_memory_limit: 500Mi # default_memory_request: 100Mi docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16 # enable_admin_role_for_users: "true" @@ -48,6 +48,8 @@ data: # master_pod_move_timeout: 10m # max_instances: "-1" # min_instances: "-1" + # min_cpu_limit: 250m + # min_memory_limit: 250Mi # node_readiness_label: "" # oauth_token_secret_name: postgresql-operator # pam_configuration: | diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index 810624bc4..509d9aefc 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -155,6 +155,12 @@ spec: default_memory_request: type: string pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + min_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + min_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' timeouts: type: object properties: diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index fa8682809..a06abfc68 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -19,10 +19,10 @@ spec: imagePullPolicy: IfNotPresent resources: requests: - cpu: 500m + cpu: 100m memory: 250Mi limits: - cpu: 2000m + cpu: 500m memory: 500Mi securityContext: runAsUser: 1000 diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index 
5e10ff66e..f13a1eed9 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -54,10 +54,12 @@ configuration: # toleration: {} # watched_namespace: "" postgres_pod_resources: - default_cpu_limit: "3" + default_cpu_limit: "1" default_cpu_request: 100m - default_memory_limit: 1Gi + default_memory_limit: 500Mi default_memory_request: 100Mi + # min_cpu_limit: 250m + # min_memory_limit: 250Mi timeouts: pod_label_wait_timeout: 10m pod_deletion_wait_timeout: 10m @@ -115,6 +117,6 @@ configuration: scalyr_cpu_limit: "1" scalyr_cpu_request: 100m # scalyr_image: "" - scalyr_memory_limit: 1Gi + scalyr_memory_limit: 500Mi scalyr_memory_request: 50Mi # scalyr_server_url: "" diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 20fa37138..4d5a6f024 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -810,6 +810,14 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation Type: "string", Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", }, + "min_cpu_limit": { + Type: "string", + Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", + }, + "min_memory_limit": { + Type: "string", + Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", + }, }, }, "timeouts": { diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index 948c7cbbf..1e6a3b459 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -79,6 +79,8 @@ type PostgresPodResourcesDefaults struct { DefaultMemoryRequest string `json:"default_memory_request,omitempty"` DefaultCPULimit string `json:"default_cpu_limit,omitempty"` DefaultMemoryLimit string `json:"default_memory_limit,omitempty"` + MinCPULimit string `json:"min_cpu_limit,omitempty"` + MinMemoryLimit string 
`json:"min_memory_limit,omitempty"` } // OperatorTimeouts defines the timeout of ResourceCheck, PodWait, ReadyWait diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 0a7377389..c560c4cdf 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -227,8 +227,8 @@ func (c *Cluster) Create() error { c.setStatus(acidv1.ClusterStatusCreating) - if err = c.validateResources(&c.Spec); err != nil { - return fmt.Errorf("insufficient resource limits specified: %v", err) + if err = c.enforceMinResourceLimits(&c.Spec); err != nil { + return fmt.Errorf("could not enforce minimum resource limits: %v", err) } for _, role := range []PostgresRole{Master, Replica} { @@ -495,38 +495,38 @@ func compareResourcesAssumeFirstNotNil(a *v1.ResourceRequirements, b *v1.Resourc } -func (c *Cluster) validateResources(spec *acidv1.PostgresSpec) error { - - // setting limits too low can cause unnecessary evictions / OOM kills - const ( - cpuMinLimit = "256m" - memoryMinLimit = "256Mi" - ) +func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error { var ( isSmaller bool err error ) + // setting limits too low can cause unnecessary evictions / OOM kills + minCPULimit := c.OpConfig.MinCPULimit + minMemoryLimit := c.OpConfig.MinMemoryLimit + cpuLimit := spec.Resources.ResourceLimits.CPU if cpuLimit != "" { - isSmaller, err = util.IsSmallerQuantity(cpuLimit, cpuMinLimit) + isSmaller, err = util.IsSmallerQuantity(cpuLimit, minCPULimit) if err != nil { - return fmt.Errorf("error validating CPU limit: %v", err) + return fmt.Errorf("could not compare defined CPU limit %s with configured minimum value %s: %v", cpuLimit, minCPULimit, err) } if isSmaller { - return fmt.Errorf("defined CPU limit %s is below required minimum %s to properly run postgresql resource", cpuLimit, cpuMinLimit) + c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit) + spec.Resources.ResourceLimits.CPU = minCPULimit } } memoryLimit 
:= spec.Resources.ResourceLimits.Memory if memoryLimit != "" { - isSmaller, err = util.IsSmallerQuantity(memoryLimit, memoryMinLimit) + isSmaller, err = util.IsSmallerQuantity(memoryLimit, minMemoryLimit) if err != nil { - return fmt.Errorf("error validating memory limit: %v", err) + return fmt.Errorf("could not compare defined memory limit %s with configured minimum value %s: %v", memoryLimit, minMemoryLimit, err) } if isSmaller { - return fmt.Errorf("defined memory limit %s is below required minimum %s to properly run postgresql resource", memoryLimit, memoryMinLimit) + c.logger.Warningf("defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit) + spec.Resources.ResourceLimits.Memory = minMemoryLimit } } @@ -543,7 +543,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { c.mu.Lock() defer c.mu.Unlock() - oldStatus := c.Status c.setStatus(acidv1.ClusterStatusUpdating) c.setSpec(newSpec) @@ -555,22 +554,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { } }() - if err := c.validateResources(&newSpec.Spec); err != nil { - err = fmt.Errorf("insufficient resource limits specified: %v", err) - - // cancel update only when (already too low) pod resources were edited - // if cluster was successfully running before the update, continue but log a warning - isCPULimitSmaller, err2 := util.IsSmallerQuantity(newSpec.Spec.Resources.ResourceLimits.CPU, oldSpec.Spec.Resources.ResourceLimits.CPU) - isMemoryLimitSmaller, err3 := util.IsSmallerQuantity(newSpec.Spec.Resources.ResourceLimits.Memory, oldSpec.Spec.Resources.ResourceLimits.Memory) - - if oldStatus.Running() && !isCPULimitSmaller && !isMemoryLimitSmaller && err2 == nil && err3 == nil { - c.logger.Warning(err) - } else { - updateFailed = true - return err - } - } - if oldSpec.Spec.PgVersion != newSpec.Spec.PgVersion { // PG versions comparison c.logger.Warningf("postgresql version change(%q -> %q) has no effect", 
oldSpec.Spec.PgVersion, newSpec.Spec.PgVersion) //we need that hack to generate statefulset with the old version @@ -616,6 +599,12 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { // Statefulset func() { + if err := c.enforceMinResourceLimits(&c.Spec); err != nil { + c.logger.Errorf("could not sync resources: %v", err) + updateFailed = true + return + } + oldSs, err := c.generateStatefulSet(&oldSpec.Spec) if err != nil { c.logger.Errorf("could not generate old statefulset spec: %v", err) @@ -623,6 +612,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { return } + // update newSpec to for latter comparison with oldSpec + c.enforceMinResourceLimits(&newSpec.Spec) + newSs, err := c.generateStatefulSet(&newSpec.Spec) if err != nil { c.logger.Errorf("could not generate new statefulset spec: %v", err) diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index abe579fb5..fa4fc9ec1 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -23,7 +23,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { c.mu.Lock() defer c.mu.Unlock() - oldStatus := c.Status c.setSpec(newSpec) defer func() { @@ -35,16 +34,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { } }() - if err = c.validateResources(&c.Spec); err != nil { - err = fmt.Errorf("insufficient resource limits specified: %v", err) - if oldStatus.Running() { - c.logger.Warning(err) - err = nil - } else { - return err - } - } - if err = c.initUsers(); err != nil { err = fmt.Errorf("could not init users: %v", err) return err @@ -76,6 +65,11 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { return err } + if err = c.enforceMinResourceLimits(&c.Spec); err != nil { + err = fmt.Errorf("could not enforce minimum resource limits: %v", err) + return err + } + c.logger.Debugf("syncing statefulsets") if err = c.syncStatefulSet(); err != nil { if !k8sutil.ResourceAlreadyExists(err) { diff --git a/pkg/controller/operator_config.go 
b/pkg/controller/operator_config.go index 56ba91d02..d0357d222 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -75,6 +75,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest result.DefaultCPULimit = fromCRD.PostgresPodResources.DefaultCPULimit result.DefaultMemoryLimit = fromCRD.PostgresPodResources.DefaultMemoryLimit + result.MinCPULimit = fromCRD.PostgresPodResources.MinCPULimit + result.MinMemoryLimit = fromCRD.PostgresPodResources.MinMemoryLimit // timeout config result.ResourceCheckInterval = time.Duration(fromCRD.Timeouts.ResourceCheckInterval) diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 224691120..339f06ce0 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -37,8 +37,10 @@ type Resources struct { PodToleration map[string]string `name:"toleration" default:""` DefaultCPURequest string `name:"default_cpu_request" default:"100m"` DefaultMemoryRequest string `name:"default_memory_request" default:"100Mi"` - DefaultCPULimit string `name:"default_cpu_limit" default:"3"` - DefaultMemoryLimit string `name:"default_memory_limit" default:"1Gi"` + DefaultCPULimit string `name:"default_cpu_limit" default:"1"` + DefaultMemoryLimit string `name:"default_memory_limit" default:"500Mi"` + MinCPULimit string `name:"min_cpu_limit" default:"250m"` + MinMemoryLimit string `name:"min_memory_limit" default:"250Mi"` PodEnvironmentConfigMap string `name:"pod_environment_configmap" default:""` NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""` MaxInstances int32 `name:"max_instances" default:"-1"` @@ -66,7 +68,7 @@ type Scalyr struct { ScalyrCPURequest string `name:"scalyr_cpu_request" default:"100m"` ScalyrMemoryRequest string `name:"scalyr_memory_request" default:"50Mi"` ScalyrCPULimit string `name:"scalyr_cpu_limit" default:"1"` - ScalyrMemoryLimit 
string `name:"scalyr_memory_limit" default:"1Gi"` + ScalyrMemoryLimit string `name:"scalyr_memory_limit" default:"500Mi"` } // LogicalBackup defines configuration for logical backup From 8794e4f9acad87c8acbb0e4dd13b952fcd38f89e Mon Sep 17 00:00:00 2001 From: siku4 <44839490+siku4@users.noreply.github.com> Date: Mon, 3 Feb 2020 15:30:26 +0100 Subject: [PATCH 12/31] add service for exposing operator api (#794) --- .../postgres-operator/templates/service.yaml | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 charts/postgres-operator/templates/service.yaml diff --git a/charts/postgres-operator/templates/service.yaml b/charts/postgres-operator/templates/service.yaml new file mode 100644 index 000000000..52990c5d4 --- /dev/null +++ b/charts/postgres-operator/templates/service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: {{ template "postgres-operator.name" . }} + helm.sh/chart: {{ template "postgres-operator.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ template "postgres-operator.fullname" . }} +spec: + ports: + - port: 8080 + protocol: TCP + targetPort: 8080 + selector: + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ template "postgres-operator.name" . 
}} + sessionAffinity: None + type: ClusterIP +status: + loadBalancer: {} \ No newline at end of file From 1ee99b81944752a0d6c3e6cc4d71f4bd662601e6 Mon Sep 17 00:00:00 2001 From: siku4 <44839490+siku4@users.noreply.github.com> Date: Wed, 5 Feb 2020 16:54:46 +0100 Subject: [PATCH 13/31] add first version of ui helm chart (#795) * add first version of ui helm chart * add postgres versions 12 and 9.5 --- charts/postgres-operator-ui/.helmignore | 22 +++++ charts/postgres-operator-ui/Chart.yaml | 19 +++++ .../postgres-operator-ui/templates/NOTES.txt | 3 + .../templates/_helpers.tpl | 32 ++++++++ .../templates/deployment.yaml | 69 ++++++++++++++++ .../templates/ingress.yaml | 44 ++++++++++ .../templates/service.yaml | 20 +++++ .../templates/serviceaccount.yaml | 81 +++++++++++++++++++ charts/postgres-operator-ui/values.yaml | 47 +++++++++++ 9 files changed, 337 insertions(+) create mode 100644 charts/postgres-operator-ui/.helmignore create mode 100644 charts/postgres-operator-ui/Chart.yaml create mode 100644 charts/postgres-operator-ui/templates/NOTES.txt create mode 100644 charts/postgres-operator-ui/templates/_helpers.tpl create mode 100644 charts/postgres-operator-ui/templates/deployment.yaml create mode 100644 charts/postgres-operator-ui/templates/ingress.yaml create mode 100644 charts/postgres-operator-ui/templates/service.yaml create mode 100644 charts/postgres-operator-ui/templates/serviceaccount.yaml create mode 100644 charts/postgres-operator-ui/values.yaml diff --git a/charts/postgres-operator-ui/.helmignore b/charts/postgres-operator-ui/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/charts/postgres-operator-ui/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/postgres-operator-ui/Chart.yaml b/charts/postgres-operator-ui/Chart.yaml new file mode 100644 index 000000000..4be7d8af1 --- /dev/null +++ b/charts/postgres-operator-ui/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +name: postgres-operator-ui +version: 0.1.0 +appVersion: 1.2.0 +home: https://github.com/zalando/postgres-operator +description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience +keywords: +- postgres +- operator +- ui +- cloud-native +- patroni +- spilo +maintainers: +- name: siku4 + email: sk@sik-net.de +sources: +- https://github.com/zalando/postgres-operator +engine: gotpl diff --git a/charts/postgres-operator-ui/templates/NOTES.txt b/charts/postgres-operator-ui/templates/NOTES.txt new file mode 100644 index 000000000..7e9142891 --- /dev/null +++ b/charts/postgres-operator-ui/templates/NOTES.txt @@ -0,0 +1,3 @@ +To verify that postgres-operator has started, run: + + kubectl --namespace={{ .Release.Namespace }} get pods -l "app.kubernetes.io/name={{ template "postgres-operator-ui.name" . }}" \ No newline at end of file diff --git a/charts/postgres-operator-ui/templates/_helpers.tpl b/charts/postgres-operator-ui/templates/_helpers.tpl new file mode 100644 index 000000000..a5e97081d --- /dev/null +++ b/charts/postgres-operator-ui/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgres-operator-ui.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+If release name contains chart name it will be used as a full name. +*/}} +{{- define "postgres-operator-ui.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgres-operator-ui.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/postgres-operator-ui/templates/deployment.yaml b/charts/postgres-operator-ui/templates/deployment.yaml new file mode 100644 index 000000000..924bcf000 --- /dev/null +++ b/charts/postgres-operator-ui/templates/deployment.yaml @@ -0,0 +1,69 @@ +apiVersion: "apps/v1" +kind: "Deployment" +metadata: + labels: + app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} + helm.sh/chart: {{ template "postgres-operator-ui.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ template "postgres-operator-ui.fullname" . }} +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + team: "acid" # Parameterize? + spec: + serviceAccountName: {{ template "postgres-operator-ui.name" . 
}} + containers: + - name: "service" + image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: 8081 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/health" + port: 8081 + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: + {{- toYaml .Values.resources | nindent 12 }} + env: + - name: "APP_URL" + value: "http://localhost:8081" + - name: "OPERATOR_API_URL" + value: {{ .Values.envs.operatorApiUrl }} + - name: "TARGET_NAMESPACE" + value: {{ .Values.envs.targetNamespace }} + - name: "TEAMS" + value: |- + [ + "acid" + ] + - name: "OPERATOR_UI_CONFIG" + value: |- + { + "docs_link":"https://postgres-operator.readthedocs.io/en/latest/", + "dns_format_string": "{1}-{0}.{2}", + "databases_visible": true, + "master_load_balancer_visible": true, + "nat_gateways_visible": false, + "replica_load_balancer_visible": true, + "resources_visible": true, + "users_visible": true, + "postgresql_versions": [ + "12", + "11", + "10", + "9.6", + "9.5" + ] + } \ No newline at end of file diff --git a/charts/postgres-operator-ui/templates/ingress.yaml b/charts/postgres-operator-ui/templates/ingress.yaml new file mode 100644 index 000000000..73fa2e817 --- /dev/null +++ b/charts/postgres-operator-ui/templates/ingress.yaml @@ -0,0 +1,44 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "postgres-operator-ui.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} + helm.sh/chart: {{ template "postgres-operator-ui.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/postgres-operator-ui/templates/service.yaml b/charts/postgres-operator-ui/templates/service.yaml new file mode 100644 index 000000000..09adff26f --- /dev/null +++ b/charts/postgres-operator-ui/templates/service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} + helm.sh/chart: {{ template "postgres-operator-ui.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ template "postgres-operator-ui.fullname" . }} +spec: + ports: + - port: {{ .Values.service.port }} + targetPort: 8081 + protocol: TCP + selector: + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} + type: {{ .Values.service.type }} + + diff --git a/charts/postgres-operator-ui/templates/serviceaccount.yaml b/charts/postgres-operator-ui/templates/serviceaccount.yaml new file mode 100644 index 000000000..4148938b0 --- /dev/null +++ b/charts/postgres-operator-ui/templates/serviceaccount.yaml @@ -0,0 +1,81 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "postgres-operator-ui.name" . }} + labels: + app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . 
}} + helm.sh/chart: {{ template "postgres-operator-ui.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ template "postgres-operator-ui.name" . }} + labels: + app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} + helm.sh/chart: {{ template "postgres-operator-ui.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +rules: +- apiGroups: + - acid.zalan.do + resources: + - postgresqls + verbs: + - create + - delete + - get + - list + - patch + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - get + - list +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "postgres-operator-ui.name" . }} + labels: + app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} + helm.sh/chart: {{ template "postgres-operator-ui.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "postgres-operator-ui.name" . }} +subjects: +- kind: ServiceAccount +# note: the cluster role binding needs to be defined +# for every namespace the operator-ui service account lives in. + name: {{ template "postgres-operator-ui.name" . 
}} + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/charts/postgres-operator-ui/values.yaml b/charts/postgres-operator-ui/values.yaml new file mode 100644 index 000000000..9351d470e --- /dev/null +++ b/charts/postgres-operator-ui/values.yaml @@ -0,0 +1,47 @@ +# Default values for postgres-operator-ui. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +# configure ui image +image: + registry: registry.opensource.zalan.do + repository: acid/postgres-operator-ui + tag: v1.2.0 + pullPolicy: "IfNotPresent" + +# configure UI pod resources +resources: + limits: + cpu: 300m + memory: 3000Mi + requests: + cpu: 100m + memory: 100Mi + +# configure UI ENVs +envs: + # IMPORTANT: While operator chart and UI chart are idendependent, this is the interface between + # UI and operator API. Insert the service name of the operator API here! + operatorApiUrl: "http://postgres-operator:8080" + targetNamespace: "default" + +# configure UI service +service: + type: "ClusterIP" + port: "8080" + +# configure UI ingress. 
If needed: "enabled: true" +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: ui.example.org + paths: [""] + tls: [] + # - secretName: ui-tls + # hosts: + # - ui.exmaple.org \ No newline at end of file From a660d758a5850530bb7f7d3e1a049dd01f2bdda2 Mon Sep 17 00:00:00 2001 From: Vito Botta Date: Mon, 10 Feb 2020 12:48:24 +0200 Subject: [PATCH 14/31] Add region setting for logical backups to non-AWS storage (#813) * Add region setting for logical backups to non-AWS storage --- charts/postgres-operator/crds/operatorconfigurations.yaml | 2 ++ charts/postgres-operator/values-crd.yaml | 2 ++ charts/postgres-operator/values.yaml | 2 ++ docker/logical-backup/dump.sh | 1 + docs/reference/operator_parameters.md | 5 ++++- manifests/configmap.yaml | 1 + manifests/operatorconfiguration.crd.yaml | 2 ++ manifests/postgresql-operator-default-configuration.yaml | 1 + pkg/apis/acid.zalan.do/v1/crds.go | 3 +++ pkg/apis/acid.zalan.do/v1/operator_configuration_type.go | 1 + pkg/cluster/k8sres.go | 4 ++++ pkg/controller/operator_config.go | 1 + pkg/util/config/config.go | 1 + 13 files changed, 25 insertions(+), 1 deletion(-) diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index 52d03df9c..9725c2708 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -243,6 +243,8 @@ spec: type: string logical_backup_s3_endpoint: type: string + logical_backup_s3_region: + type: string logical_backup_s3_secret_access_key: type: string logical_backup_s3_sse: diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index 61cab3d06..1f9b5e495 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -204,6 +204,8 @@ configLogicalBackup: logical_backup_s3_access_key_id: "" 
# S3 bucket to store backup results logical_backup_s3_bucket: "my-bucket-url" + # S3 region of bucket + logical_backup_s3_region: "" # S3 endpoint url when not using AWS logical_backup_s3_endpoint: "" # S3 Secret Access Key diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index deb506329..1be5851d2 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -195,6 +195,8 @@ configLogicalBackup: logical_backup_s3_access_key_id: "" # S3 bucket to store backup results logical_backup_s3_bucket: "my-bucket-url" + # S3 region of bucket + logical_backup_s3_region: "" # S3 endpoint url when not using AWS logical_backup_s3_endpoint: "" # S3 Secret Access Key diff --git a/docker/logical-backup/dump.sh b/docker/logical-backup/dump.sh index 673f09038..2d9a39e02 100755 --- a/docker/logical-backup/dump.sh +++ b/docker/logical-backup/dump.sh @@ -40,6 +40,7 @@ function aws_upload { [[ ! -z "$EXPECTED_SIZE" ]] && args+=("--expected-size=$EXPECTED_SIZE") [[ ! -z "$LOGICAL_BACKUP_S3_ENDPOINT" ]] && args+=("--endpoint-url=$LOGICAL_BACKUP_S3_ENDPOINT") + [[ ! -z "$LOGICAL_BACKUP_S3_REGION" ]] && args+=("--region=$LOGICAL_BACKUP_S3_REGION") [[ ! -z "$LOGICAL_BACKUP_S3_SSE" ]] && args+=("--sse=$LOGICAL_BACKUP_S3_SSE") aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}" diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index d6dde8c0e..7a8acc232 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -461,8 +461,11 @@ grouped under the `logical_backup` key. S3 bucket to store backup results. The bucket has to be present and accessible by Postgres pods. Default: empty. +* **logical_backup_s3_region** + Specifies the region of the bucket which is required with some non-AWS S3 storage services. The default is empty. + * **logical_backup_s3_endpoint** - When using non-AWS S3 storage, endpoint can be set as a ENV variable. 
+ When using non-AWS S3 storage, endpoint can be set as a ENV variable. The default is empty. * **logical_backup_s3_sse** Specify server side encription that S3 storage is using. If empty string diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index 7d11198da..d26c83edf 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -40,6 +40,7 @@ data: # logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" # logical_backup_s3_access_key_id: "" # logical_backup_s3_bucket: "my-bucket-url" + # logical_backup_s3_region: "" # logical_backup_s3_endpoint: "" # logical_backup_s3_secret_access_key: "" # logical_backup_s3_sse: "AES256" diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index 509d9aefc..7bd5c529c 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -219,6 +219,8 @@ spec: type: string logical_backup_s3_endpoint: type: string + logical_backup_s3_region: + type: string logical_backup_s3_secret_access_key: type: string logical_backup_s3_sse: diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index f13a1eed9..efd1a5396 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -88,6 +88,7 @@ configuration: # logical_backup_s3_access_key_id: "" logical_backup_s3_bucket: "my-bucket-url" # logical_backup_s3_endpoint: "" + # logical_backup_s3_region: "" # logical_backup_s3_secret_access_key: "" logical_backup_s3_sse: "AES256" logical_backup_schedule: "30 00 * * *" diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 4d5a6f024..bc33f11f6 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -909,6 +909,9 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation 
"logical_backup_s3_endpoint": { Type: "string", }, + "logical_backup_s3_region": { + Type: "string", + }, "logical_backup_s3_secret_access_key": { Type: "string", }, diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index 1e6a3b459..35c51e08d 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -157,6 +157,7 @@ type OperatorLogicalBackupConfiguration struct { Schedule string `json:"logical_backup_schedule,omitempty"` DockerImage string `json:"logical_backup_docker_image,omitempty"` S3Bucket string `json:"logical_backup_s3_bucket,omitempty"` + S3Region string `json:"logical_backup_s3_region,omitempty"` S3Endpoint string `json:"logical_backup_s3_endpoint,omitempty"` S3AccessKeyID string `json:"logical_backup_s3_access_key_id,omitempty"` S3SecretAccessKey string `json:"logical_backup_s3_secret_access_key,omitempty"` diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index aed0c6e83..4c2f50296 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1589,6 +1589,10 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { Name: "LOGICAL_BACKUP_S3_BUCKET", Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket, }, + { + Name: "LOGICAL_BACKUP_S3_REGION", + Value: c.OpConfig.LogicalBackup.LogicalBackupS3Region, + }, { Name: "LOGICAL_BACKUP_S3_ENDPOINT", Value: c.OpConfig.LogicalBackup.LogicalBackupS3Endpoint, diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index d0357d222..98b56a298 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -106,6 +106,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.LogicalBackupSchedule = fromCRD.LogicalBackup.Schedule result.LogicalBackupDockerImage = fromCRD.LogicalBackup.DockerImage result.LogicalBackupS3Bucket = 
fromCRD.LogicalBackup.S3Bucket + result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region result.LogicalBackupS3Endpoint = fromCRD.LogicalBackup.S3Endpoint result.LogicalBackupS3AccessKeyID = fromCRD.LogicalBackup.S3AccessKeyID result.LogicalBackupS3SecretAccessKey = fromCRD.LogicalBackup.S3SecretAccessKey diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 339f06ce0..e4e429abb 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -76,6 +76,7 @@ type LogicalBackup struct { LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"` LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup"` LogicalBackupS3Bucket string `name:"logical_backup_s3_bucket" default:""` + LogicalBackupS3Region string `name:"logical_backup_s3_region" default:""` LogicalBackupS3Endpoint string `name:"logical_backup_s3_endpoint" default:""` LogicalBackupS3AccessKeyID string `name:"logical_backup_s3_access_key_id" default:""` LogicalBackupS3SecretAccessKey string `name:"logical_backup_s3_secret_access_key" default:""` From ba60e15d073e0b254df142cf71adb5725d6b1a16 Mon Sep 17 00:00:00 2001 From: Jonathan Juares Beber Date: Mon, 10 Feb 2020 12:03:25 +0100 Subject: [PATCH 15/31] Add ServiceAnnotations cluster config (#803) The [operator parameters][1] already support the `custom_service_annotations` config.With this parameter is possible to define custom annotations that will be used on the services created by the operator. The `custom_service_annotations` as all the other [operator parameters][1] are defined on the operator level and do not allow customization on the cluster level. A cluster may require different service annotations, as for example, set up different cloud load balancers timeouts, different ingress annotations, and/or enable more customizable environments. 
This commit introduces a new parameter on the cluster level, called `serviceAnnotations`, responsible for defining custom annotations just for the services created by the operator to the specifically defined cluster. It allows a mix of configuration between `custom_service_annotations` and `serviceAnnotations` where the latest one will have priority. In order to allow custom service annotations to be used on services without LoadBalancers (as for example, service mesh services annotations) both `custom_service_annotations` and `serviceAnnotations` are applied independently of load-balancing configuration. For retro-compatibility purposes, `custom_service_annotations` is still under [Load balancer related options][2]. The two default annotations when using LoadBalancer services, `external-dns.alpha.kubernetes.io/hostname` and `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` are still defined by the operator. `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` can be overridden by `custom_service_annotations` or `serviceAnnotations`, allowing a more customizable environment. `external-dns.alpha.kubernetes.io/hostname` can not be overridden once there is no differentiation between custom service annotations for replicas and masters. It updates the documentation and creates the necessary unit and e2e tests to the above-described feature too. 
[1]: https://github.com/zalando/postgres-operator/blob/master/docs/reference/operator_parameters.md [2]: https://github.com/zalando/postgres-operator/blob/master/docs/reference/operator_parameters.md#load-balancer-related-options --- .../postgres-operator/crds/postgresqls.yaml | 4 + docs/administrator.md | 11 + docs/reference/cluster_manifest.md | 5 + docs/reference/operator_parameters.md | 5 +- e2e/README.md | 1 + e2e/tests/test_e2e.py | 51 ++- manifests/complete-postgres-manifest.yaml | 2 + ...res-manifest-with-service-annotations.yaml | 20 ++ manifests/postgresql.crd.yaml | 4 + pkg/apis/acid.zalan.do/v1/crds.go | 8 + pkg/apis/acid.zalan.do/v1/postgresql_type.go | 1 + pkg/apis/acid.zalan.do/v1/util_test.go | 101 +++++- .../acid.zalan.do/v1/zz_generated.deepcopy.go | 7 + pkg/cluster/cluster_test.go | 322 ++++++++++++++++++ pkg/cluster/k8sres.go | 60 ++-- 15 files changed, 565 insertions(+), 37 deletions(-) create mode 100644 manifests/postgres-manifest-with-service-annotations.yaml diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index 198afe119..b4b676236 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -266,6 +266,10 @@ spec: pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' # Note: the value specified here must not be zero or be higher # than the corresponding limit. + serviceAnnotations: + type: object + additionalProperties: + type: string sidecars: type: array nullable: true diff --git a/docs/administrator.md b/docs/administrator.md index 5b8769edb..2e86193c0 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -376,6 +376,17 @@ cluster manifest. In the case any of these variables are omitted from the manifest, the operator configuration settings `enable_master_load_balancer` and `enable_replica_load_balancer` apply. 
Note that the operator settings affect all Postgresql services running in all namespaces watched by the operator. +If load balancing is enabled two default annotations will be applied to its +services: + +- `external-dns.alpha.kubernetes.io/hostname` with the value defined by the + operator configs `master_dns_name_format` and `replica_dns_name_format`. + This value can't be overwritten. If any changing in its value is needed, it + MUST be done changing the DNS format operator config parameters; and +- `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` with + a default value of "3600". This value can be overwritten with the operator + config parameter `custom_service_annotations` or the cluster parameter + `serviceAnnotations`. To limit the range of IP addresses that can reach a load balancer, specify the desired ranges in the `allowedSourceRanges` field (applies to both master and diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md index bf6df681b..7b049b6fa 100644 --- a/docs/reference/cluster_manifest.md +++ b/docs/reference/cluster_manifest.md @@ -122,6 +122,11 @@ These parameters are grouped directly under the `spec` key in the manifest. A map of key value pairs that gets attached as [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to each pod created for the database. +* **serviceAnnotations** + A map of key value pairs that gets attached as [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + to the services created for the database cluster. Check the + [administrator docs](https://github.com/zalando/postgres-operator/blob/master/docs/administrator.md#load-balancers-and-allowed-ip-ranges) + for more information regarding default values and overwrite rules. * **enableShmVolume** Start a database pod without limitations on shm memory. 
By default Docker diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 7a8acc232..e3893ea31 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -388,8 +388,9 @@ In the CRD-based configuration they are grouped under the `load_balancer` key. `false`. * **custom_service_annotations** - when load balancing is enabled, LoadBalancer service is created and - this parameter takes service annotations that are applied to service. + This key/value map provides a list of annotations that get attached to each + service of a cluster created by the operator. If the annotation key is also + provided by the cluster definition, the manifest value is used. Optional. * **master_dns_name_format** defines the DNS name string template for the diff --git a/e2e/README.md b/e2e/README.md index 1d611bcd0..f1bc5f9ed 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -44,3 +44,4 @@ The current tests are all bundled in [`test_e2e.py`](tests/test_e2e.py): * taint-based eviction of Postgres pods * invoking logical backup cron job * uniqueness of master pod +* custom service annotations diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index fc87c9887..5f34dcb16 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -211,8 +211,8 @@ class EndToEndTestCase(unittest.TestCase): schedule = "7 7 7 7 *" pg_patch_enable_backup = { "spec": { - "enableLogicalBackup": True, - "logicalBackupSchedule": schedule + "enableLogicalBackup": True, + "logicalBackupSchedule": schedule } } k8s.api.custom_objects_api.patch_namespaced_custom_object( @@ -234,7 +234,7 @@ class EndToEndTestCase(unittest.TestCase): image = "test-image-name" patch_logical_backup_image = { "data": { - "logical_backup_docker_image": image, + "logical_backup_docker_image": image, } } k8s.update_config(patch_logical_backup_image) @@ -247,7 +247,7 @@ class EndToEndTestCase(unittest.TestCase): # delete the logical backup cron job 
pg_patch_disable_backup = { "spec": { - "enableLogicalBackup": False, + "enableLogicalBackup": False, } } k8s.api.custom_objects_api.patch_namespaced_custom_object( @@ -257,6 +257,37 @@ class EndToEndTestCase(unittest.TestCase): self.assertEqual(0, len(jobs), "Expected 0 logical backup jobs, found {}".format(len(jobs))) + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_service_annotations(self): + ''' + Create a Postgres cluster with service annotations and check them. + ''' + k8s = self.k8s + patch_custom_service_annotations = { + "data": { + "custom_service_annotations": "foo:bar", + } + } + k8s.update_config(patch_custom_service_annotations) + + k8s.create_with_kubectl("manifests/postgres-manifest-with-service-annotations.yaml") + annotations = { + "annotation.key": "value", + "foo": "bar", + } + self.assertTrue(k8s.check_service_annotations( + "version=acid-service-annotations,spilo-role=master", annotations)) + self.assertTrue(k8s.check_service_annotations( + "version=acid-service-annotations,spilo-role=replica", annotations)) + + # clean up + unpatch_custom_service_annotations = { + "data": { + "custom_service_annotations": "", + } + } + k8s.update_config(unpatch_custom_service_annotations) + def assert_master_is_unique(self, namespace='default', version="acid-minimal-cluster"): ''' Check that there is a single pod in the k8s cluster with the label "spilo-role=master" @@ -322,6 +353,16 @@ class K8s: pod_phase = pods[0].status.phase time.sleep(self.RETRY_TIMEOUT_SEC) + def check_service_annotations(self, svc_labels, annotations, namespace='default'): + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + if len(svc.metadata.annotations) != len(annotations): + return False + for key in svc.metadata.annotations: + if svc.metadata.annotations[key] != annotations[key]: + return False + return True + def wait_for_pg_to_scale(self, number_of_instances, namespace='default'): body = { @@ 
-330,7 +371,7 @@ class K8s: } } _ = self.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body) + "acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body) labels = 'version=acid-minimal-cluster' while self.count_pods_with_label(labels) != number_of_instances: diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index 2478156d6..9e3b891c3 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -32,6 +32,8 @@ spec: # spiloFSGroup: 103 # podAnnotations: # annotation.key: value +# serviceAnnotations: +# annotation.key: value # podPriorityClassName: "spilo-pod-priority" # tolerations: # - key: postgres diff --git a/manifests/postgres-manifest-with-service-annotations.yaml b/manifests/postgres-manifest-with-service-annotations.yaml new file mode 100644 index 000000000..ab3096740 --- /dev/null +++ b/manifests/postgres-manifest-with-service-annotations.yaml @@ -0,0 +1,20 @@ +apiVersion: "acid.zalan.do/v1" +kind: postgresql +metadata: + name: acid-service-annotations +spec: + teamId: "acid" + volume: + size: 1Gi + numberOfInstances: 2 + users: + zalando: # database owner + - superuser + - createdb + foo_user: [] # role for application foo + databases: + foo: zalando # dbname: owner + postgresql: + version: "11" + serviceAnnotations: + annotation.key: value diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml index 3b0f652ea..276bc94b8 100644 --- a/manifests/postgresql.crd.yaml +++ b/manifests/postgresql.crd.yaml @@ -230,6 +230,10 @@ spec: pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' # Note: the value specified here must not be zero or be higher # than the corresponding limit. 
+ serviceAnnotations: + type: object + additionalProperties: + type: string sidecars: type: array nullable: true diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index bc33f11f6..4cfc9a9e6 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -383,6 +383,14 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, }, }, + "serviceAnnotations": { + Type: "object", + AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ + Schema: &apiextv1beta1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "sidecars": { Type: "array", Items: &apiextv1beta1.JSONSchemaPropsOrArray{ diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go index 515a73ff0..07b42d4d4 100644 --- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -60,6 +60,7 @@ type PostgresSpec struct { LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"` StandbyCluster *StandbyDescription `json:"standby"` PodAnnotations map[string]string `json:"podAnnotations"` + ServiceAnnotations map[string]string `json:"serviceAnnotations"` // deprecated json tags InitContainersOld []v1.Container `json:"init_containers,omitempty"` diff --git a/pkg/apis/acid.zalan.do/v1/util_test.go b/pkg/apis/acid.zalan.do/v1/util_test.go index a1e01825f..28e9e8ca4 100644 --- a/pkg/apis/acid.zalan.do/v1/util_test.go +++ b/pkg/apis/acid.zalan.do/v1/util_test.go @@ -456,18 +456,84 @@ var postgresqlList = []struct { PostgresqlList{}, errors.New("unexpected end of JSON input")}} -var annotations = []struct { +var podAnnotations = []struct { about string in []byte annotations map[string]string err error }{{ - about: "common annotations", - in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"podAnnotations": {"foo": "bar"},"teamId": "acid", "clone": {"cluster": 
"team-batman"}}}`), + about: "common annotations", + in: []byte(`{ + "kind": "Postgresql", + "apiVersion": "acid.zalan.do/v1", + "metadata": { + "name": "acid-testcluster1" + }, + "spec": { + "podAnnotations": { + "foo": "bar" + }, + "teamId": "acid", + "clone": { + "cluster": "team-batman" + } + } + }`), annotations: map[string]string{"foo": "bar"}, err: nil}, } +var serviceAnnotations = []struct { + about string + in []byte + annotations map[string]string + err error +}{ + { + about: "common single annotation", + in: []byte(`{ + "kind": "Postgresql", + "apiVersion": "acid.zalan.do/v1", + "metadata": { + "name": "acid-testcluster1" + }, + "spec": { + "serviceAnnotations": { + "foo": "bar" + }, + "teamId": "acid", + "clone": { + "cluster": "team-batman" + } + } + }`), + annotations: map[string]string{"foo": "bar"}, + err: nil, + }, + { + about: "common two annotations", + in: []byte(`{ + "kind": "Postgresql", + "apiVersion": "acid.zalan.do/v1", + "metadata": { + "name": "acid-testcluster1" + }, + "spec": { + "serviceAnnotations": { + "foo": "bar", + "post": "gres" + }, + "teamId": "acid", + "clone": { + "cluster": "team-batman" + } + } + }`), + annotations: map[string]string{"foo": "bar", "post": "gres"}, + err: nil, + }, +} + func mustParseTime(s string) metav1.Time { v, err := time.Parse("15:04", s) if err != nil { @@ -517,21 +583,42 @@ func TestWeekdayTime(t *testing.T) { } } -func TestClusterAnnotations(t *testing.T) { - for _, tt := range annotations { +func TestPodAnnotations(t *testing.T) { + for _, tt := range podAnnotations { t.Run(tt.about, func(t *testing.T) { var cluster Postgresql err := cluster.UnmarshalJSON(tt.in) if err != nil { if tt.err == nil || err.Error() != tt.err.Error() { - t.Errorf("Unable to marshal cluster with annotations: expected %v got %v", tt.err, err) + t.Errorf("Unable to marshal cluster with podAnnotations: expected %v got %v", tt.err, err) } return } for k, v := range cluster.Spec.PodAnnotations { found, expected := v, 
tt.annotations[k] if found != expected { - t.Errorf("Didn't find correct value for key %v in for podAnnotations: Expected %v found %v", k, expected, found) + t.Errorf("Didn't find correct value for key %v in for podAnnotations: Expected %v found %v", k, expected, found) + } + } + }) + } +} + +func TestServiceAnnotations(t *testing.T) { + for _, tt := range serviceAnnotations { + t.Run(tt.about, func(t *testing.T) { + var cluster Postgresql + err := cluster.UnmarshalJSON(tt.in) + if err != nil { + if tt.err == nil || err.Error() != tt.err.Error() { + t.Errorf("Unable to marshal cluster with serviceAnnotations: expected %v got %v", tt.err, err) + } + return + } + for k, v := range cluster.Spec.ServiceAnnotations { + found, expected := v, tt.annotations[k] + if found != expected { + t.Errorf("Didn't find correct value for key %v in for serviceAnnotations: Expected %v found %v", k, expected, found) } } }) diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index dc07aa2cf..aaae1f04b 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -514,6 +514,13 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { (*out)[key] = val } } + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.InitContainersOld != nil { in, out := &in.InitContainersOld, &out.InitContainersOld *out = make([]corev1.Container, len(*in)) diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index 85d014d8a..9efbc51c6 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -355,6 +355,12 @@ func TestPodAnnotations(t *testing.T) { database: map[string]string{"foo": "bar"}, merged: map[string]string{"foo": "bar"}, }, + { + subTest: "Both Annotations", + operator: 
map[string]string{"foo": "bar"}, + database: map[string]string{"post": "gres"}, + merged: map[string]string{"foo": "bar", "post": "gres"}, + }, { subTest: "Database Config overrides Operator Config Annotations", operator: map[string]string{"foo": "bar", "global": "foo"}, @@ -382,3 +388,319 @@ func TestPodAnnotations(t *testing.T) { } } } + +func TestServiceAnnotations(t *testing.T) { + enabled := true + disabled := false + tests := []struct { + about string + role PostgresRole + enableMasterLoadBalancerSpec *bool + enableMasterLoadBalancerOC bool + enableReplicaLoadBalancerSpec *bool + enableReplicaLoadBalancerOC bool + operatorAnnotations map[string]string + clusterAnnotations map[string]string + expect map[string]string + }{ + //MASTER + { + about: "Master with no annotations and EnableMasterLoadBalancer disabled on spec and OperatorConfig", + role: "master", + enableMasterLoadBalancerSpec: &disabled, + enableMasterLoadBalancerOC: false, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: make(map[string]string), + }, + { + about: "Master with no annotations and EnableMasterLoadBalancer enabled on spec", + role: "master", + enableMasterLoadBalancerSpec: &enabled, + enableMasterLoadBalancerOC: false, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + { + about: "Master with no annotations and EnableMasterLoadBalancer enabled only on operator config", + role: "master", + enableMasterLoadBalancerSpec: &disabled, + enableMasterLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: make(map[string]string), + }, + { + about: "Master with no annotations and EnableMasterLoadBalancer defined only on operator 
config", + role: "master", + enableMasterLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + { + about: "Master with cluster annotations and load balancer enabled", + role: "master", + enableMasterLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{"foo": "bar"}, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + "foo": "bar", + }, + }, + { + about: "Master with cluster annotations and load balancer disabled", + role: "master", + enableMasterLoadBalancerSpec: &disabled, + enableMasterLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{"foo": "bar"}, + expect: map[string]string{"foo": "bar"}, + }, + { + about: "Master with operator annotations and load balancer enabled", + role: "master", + enableMasterLoadBalancerOC: true, + operatorAnnotations: map[string]string{"foo": "bar"}, + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + "foo": "bar", + }, + }, + { + about: "Master with operator annotations override default annotations", + role: "master", + enableMasterLoadBalancerOC: true, + operatorAnnotations: map[string]string{ + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + 
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + }, + { + about: "Master with cluster annotations override default annotations", + role: "master", + enableMasterLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{ + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + }, + { + about: "Master with cluster annotations do not override external-dns annotations", + role: "master", + enableMasterLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com", + }, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + { + about: "Master with operator annotations do not override external-dns annotations", + role: "master", + enableMasterLoadBalancerOC: true, + clusterAnnotations: make(map[string]string), + operatorAnnotations: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com", + }, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + // REPLICA + { + about: "Replica with no annotations and EnableReplicaLoadBalancer disabled on spec and OperatorConfig", + role: "replica", + enableReplicaLoadBalancerSpec: &disabled, + enableReplicaLoadBalancerOC: false, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: make(map[string]string), + }, + { + 
about: "Replica with no annotations and EnableReplicaLoadBalancer enabled on spec", + role: "replica", + enableReplicaLoadBalancerSpec: &enabled, + enableReplicaLoadBalancerOC: false, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + { + about: "Replica with no annotations and EnableReplicaLoadBalancer enabled only on operator config", + role: "replica", + enableReplicaLoadBalancerSpec: &disabled, + enableReplicaLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: make(map[string]string), + }, + { + about: "Replica with no annotations and EnableReplicaLoadBalancer defined only on operator config", + role: "replica", + enableReplicaLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + { + about: "Replica with cluster annotations and load balancer enabled", + role: "replica", + enableReplicaLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{"foo": "bar"}, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + "foo": "bar", + }, + }, + { + about: "Replica with cluster annotations and load balancer disabled", + role: "replica", + enableReplicaLoadBalancerSpec: &disabled, + enableReplicaLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: 
map[string]string{"foo": "bar"}, + expect: map[string]string{"foo": "bar"}, + }, + { + about: "Replica with operator annotations and load balancer enabled", + role: "replica", + enableReplicaLoadBalancerOC: true, + operatorAnnotations: map[string]string{"foo": "bar"}, + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + "foo": "bar", + }, + }, + { + about: "Replica with operator annotations override default annotations", + role: "replica", + enableReplicaLoadBalancerOC: true, + operatorAnnotations: map[string]string{ + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + }, + { + about: "Replica with cluster annotations override default annotations", + role: "replica", + enableReplicaLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{ + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + }, + { + about: "Replica with cluster annotations do not override external-dns annotations", + role: "replica", + enableReplicaLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com", + }, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + 
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + { + about: "Replica with operator annotations do not override external-dns annotations", + role: "replica", + enableReplicaLoadBalancerOC: true, + clusterAnnotations: make(map[string]string), + operatorAnnotations: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com", + }, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + // COMMON + { + about: "cluster annotations append to operator annotations", + role: "replica", + enableReplicaLoadBalancerOC: false, + operatorAnnotations: map[string]string{"foo": "bar"}, + clusterAnnotations: map[string]string{"post": "gres"}, + expect: map[string]string{"foo": "bar", "post": "gres"}, + }, + { + about: "cluster annotations override operator annotations", + role: "replica", + enableReplicaLoadBalancerOC: false, + operatorAnnotations: map[string]string{"foo": "bar", "post": "gres"}, + clusterAnnotations: map[string]string{"post": "greSQL"}, + expect: map[string]string{"foo": "bar", "post": "greSQL"}, + }, + } + + for _, tt := range tests { + t.Run(tt.about, func(t *testing.T) { + cl.OpConfig.CustomServiceAnnotations = tt.operatorAnnotations + cl.OpConfig.EnableMasterLoadBalancer = tt.enableMasterLoadBalancerOC + cl.OpConfig.EnableReplicaLoadBalancer = tt.enableReplicaLoadBalancerOC + cl.OpConfig.MasterDNSNameFormat = "{cluster}.{team}.{hostedzone}" + cl.OpConfig.ReplicaDNSNameFormat = "{cluster}-repl.{team}.{hostedzone}" + cl.OpConfig.DbHostedZone = "db.example.com" + + cl.Postgresql.Spec.ClusterName = "test" + cl.Postgresql.Spec.TeamID = "acid" + cl.Postgresql.Spec.ServiceAnnotations = tt.clusterAnnotations + cl.Postgresql.Spec.EnableMasterLoadBalancer = tt.enableMasterLoadBalancerSpec + cl.Postgresql.Spec.EnableReplicaLoadBalancer = 
tt.enableReplicaLoadBalancerSpec + + got := cl.generateServiceAnnotations(tt.role, &cl.Postgresql.Spec) + if len(tt.expect) != len(got) { + t.Errorf("expected %d annotation(s), got %d", len(tt.expect), len(got)) + return + } + for k, v := range got { + if tt.expect[k] != v { + t.Errorf("expected annotation '%v' with value '%v', got value '%v'", k, tt.expect[k], v) + } + } + }) + } +} diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 4c2f50296..e6561e0f3 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1230,14 +1230,6 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *ac } func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) *v1.Service { - var dnsName string - - if role == Master { - dnsName = c.masterDNSName() - } else { - dnsName = c.replicaDNSName() - } - serviceSpec := v1.ServiceSpec{ Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}}, Type: v1.ServiceTypeClusterIP, @@ -1247,8 +1239,6 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) serviceSpec.Selector = c.roleLabelsSet(false, role) } - var annotations map[string]string - if c.shouldCreateLoadBalancerForService(role, spec) { // spec.AllowedSourceRanges evaluates to the empty slice of zero length @@ -1262,18 +1252,6 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges) serviceSpec.Type = v1.ServiceTypeLoadBalancer - - annotations = map[string]string{ - constants.ZalandoDNSNameAnnotation: dnsName, - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - } - - if len(c.OpConfig.CustomServiceAnnotations) != 0 { - c.logger.Debugf("There are custom annotations defined, creating them.") - for customAnnotationKey, customAnnotationValue := range 
c.OpConfig.CustomServiceAnnotations {
-			annotations[customAnnotationKey] = customAnnotationValue
-		}
-	}
 	} else if role == Replica {
 		// before PR #258, the replica service was only created if allocated a LB
 		// now we always create the service but warn if the LB is absent
@@ -1285,7 +1263,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
 			Name:        c.serviceName(role),
 			Namespace:   c.Namespace,
 			Labels:      c.roleLabelsSet(true, role),
-			Annotations: annotations,
+			Annotations: c.generateServiceAnnotations(role, spec),
 		},
 		Spec: serviceSpec,
 	}
@@ -1293,6 +1271,42 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
 	return service
 }
 
+func (c *Cluster) generateServiceAnnotations(role PostgresRole, spec *acidv1.PostgresSpec) map[string]string {
+	annotations := make(map[string]string)
+
+	for k, v := range c.OpConfig.CustomServiceAnnotations {
+		annotations[k] = v
+	}
+	if spec != nil && spec.ServiceAnnotations != nil {
+		for k, v := range spec.ServiceAnnotations {
+			annotations[k] = v
+		}
+	}
+
+	if c.shouldCreateLoadBalancerForService(role, spec) {
+		var dnsName string
+		if role == Master {
+			dnsName = c.masterDNSName()
+		} else {
+			dnsName = c.replicaDNSName()
+		}
+
+		// Just set ELB Timeout annotation with default value, if it does not
+		// have a custom value
+		if _, ok := annotations[constants.ElbTimeoutAnnotationName]; !ok {
+			annotations[constants.ElbTimeoutAnnotationName] = constants.ElbTimeoutAnnotationValue
+		}
+		// External DNS name annotation is not customizable
+		annotations[constants.ZalandoDNSNameAnnotation] = dnsName
+	}
+
+	if len(annotations) == 0 {
+		return nil
+	}
+
+	return annotations
+}
+
 func (c *Cluster) generateEndpoint(role PostgresRole, subsets []v1.EndpointSubset) *v1.Endpoints {
 	endpoints := &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
From be6c8cd5737f21a03ed7d671ddd31bc1e9abbef9 Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Mon, 10 Feb 2020 16:41:51 +0100
Subject: [PATCH 16/31] 
specify cluster in e2e taint test (#823) --- e2e/tests/test_e2e.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 5f34dcb16..e3f00da9c 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -68,8 +68,8 @@ class EndToEndTestCase(unittest.TestCase): _, failover_targets = k8s.get_pg_nodes(cluster_label) # configure minimum boundaries for CPU and memory limits - minCPULimit = '250m' - minMemoryLimit = '250Mi' + minCPULimit = '500m' + minMemoryLimit = '500Mi' patch_min_resource_limits = { "data": { "min_cpu_limit": minCPULimit, @@ -176,7 +176,7 @@ class EndToEndTestCase(unittest.TestCase): # patch node and test if master is failing over to one of the expected nodes k8s.api.core_v1.patch_node(current_master_node, body) k8s.wait_for_master_failover(failover_targets) - k8s.wait_for_pod_start('spilo-role=replica') + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) new_master_node, new_replica_nodes = k8s.get_pg_nodes(cluster_label) self.assertNotEqual(current_master_node, new_master_node, From 00f00af2e84786eb4a87a35f2b987f5f946af3c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fredrik=20=C3=98strem?= Date: Tue, 11 Feb 2020 17:16:38 +0100 Subject: [PATCH 17/31] Fix MasterPodMoveTimeout field that cannot be unmarshalled (#816) * Update operator_configuration_type.go * Update operator_config.go --- pkg/apis/acid.zalan.do/v1/operator_configuration_type.go | 2 +- pkg/controller/operator_config.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index 35c51e08d..ded5261fb 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -67,7 +67,7 @@ type KubernetesMetaConfiguration struct { // TODO: use namespacedname PodEnvironmentConfigMap string 
`json:"pod_environment_configmap,omitempty"` PodPriorityClassName string `json:"pod_priority_class_name,omitempty"` - MasterPodMoveTimeout time.Duration `json:"master_pod_move_timeout,omitempty"` + MasterPodMoveTimeout Duration `json:"master_pod_move_timeout,omitempty"` EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"` PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"` PodManagementPolicy string `json:"pod_management_policy,omitempty"` diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index 98b56a298..c6f10faa0 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -66,7 +66,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.NodeReadinessLabel = fromCRD.Kubernetes.NodeReadinessLabel result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName result.PodManagementPolicy = fromCRD.Kubernetes.PodManagementPolicy - result.MasterPodMoveTimeout = fromCRD.Kubernetes.MasterPodMoveTimeout + result.MasterPodMoveTimeout = time.Duration(fromCRD.Kubernetes.MasterPodMoveTimeout) result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity result.PodAntiAffinityTopologyKey = fromCRD.Kubernetes.PodAntiAffinityTopologyKey From 744c71d16bd93b2d94b74fabc5e1f37a1eaa58c2 Mon Sep 17 00:00:00 2001 From: Jonathan Juares Beber Date: Thu, 13 Feb 2020 10:55:30 +0100 Subject: [PATCH 18/31] Allow services update when changing annotations (#818) The current implementations for `pkg.util.k8sutil.SameService` considers only service annotations change on the default annotations created by the operator. Custom annotations are not compared and consequently not applied after the first service creation. This commit introduces a complete annotations comparison between the current service created by the operator and the new one generated based on the configs. Also, it adds tests on the above-mentioned function. 
--- e2e/tests/test_e2e.py | 11 +- ...res-manifest-with-service-annotations.yaml | 20 -- pkg/util/k8sutil/k8sutil.go | 41 ++- pkg/util/k8sutil/k8sutil_test.go | 310 ++++++++++++++++++ 4 files changed, 348 insertions(+), 34 deletions(-) delete mode 100644 manifests/postgres-manifest-with-service-annotations.yaml create mode 100644 pkg/util/k8sutil/k8sutil_test.go diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index e3f00da9c..e92aba11f 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -270,7 +270,16 @@ class EndToEndTestCase(unittest.TestCase): } k8s.update_config(patch_custom_service_annotations) - k8s.create_with_kubectl("manifests/postgres-manifest-with-service-annotations.yaml") + pg_patch_custom_annotations = { + "spec": { + "serviceAnnotations": { + "annotation.key": "value" + } + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations) + annotations = { "annotation.key": "value", "foo": "bar", diff --git a/manifests/postgres-manifest-with-service-annotations.yaml b/manifests/postgres-manifest-with-service-annotations.yaml deleted file mode 100644 index ab3096740..000000000 --- a/manifests/postgres-manifest-with-service-annotations.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: "acid.zalan.do/v1" -kind: postgresql -metadata: - name: acid-service-annotations -spec: - teamId: "acid" - volume: - size: 1Gi - numberOfInstances: 2 - users: - zalando: # database owner - - superuser - - createdb - foo_user: [] # role for application foo - databases: - foo: zalando # dbname: owner - postgresql: - version: "11" - serviceAnnotations: - annotation.key: value diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index 118d1df53..c7b2366b0 100644 --- a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -9,7 +9,6 @@ import ( batchv1beta1 "k8s.io/api/batch/v1beta1" clientbatchv1beta1 
"k8s.io/client-go/kubernetes/typed/batch/v1beta1" - "github.com/zalando/postgres-operator/pkg/util/constants" v1 "k8s.io/api/core/v1" policybeta1 "k8s.io/api/policy/v1beta1" apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -136,21 +135,37 @@ func SameService(cur, new *v1.Service) (match bool, reason string) { } } - oldDNSAnnotation := cur.Annotations[constants.ZalandoDNSNameAnnotation] - newDNSAnnotation := new.Annotations[constants.ZalandoDNSNameAnnotation] - oldELBAnnotation := cur.Annotations[constants.ElbTimeoutAnnotationName] - newELBAnnotation := new.Annotations[constants.ElbTimeoutAnnotationName] + match = true - if oldDNSAnnotation != newDNSAnnotation { - return false, fmt.Sprintf("new service's %q annotation value %q doesn't match the current one %q", - constants.ZalandoDNSNameAnnotation, newDNSAnnotation, oldDNSAnnotation) - } - if oldELBAnnotation != newELBAnnotation { - return false, fmt.Sprintf("new service's %q annotation value %q doesn't match the current one %q", - constants.ElbTimeoutAnnotationName, oldELBAnnotation, newELBAnnotation) + reasonPrefix := "new service's annotations doesn't match the current one:" + for ann := range cur.Annotations { + if _, ok := new.Annotations[ann]; !ok { + match = false + if len(reason) == 0 { + reason = reasonPrefix + } + reason += fmt.Sprintf(" Removed '%s'.", ann) + } } - return true, "" + for ann := range new.Annotations { + v, ok := cur.Annotations[ann] + if !ok { + if len(reason) == 0 { + reason = reasonPrefix + } + reason += fmt.Sprintf(" Added '%s' with value '%s'.", ann, new.Annotations[ann]) + match = false + } else if v != new.Annotations[ann] { + if len(reason) == 0 { + reason = reasonPrefix + } + reason += fmt.Sprintf(" '%s' changed from '%s' to '%s'.", ann, v, new.Annotations[ann]) + match = false + } + } + + return match, reason } // SamePDB compares the PodDisruptionBudgets diff --git a/pkg/util/k8sutil/k8sutil_test.go b/pkg/util/k8sutil/k8sutil_test.go new file mode 
100644 index 000000000..12288243e --- /dev/null +++ b/pkg/util/k8sutil/k8sutil_test.go @@ -0,0 +1,310 @@ +package k8sutil + +import ( + "strings" + "testing" + + "github.com/zalando/postgres-operator/pkg/util/constants" + + v1 "k8s.io/api/core/v1" +) + +func newsService(ann map[string]string, svcT v1.ServiceType, lbSr []string) *v1.Service { + svc := &v1.Service{ + Spec: v1.ServiceSpec{ + Type: svcT, + LoadBalancerSourceRanges: lbSr, + }, + } + svc.Annotations = ann + return svc +} + +func TestServiceAnnotations(t *testing.T) { + tests := []struct { + about string + current *v1.Service + new *v1.Service + reason string + match bool + }{ + { + about: "two equal services", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeClusterIP, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeClusterIP, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: true, + }, + { + about: "services differ on service type", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeClusterIP, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's type "LoadBalancer" doesn't match the current one "ClusterIP"`, + }, + { + about: "services differ on lb source ranges", + current: newsService( + map[string]string{ + 
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"185.249.56.0/22"}), + match: false, + reason: `new service's LoadBalancerSourceRange doesn't match the current one`, + }, + { + about: "new service doesn't have lb source ranges", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{}), + match: false, + reason: `new service's LoadBalancerSourceRange doesn't match the current one`, + }, + { + about: "services differ on DNS annotation", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "new_clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: 'external-dns.alpha.kubernetes.io/hostname' changed from 'clstr.acid.zalan.do' to 'new_clstr.acid.zalan.do'.`, + }, + { 
+ about: "services differ on AWS ELB annotation", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: "1800", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: 'service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout' changed from '3600' to '1800'.`, + }, + { + about: "service changes existing annotation", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "bar", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "baz", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: 'foo' changed from 'bar' to 'baz'.`, + }, + { + about: "service changes multiple existing annotations", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "bar", + "bar": "foo", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: 
constants.ElbTimeoutAnnotationValue, + "foo": "baz", + "bar": "fooz", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + // Test just the prefix to avoid flakiness and map sorting + reason: `new service's annotations doesn't match the current one:`, + }, + { + about: "service adds a new custom annotation", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "bar", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: Added 'foo' with value 'bar'.`, + }, + { + about: "service removes a custom annotation", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "bar", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: Removed 'foo'.`, + }, + { + about: "service removes a custom annotation and adds a new one", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "bar", + }, + 
v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "bar": "foo", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: Removed 'foo'. Added 'bar' with value 'foo'.`, + }, + { + about: "service removes a custom annotation, adds a new one and change another", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "bar", + "zalan": "do", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "bar": "foo", + "zalan": "do.com", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: Removed 'foo'. Added 'bar' with value 'foo'. 
'zalan' changed from 'do' to 'do.com'`, + }, + { + about: "service add annotations", + current: newsService( + map[string]string{}, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + // Test just the prefix to avoid flakiness and map sorting + reason: `new service's annotations doesn't match the current one: Added `, + }, + } + for _, tt := range tests { + t.Run(tt.about, func(t *testing.T) { + match, reason := SameService(tt.current, tt.new) + if match && !tt.match { + t.Errorf("expected services to do not match: '%q' and '%q'", tt.current, tt.new) + return + } + if !match && tt.match { + t.Errorf("expected services to be the same: '%q' and '%q'", tt.current, tt.new) + return + } + if !match && !tt.match { + if !strings.HasPrefix(reason, tt.reason) { + t.Errorf("expected reason '%s', found '%s'", tt.reason, reason) + return + } + } + }) + } +} From 3b10dc645dd3f4112df7d15042f335b86c01b3fc Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Thu, 13 Feb 2020 16:24:15 +0100 Subject: [PATCH 19/31] patch/update services on type change (#824) * use Update when disabling LoadBalancer + added e2e test --- e2e/tests/test_e2e.py | 56 ++++++++++ manifests/operator-service-account-rbac.yaml | 1 + pkg/cluster/resources.go | 103 +++++-------------- pkg/cluster/sync.go | 2 +- 4 files changed, 86 insertions(+), 76 deletions(-) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index e92aba11f..2d81a0647 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -58,6 +58,55 @@ class EndToEndTestCase(unittest.TestCase): k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml") k8s.wait_for_pod_start('spilo-role=master') + 
@timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_enable_load_balancer(self): + ''' + Test if services are updated when enabling/disabling load balancers + ''' + + k8s = self.k8s + cluster_label = 'version=acid-minimal-cluster' + + # enable load balancer services + pg_patch_enable_lbs = { + "spec": { + "enableMasterLoadBalancer": True, + "enableReplicaLoadBalancer": True + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs) + # wait for service recreation + time.sleep(60) + + master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master') + self.assertEqual(master_svc_type, 'LoadBalancer', + "Expected LoadBalancer service type for master, found {}".format(master_svc_type)) + + repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica') + self.assertEqual(repl_svc_type, 'LoadBalancer', + "Expected LoadBalancer service type for replica, found {}".format(repl_svc_type)) + + # disable load balancer services again + pg_patch_disable_lbs = { + "spec": { + "enableMasterLoadBalancer": False, + "enableReplicaLoadBalancer": False + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs) + # wait for service recreation + time.sleep(60) + + master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master') + self.assertEqual(master_svc_type, 'ClusterIP', + "Expected ClusterIP service type for master, found {}".format(master_svc_type)) + + repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica') + self.assertEqual(repl_svc_type, 'ClusterIP', + "Expected ClusterIP service type for replica, found {}".format(repl_svc_type)) + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_min_resource_limits(self): ''' @@ -362,6 +411,13 @@ class K8s: pod_phase = pods[0].status.phase 
time.sleep(self.RETRY_TIMEOUT_SEC) + def get_service_type(self, svc_labels, namespace='default'): + svc_type = '' + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + svc_type = svc.spec.type + return svc_type + def check_service_annotations(self, svc_labels, annotations, namespace='default'): svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items for svc in svcs: diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml index a37abe476..4761c145e 100644 --- a/manifests/operator-service-account-rbac.yaml +++ b/manifests/operator-service-account-rbac.yaml @@ -114,6 +114,7 @@ rules: - delete - get - patch + - update # to CRUD the StatefulSet which controls the Postgres cluster instances - apiGroups: - apps diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index c94a7bb46..d6c2149bf 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -366,6 +366,11 @@ func (c *Cluster) createService(role PostgresRole) (*v1.Service, error) { } func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error { + var ( + svc *v1.Service + err error + ) + c.setProcessName("updating %v service", role) if c.Services[role] == nil { @@ -373,70 +378,6 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error } serviceName := util.NameFromMeta(c.Services[role].ObjectMeta) - endpointName := util.NameFromMeta(c.Endpoints[role].ObjectMeta) - // TODO: check if it possible to change the service type with a patch in future versions of Kubernetes - if newService.Spec.Type != c.Services[role].Spec.Type { - // service type has changed, need to replace the service completely. - // we cannot use just patch the current service, since it may contain attributes incompatible with the new type. 
- var ( - currentEndpoint *v1.Endpoints - err error - ) - - if role == Master { - // for the master service we need to re-create the endpoint as well. Get the up-to-date version of - // the addresses stored in it before the service is deleted (deletion of the service removes the endpoint) - currentEndpoint, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("could not get current cluster %s endpoints: %v", role, err) - } - } - err = c.KubeClient.Services(serviceName.Namespace).Delete(serviceName.Name, c.deleteOptions) - if err != nil { - return fmt.Errorf("could not delete service %q: %v", serviceName, err) - } - - // wait until the service is truly deleted - c.logger.Debugf("waiting for service to be deleted") - - err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, - func() (bool, error) { - _, err2 := c.KubeClient.Services(serviceName.Namespace).Get(serviceName.Name, metav1.GetOptions{}) - if err2 == nil { - return false, nil - } - if k8sutil.ResourceNotFound(err2) { - return true, nil - } - return false, err2 - }) - if err != nil { - return fmt.Errorf("could not delete service %q: %v", serviceName, err) - } - - // make sure we clear the stored service and endpoint status if the subsequent create fails. 
- c.Services[role] = nil - c.Endpoints[role] = nil - if role == Master { - // create the new endpoint using the addresses obtained from the previous one - endpointSpec := c.generateEndpoint(role, currentEndpoint.Subsets) - ep, err := c.KubeClient.Endpoints(endpointSpec.Namespace).Create(endpointSpec) - if err != nil { - return fmt.Errorf("could not create endpoint %q: %v", endpointName, err) - } - - c.Endpoints[role] = ep - } - - svc, err := c.KubeClient.Services(serviceName.Namespace).Create(newService) - if err != nil { - return fmt.Errorf("could not create service %q: %v", serviceName, err) - } - - c.Services[role] = svc - - return nil - } // update the service annotation in order to propagate ELB notation. if len(newService.ObjectMeta.Annotations) > 0 { @@ -454,18 +395,30 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error } } - patchData, err := specPatch(newService.Spec) - if err != nil { - return fmt.Errorf("could not form patch for the service %q: %v", serviceName, err) - } + // now, patch the service spec, but when disabling LoadBalancers do update instead + // patch does not work because of LoadBalancerSourceRanges field (even if set to nil) + oldServiceType := c.Services[role].Spec.Type + newServiceType := newService.Spec.Type + if newServiceType == "ClusterIP" && newServiceType != oldServiceType { + newService.ResourceVersion = c.Services[role].ResourceVersion + newService.Spec.ClusterIP = c.Services[role].Spec.ClusterIP + svc, err = c.KubeClient.Services(serviceName.Namespace).Update(newService) + if err != nil { + return fmt.Errorf("could not update service %q: %v", serviceName, err) + } + } else { + patchData, err := specPatch(newService.Spec) + if err != nil { + return fmt.Errorf("could not form patch for the service %q: %v", serviceName, err) + } - // update the service spec - svc, err := c.KubeClient.Services(serviceName.Namespace).Patch( - serviceName.Name, - types.MergePatchType, - patchData, "") - if err != nil { 
- return fmt.Errorf("could not patch service %q: %v", serviceName, err) + svc, err = c.KubeClient.Services(serviceName.Namespace).Patch( + serviceName.Name, + types.MergePatchType, + patchData, "") + if err != nil { + return fmt.Errorf("could not patch service %q: %v", serviceName, err) + } } c.Services[role] = svc diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index fa4fc9ec1..053db9ff7 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -116,7 +116,7 @@ func (c *Cluster) syncServices() error { c.logger.Debugf("syncing %s service", role) if err := c.syncEndpoint(role); err != nil { - return fmt.Errorf("could not sync %s endpont: %v", role, err) + return fmt.Errorf("could not sync %s endpoint: %v", role, err) } if err := c.syncService(role); err != nil { From 702a194c414f3fb8ceeb70b4b0cd35d56bd1c5bb Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Mon, 17 Feb 2020 11:25:07 +0100 Subject: [PATCH 20/31] switch to rbac/v1 (#829) * switch to rbac/v1 --- charts/postgres-operator-ui/templates/serviceaccount.yaml | 6 +++--- charts/postgres-operator/templates/clusterrole.yaml | 2 +- manifests/operator-service-account-rbac.yaml | 2 +- pkg/cluster/cluster.go | 4 ++-- pkg/controller/controller.go | 8 ++++---- pkg/util/k8sutil/k8sutil.go | 6 +++--- ui/manifests/ui-service-account-rbac.yaml | 2 +- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/charts/postgres-operator-ui/templates/serviceaccount.yaml b/charts/postgres-operator-ui/templates/serviceaccount.yaml index 4148938b0..7bb715167 100644 --- a/charts/postgres-operator-ui/templates/serviceaccount.yaml +++ b/charts/postgres-operator-ui/templates/serviceaccount.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ template "postgres-operator-ui.name" . 
}} @@ -17,7 +17,7 @@ metadata: app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} helm.sh/chart: {{ template "postgres-operator-ui.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/instance: {{ .Release.Name }} rules: - apiGroups: - acid.zalan.do @@ -78,4 +78,4 @@ subjects: # note: the cluster role binding needs to be defined # for every namespace the operator-ui service account lives in. name: {{ template "postgres-operator-ui.name" . }} - namespace: {{ .Release.Namespace }} \ No newline at end of file + namespace: {{ .Release.Namespace }} diff --git a/charts/postgres-operator/templates/clusterrole.yaml b/charts/postgres-operator/templates/clusterrole.yaml index f8550a539..f7fe1634c 100644 --- a/charts/postgres-operator/templates/clusterrole.yaml +++ b/charts/postgres-operator/templates/clusterrole.yaml @@ -1,5 +1,5 @@ {{ if .Values.rbac.create }} -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "postgres-operator.serviceAccountName" . 
}} diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml index 4761c145e..5e43cc03b 100644 --- a/manifests/operator-service-account-rbac.yaml +++ b/manifests/operator-service-account-rbac.yaml @@ -5,7 +5,7 @@ metadata: namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: zalando-postgres-operator diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index c560c4cdf..91e7a5195 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -29,7 +29,7 @@ import ( "github.com/zalando/postgres-operator/pkg/util/patroni" "github.com/zalando/postgres-operator/pkg/util/teams" "github.com/zalando/postgres-operator/pkg/util/users" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" ) var ( @@ -45,7 +45,7 @@ type Config struct { RestConfig *rest.Config InfrastructureRoles map[string]spec.PgUser // inherited from the controller PodServiceAccount *v1.ServiceAccount - PodServiceAccountRoleBinding *rbacv1beta1.RoleBinding + PodServiceAccountRoleBinding *rbacv1.RoleBinding } type kubeResources struct { diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 831078f3e..f67d99c1d 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -7,7 +7,7 @@ import ( "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" @@ -57,7 +57,7 @@ type Controller struct { workerLogs map[uint32]ringlog.RingLogger PodServiceAccount *v1.ServiceAccount - PodServiceAccountRoleBinding *rbacv1beta1.RoleBinding + PodServiceAccountRoleBinding *rbacv1.RoleBinding } // NewController creates a new controller @@ -198,7 +198,7 @@ func (c *Controller) initRoleBinding() { if 
c.opConfig.PodServiceAccountRoleBindingDefinition == "" { c.opConfig.PodServiceAccountRoleBindingDefinition = fmt.Sprintf(` { - "apiVersion": "rbac.authorization.k8s.io/v1beta1", + "apiVersion": "rbac.authorization.k8s.io/v1", "kind": "RoleBinding", "metadata": { "name": "%s" @@ -227,7 +227,7 @@ func (c *Controller) initRoleBinding() { case groupVersionKind.Kind != "RoleBinding": panic(fmt.Errorf("role binding definition in the operator config map defines another type of resource: %v", groupVersionKind.Kind)) default: - c.PodServiceAccountRoleBinding = obj.(*rbacv1beta1.RoleBinding) + c.PodServiceAccountRoleBinding = obj.(*rbacv1.RoleBinding) c.PodServiceAccountRoleBinding.Namespace = "" c.logger.Info("successfully parsed") diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index c7b2366b0..509b12c19 100644 --- a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -18,7 +18,7 @@ import ( appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" - rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" + rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -39,7 +39,7 @@ type KubernetesClient struct { corev1.NamespacesGetter corev1.ServiceAccountsGetter appsv1.StatefulSetsGetter - rbacv1beta1.RoleBindingsGetter + rbacv1.RoleBindingsGetter policyv1beta1.PodDisruptionBudgetsGetter apiextbeta1.CustomResourceDefinitionsGetter clientbatchv1beta1.CronJobsGetter @@ -103,7 +103,7 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) { kubeClient.StatefulSetsGetter = client.AppsV1() kubeClient.PodDisruptionBudgetsGetter = client.PolicyV1beta1() kubeClient.RESTClient = client.CoreV1().RESTClient() - kubeClient.RoleBindingsGetter = client.RbacV1beta1() + kubeClient.RoleBindingsGetter = client.RbacV1() kubeClient.CronJobsGetter = client.BatchV1beta1() 
apiextClient, err := apiextclient.NewForConfig(cfg) diff --git a/ui/manifests/ui-service-account-rbac.yaml b/ui/manifests/ui-service-account-rbac.yaml index 4ae218e74..f0a6e8bb7 100644 --- a/ui/manifests/ui-service-account-rbac.yaml +++ b/ui/manifests/ui-service-account-rbac.yaml @@ -5,7 +5,7 @@ metadata: namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: postgres-operator-ui From 4b440e59de984e83cd01af3431e283d03e20d341 Mon Sep 17 00:00:00 2001 From: Jonathan Juares Beber Date: Tue, 18 Feb 2020 16:45:44 +0100 Subject: [PATCH 21/31] Fix test flakiness on TestSameService (#833) The code added on #818 depends on map sorting to return a static reason for service annotation changes. To avoid tests flakiness and map sorting the tests include a `strings.HasPrefix` instead of comparing the whole string. One of the test cases, `service_removes_a_custom_annotation,_adds_a_new_one_and_change_another`, is trying to test the whole reason string. This commit replaces the test case reason, for only the reason prefix. It removes the flakiness from the tests. As all the cases (annotation adding, removing and value changing) are tested before, it's safe to test only prefixes. Also, it renames the test name from `TestServiceAnnotations` to `TestSameService` and introduces a better description in case of test failure, describing that only prefixes are tested. --- pkg/util/k8sutil/k8sutil_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pkg/util/k8sutil/k8sutil_test.go b/pkg/util/k8sutil/k8sutil_test.go index 12288243e..9b4f2eac3 100644 --- a/pkg/util/k8sutil/k8sutil_test.go +++ b/pkg/util/k8sutil/k8sutil_test.go @@ -20,7 +20,7 @@ func newsService(ann map[string]string, svcT v1.ServiceType, lbSr []string) *v1. 
return svc } -func TestServiceAnnotations(t *testing.T) { +func TestSameService(t *testing.T) { tests := []struct { about string current *v1.Service @@ -267,8 +267,9 @@ func TestServiceAnnotations(t *testing.T) { }, v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), - match: false, - reason: `new service's annotations doesn't match the current one: Removed 'foo'. Added 'bar' with value 'foo'. 'zalan' changed from 'do' to 'do.com'`, + match: false, + // Test just the prefix to avoid flakiness and map sorting + reason: `new service's annotations doesn't match the current one: Removed 'foo'.`, }, { about: "service add annotations", @@ -301,7 +302,7 @@ func TestServiceAnnotations(t *testing.T) { } if !match && !tt.match { if !strings.HasPrefix(reason, tt.reason) { - t.Errorf("expected reason '%s', found '%s'", tt.reason, reason) + t.Errorf("expected reason prefix '%s', found '%s'", tt.reason, reason) return } } From aea9e9bd330c3fa934ca5fabcb8cb389a6356485 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Wed, 19 Feb 2020 12:32:54 +0100 Subject: [PATCH 22/31] postgres-pod clusterrole (#832) * define postgres-pod clusterrole and align rbac in chart * align UI chart rbac with operator and update doc * operator RBAC needs podsecuritypolicy to grant it to postgres-pod --- .../templates/_helpers.tpl | 7 ++ .../templates/clusterrole.yaml | 52 +++++++++ .../templates/clusterrolebinding.yaml | 19 ++++ .../templates/deployment.yaml | 8 +- .../templates/serviceaccount.yaml | 76 +------------ charts/postgres-operator-ui/values.yaml | 15 ++- .../templates/clusterrole-postgres-pod.yaml | 53 +++++++++ .../templates/clusterrole.yaml | 64 ++++++++--- .../templates/clusterrolebinding.yaml | 2 - .../templates/configmap.yaml | 1 - .../templates/operatorconfiguration.yaml | 1 - charts/postgres-operator/values-crd.yaml | 2 + charts/postgres-operator/values.yaml | 2 + docs/administrator.md | 49 +++------ docs/reference/operator_parameters.md | 17 +-- 
manifests/configmap.yaml | 2 +- manifests/operator-service-account-rbac.yaml | 104 ++++++++++++++---- manifests/postgres-operator.yaml | 2 +- ...gresql-operator-default-configuration.yaml | 2 +- manifests/user-facing-clusterroles.yaml | 10 +- pkg/controller/controller.go | 21 ++-- pkg/util/config/config.go | 2 +- ui/manifests/ui-service-account-rbac.yaml | 2 - 23 files changed, 333 insertions(+), 180 deletions(-) create mode 100644 charts/postgres-operator-ui/templates/clusterrole.yaml create mode 100644 charts/postgres-operator-ui/templates/clusterrolebinding.yaml create mode 100644 charts/postgres-operator/templates/clusterrole-postgres-pod.yaml diff --git a/charts/postgres-operator-ui/templates/_helpers.tpl b/charts/postgres-operator-ui/templates/_helpers.tpl index a5e97081d..d83b9291a 100644 --- a/charts/postgres-operator-ui/templates/_helpers.tpl +++ b/charts/postgres-operator-ui/templates/_helpers.tpl @@ -24,6 +24,13 @@ If release name contains chart name it will be used as a full name. {{- end -}} {{- end -}} +{{/* +Create a service account name. +*/}} +{{- define "postgres-operator-ui.serviceAccountName" -}} +{{ default (include "postgres-operator-ui.fullname" .) .Values.serviceAccount.name }} +{{- end -}} + {{/* Create chart name and version as used by the chart label. */}} diff --git a/charts/postgres-operator-ui/templates/clusterrole.yaml b/charts/postgres-operator-ui/templates/clusterrole.yaml new file mode 100644 index 000000000..4f76400ec --- /dev/null +++ b/charts/postgres-operator-ui/templates/clusterrole.yaml @@ -0,0 +1,52 @@ +{{ if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "postgres-operator-ui.serviceAccountName" . }} + labels: + app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} + helm.sh/chart: {{ template "postgres-operator-ui.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +rules: +- apiGroups: + - acid.zalan.do + resources: + - postgresqls + verbs: + - create + - delete + - get + - list + - patch + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - get + - list +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list +{{ end }} diff --git a/charts/postgres-operator-ui/templates/clusterrolebinding.yaml b/charts/postgres-operator-ui/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..7c874d3f7 --- /dev/null +++ b/charts/postgres-operator-ui/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{ if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "postgres-operator-ui.serviceAccountName" . }} + labels: + app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} + helm.sh/chart: {{ template "postgres-operator-ui.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "postgres-operator-ui.serviceAccountName" . }} +subjects: +- kind: ServiceAccount + name: {{ include "postgres-operator-ui.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{ end }} diff --git a/charts/postgres-operator-ui/templates/deployment.yaml b/charts/postgres-operator-ui/templates/deployment.yaml index 924bcf000..da0280e61 100644 --- a/charts/postgres-operator-ui/templates/deployment.yaml +++ b/charts/postgres-operator-ui/templates/deployment.yaml @@ -20,7 +20,7 @@ spec: app.kubernetes.io/instance: {{ .Release.Name }} team: "acid" # Parameterize? 
spec: - serviceAccountName: {{ template "postgres-operator-ui.name" . }} + serviceAccountName: {{ include "postgres-operator-ui.serviceAccountName" . }} containers: - name: "service" image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" @@ -45,8 +45,8 @@ spec: value: {{ .Values.envs.targetNamespace }} - name: "TEAMS" value: |- - [ - "acid" + [ + "acid" ] - name: "OPERATOR_UI_CONFIG" value: |- @@ -66,4 +66,4 @@ spec: "9.6", "9.5" ] - } \ No newline at end of file + } diff --git a/charts/postgres-operator-ui/templates/serviceaccount.yaml b/charts/postgres-operator-ui/templates/serviceaccount.yaml index 7bb715167..4c5a25543 100644 --- a/charts/postgres-operator-ui/templates/serviceaccount.yaml +++ b/charts/postgres-operator-ui/templates/serviceaccount.yaml @@ -1,81 +1,11 @@ +{{ if .Values.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: - name: {{ template "postgres-operator-ui.name" . }} + name: {{ include "postgres-operator-ui.serviceAccountName" . }} labels: app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} helm.sh/chart: {{ template "postgres-operator-ui.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "postgres-operator-ui.name" . }} - labels: - app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} - helm.sh/chart: {{ template "postgres-operator-ui.chart" . 
}} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -rules: -- apiGroups: - - acid.zalan.do - resources: - - postgresqls - verbs: - - create - - delete - - get - - list - - patch - - update -- apiGroups: - - "" - resources: - - pods - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - services - verbs: - - get - - list -- apiGroups: - - apps - resources: - - statefulsets - verbs: - - get - - list -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "postgres-operator-ui.name" . }} - labels: - app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} - helm.sh/chart: {{ template "postgres-operator-ui.chart" . }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "postgres-operator-ui.name" . }} -subjects: -- kind: ServiceAccount -# note: the cluster role binding needs to be defined -# for every namespace the operator-ui service account lives in. - name: {{ template "postgres-operator-ui.name" . }} - namespace: {{ .Release.Namespace }} +{{ end }} diff --git a/charts/postgres-operator-ui/values.yaml b/charts/postgres-operator-ui/values.yaml index 9351d470e..dca093410 100644 --- a/charts/postgres-operator-ui/values.yaml +++ b/charts/postgres-operator-ui/values.yaml @@ -11,6 +11,17 @@ image: tag: v1.2.0 pullPolicy: "IfNotPresent" +rbac: + # Specifies whether RBAC resources should be created + create: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + # configure UI pod resources resources: limits: @@ -22,7 +33,7 @@ resources: # configure UI ENVs envs: - # IMPORTANT: While operator chart and UI chart are idendependent, this is the interface between + # IMPORTANT: While operator chart and UI chart are idendependent, this is the interface between # UI and operator API. Insert the service name of the operator API here! operatorApiUrl: "http://postgres-operator:8080" targetNamespace: "default" @@ -44,4 +55,4 @@ ingress: tls: [] # - secretName: ui-tls # hosts: - # - ui.exmaple.org \ No newline at end of file + # - ui.exmaple.org diff --git a/charts/postgres-operator/templates/clusterrole-postgres-pod.yaml b/charts/postgres-operator/templates/clusterrole-postgres-pod.yaml new file mode 100644 index 000000000..c327d9101 --- /dev/null +++ b/charts/postgres-operator/templates/clusterrole-postgres-pod.yaml @@ -0,0 +1,53 @@ +{{ if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: postgres-pod + labels: + app.kubernetes.io/name: {{ template "postgres-operator.name" . }} + helm.sh/chart: {{ template "postgres-operator.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +rules: +# Patroni needs to watch and manage endpoints +- apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +# Patroni needs to watch pods +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - patch + - update + - watch +# to let Patroni create a headless service +- apiGroups: + - "" + resources: + - services + verbs: + - create +# to run privileged pods +- apiGroups: + - extensions + resources: + - podsecuritypolicies + resourceNames: + - privileged + verbs: + - use +{{ end }} diff --git a/charts/postgres-operator/templates/clusterrole.yaml b/charts/postgres-operator/templates/clusterrole.yaml index f7fe1634c..9a4165797 100644 --- a/charts/postgres-operator/templates/clusterrole.yaml +++ b/charts/postgres-operator/templates/clusterrole.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} rules: +# all verbs allowed for custom operator resources - apiGroups: - acid.zalan.do resources: @@ -16,7 +17,15 @@ rules: - postgresqls/status - operatorconfigurations verbs: - - "*" + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +# to create or get/update CRDs when starting up - apiGroups: - apiextensions.k8s.io resources: @@ -26,12 +35,14 @@ rules: - get - patch - update +# to read configuration from ConfigMaps - apiGroups: - "" resources: - configmaps verbs: - get +# to manage endpoints which are also used by Patroni - apiGroups: - "" resources: @@ -43,7 +54,9 @@ rules: - get - list - patch - - watch # needed if zalando-postgres-operator account is used for pods as well + - update + - watch +# to CRUD secrets for database access - apiGroups: - "" resources: @@ -53,6 +66,7 @@ rules: - update - delete - get +# to check nodes for node 
readiness label - apiGroups: - "" resources: @@ -61,6 +75,7 @@ rules: - get - list - watch +# to read or delete existing PVCs. Creation via StatefulSet - apiGroups: - "" resources: @@ -69,6 +84,7 @@ rules: - delete - get - list + # to read existing PVs. Creation should be done via dynamic provisioning - apiGroups: - "" resources: @@ -77,6 +93,7 @@ rules: - get - list - update # only for resizing AWS volumes +# to watch Spilo pods and do rolling updates. Creation via StatefulSet - apiGroups: - "" resources: @@ -86,13 +103,16 @@ rules: - get - list - watch + - update - patch +# to resize the filesystem in Spilo pods when increasing volume size - apiGroups: - "" resources: - pods/exec verbs: - create +# to CRUD services to point to Postgres cluster instances - apiGroups: - "" resources: @@ -102,6 +122,8 @@ rules: - delete - get - patch + - update +# to CRUD the StatefulSet which controls the Postgres cluster instances - apiGroups: - apps resources: @@ -112,12 +134,26 @@ rules: - get - list - patch +# to CRUD cron jobs for logical backups +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - create + - delete + - get + - list + - patch + - update +# to get namespaces operator resources can run in - apiGroups: - "" resources: - namespaces verbs: - get +# to define PDBs. Update happens via delete/create - apiGroups: - policy resources: @@ -126,6 +162,7 @@ rules: - create - delete - get +# to create ServiceAccounts in each namespace the operator watches - apiGroups: - "" resources: @@ -133,30 +170,21 @@ rules: verbs: - get - create +# to create role bindings to the postgres-pod service account - apiGroups: - - "rbac.authorization.k8s.io" + - rbac.authorization.k8s.io resources: - rolebindings verbs: - get - create +# to grant privilege to run privileged pods - apiGroups: - - "rbac.authorization.k8s.io" + - extensions resources: - - clusterroles - verbs: - - bind + - podsecuritypolicies resourceNames: - - {{ include "postgres-operator.serviceAccountName" . 
}} -- apiGroups: - - batch - resources: - - cronjobs # enables logical backups + - privileged verbs: - - create - - delete - - get - - list - - patch - - update + - use {{ end }} diff --git a/charts/postgres-operator/templates/clusterrolebinding.yaml b/charts/postgres-operator/templates/clusterrolebinding.yaml index bfa21b42f..dbf65d00e 100644 --- a/charts/postgres-operator/templates/clusterrolebinding.yaml +++ b/charts/postgres-operator/templates/clusterrolebinding.yaml @@ -14,8 +14,6 @@ roleRef: name: {{ include "postgres-operator.serviceAccountName" . }} subjects: - kind: ServiceAccount -# note: the cluster role binding needs to be defined -# for every namespace the operator service account lives in. name: {{ include "postgres-operator.serviceAccountName" . }} namespace: {{ .Release.Namespace }} {{ end }} diff --git a/charts/postgres-operator/templates/configmap.yaml b/charts/postgres-operator/templates/configmap.yaml index 95eeb9546..0b976294e 100644 --- a/charts/postgres-operator/templates/configmap.yaml +++ b/charts/postgres-operator/templates/configmap.yaml @@ -9,7 +9,6 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} data: - pod_service_account_name: {{ include "postgres-operator.serviceAccountName" . }} {{ toYaml .Values.configGeneral | indent 2 }} {{ toYaml .Values.configUsers | indent 2 }} {{ toYaml .Values.configKubernetes | indent 2 }} diff --git a/charts/postgres-operator/templates/operatorconfiguration.yaml b/charts/postgres-operator/templates/operatorconfiguration.yaml index 6a301c1fb..06e9c7605 100644 --- a/charts/postgres-operator/templates/operatorconfiguration.yaml +++ b/charts/postgres-operator/templates/operatorconfiguration.yaml @@ -14,7 +14,6 @@ configuration: {{ toYaml .Values.configUsers | indent 4 }} kubernetes: oauth_token_secret_name: {{ template "postgres-operator.fullname" . }} - pod_service_account_name: {{ include "postgres-operator.serviceAccountName" . 
}} {{ toYaml .Values.configKubernetes | indent 4 }} postgres_pod_resources: {{ toYaml .Values.configPostgresPodResources | indent 4 }} diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index 1f9b5e495..08c255a04 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -100,6 +100,8 @@ configKubernetes: pod_management_policy: "ordered_ready" # label assigned to the Postgres pods (and services/endpoints) pod_role_label: spilo-role + # name of service account to be used by postgres cluster pods + pod_service_account_name: "postgres-pod" # Postgres pods are terminated forcefully after this timeout pod_terminate_grace_period: 5m # template for database user secrets generated by the operator diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 1be5851d2..60a0a1f04 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -93,6 +93,8 @@ configKubernetes: pod_management_policy: "ordered_ready" # label assigned to the Postgres pods (and services/endpoints) pod_role_label: spilo-role + # name of service account to be used by postgres cluster pods + pod_service_account_name: "postgres-pod" # Postgres pods are terminated forcefully after this timeout pod_terminate_grace_period: 5m # template for database user secrets generated by the operator diff --git a/docs/administrator.md b/docs/administrator.md index 2e86193c0..2175ae595 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -47,6 +47,12 @@ patching the CRD manifest: zk8 patch crd postgresqls.acid.zalan.do -p '{"spec":{"validation": null}}' ``` +## Non-default cluster domain + +If your cluster uses a DNS domain other than the default `cluster.local`, this +needs to be set in the operator configuration (`cluster_domain` variable). This +is used by the operator to connect to the clusters after creation. 
+ ## Namespaces ### Select the namespace to deploy to @@ -89,36 +95,13 @@ lacks access rights to any of them (except K8s system namespaces like 'list pods' execute at the cluster scope and fail at the first violation of access rights. -The watched namespace also needs to have a (possibly different) service account -in the case database pods need to talk to the K8s API (e.g. when using -K8s-native configuration of Patroni). The operator checks that the -`pod_service_account_name` exists in the target namespace, and, if not, deploys -there the `pod_service_account_definition` from the operator -[`Config`](../pkg/util/config/config.go) with the default value of: - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: operator -``` - -In this definition, the operator overwrites the account's name to match -`pod_service_account_name` and the `default` namespace to match the target -namespace. The operator performs **no** further syncing of this account. - -## Non-default cluster domain - -If your cluster uses a DNS domain other than the default `cluster.local`, this -needs to be set in the operator configuration (`cluster_domain` variable). This -is used by the operator to connect to the clusters after creation. - ## Role-based access control for the operator The manifest [`operator-service-account-rbac.yaml`](../manifests/operator-service-account-rbac.yaml) defines the service account, cluster roles and bindings needed for the operator -to function under access control restrictions. To deploy the operator with this -RBAC policy use: +to function under access control restrictions. The file also includes a cluster +role `postgres-pod` with privileges for Patroni to watch and manage pods and +endpoints. 
To deploy the operator with this RBAC policies use: ```bash kubectl create -f manifests/configmap.yaml @@ -127,14 +110,14 @@ kubectl create -f manifests/postgres-operator.yaml kubectl create -f manifests/minimal-postgres-manifest.yaml ``` -### Service account and cluster roles +### Namespaced service account and role binding -Note that the service account is named `zalando-postgres-operator`. You may have -to change the `service_account_name` in the operator ConfigMap and -`serviceAccountName` in the `postgres-operator` deployment appropriately. This -is done intentionally to avoid breaking those setups that already work with the -default `operator` account. In the future the operator should ideally be run -under the `zalando-postgres-operator` service account. +For each namespace the operator watches it creates (or reads) a service account +and role binding to be used by the Postgres Pods. The service account is bound +to the `postgres-pod` cluster role. The name and definitions of these resources +can be [configured](reference/operator_parameters.md#kubernetes-resources). +Note, that the operator performs **no** further syncing of namespaced service +accounts and role bindings. ### Give K8s users access to create/list `postgresqls` diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index e3893ea31..ca972c22b 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -152,21 +152,22 @@ configuration they are grouped under the `kubernetes` key. service account used by Patroni running on individual Pods to communicate with the operator. Required even if native Kubernetes support in Patroni is not used, because Patroni keeps pod labels in sync with the instance role. - The default is `operator`. + The default is `postgres-pod`. 
* **pod_service_account_definition** - The operator tries to create the pod Service Account in the namespace that - doesn't define such an account using the YAML definition provided by this - option. If not defined, a simple definition that contains only the name will - be used. The default is empty. + On Postgres cluster creation the operator tries to create the service account + for the Postgres pods if it does not exist in the namespace. The internal + default service account definition (defines only the name) can be overwritten + with this parameter. Make sure to provide a valid YAML or JSON string. The + default is empty. * **pod_service_account_role_binding_definition** - This definition must bind pod service account to a role with permission + This definition must bind the pod service account to a role with permission sufficient for the pods to start and for Patroni to access K8s endpoints; service account on its own lacks any such rights starting with K8s v1.8. If not explicitly defined by the user, a simple definition that binds the - account to the operator's own 'zalando-postgres-operator' cluster role will - be used. The default is empty. + account to the 'postgres-pod' [cluster role](../../manifests/operator-service-account-rbac.yaml#L198) + will be used. The default is empty. 
* **pod_terminate_grace_period** Postgres pods are [terminated forcefully](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods) diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index d26c83edf..6b00c30f4 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -63,7 +63,7 @@ data: pod_label_wait_timeout: 10m pod_management_policy: "ordered_ready" pod_role_label: spilo-role - pod_service_account_name: "zalando-postgres-operator" + pod_service_account_name: "postgres-pod" pod_terminate_grace_period: 5m # postgres_superuser_teams: "postgres_superusers" # protected_role_names: "admin" diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml index 5e43cc03b..80fcd89ef 100644 --- a/manifests/operator-service-account-rbac.yaml +++ b/manifests/operator-service-account-rbac.yaml @@ -1,14 +1,14 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: zalando-postgres-operator + name: postgres-operator namespace: default --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: zalando-postgres-operator + name: postgres-operator rules: # all verbs allowed for custom operator resources - apiGroups: @@ -18,7 +18,14 @@ rules: - postgresqls/status - operatorconfigurations verbs: - - "*" + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch # to create or get/update CRDs when starting up - apiGroups: - apiextensions.k8s.io @@ -48,7 +55,8 @@ rules: - get - list - patch - - watch # needed if zalando-postgres-operator account is used for pods as well + - update + - watch # to CRUD secrets for database access - apiGroups: - "" @@ -96,6 +104,7 @@ rules: - get - list - watch + - update - patch # to resize the filesystem in Spilo pods when increasing volume size - apiGroups: @@ -126,6 +135,18 @@ rules: - get - list - patch +# to CRUD cron jobs for logical backups +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - 
create + - delete + - get + - list + - patch + - update # to get namespaces operator resources can run in - apiGroups: - "" @@ -150,39 +171,82 @@ rules: verbs: - get - create -# to create role bindings to the operator service account +# to create role bindings to the postgres-pod service account - apiGroups: - - "rbac.authorization.k8s.io" + - rbac.authorization.k8s.io resources: - rolebindings verbs: - get - create -# to CRUD cron jobs for logical backups +# to grant privilege to run privileged pods - apiGroups: - - batch + - extensions resources: - - cronjobs + - podsecuritypolicies + resourceNames: + - privileged verbs: - - create - - delete - - get - - list - - patch - - update + - use --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: zalando-postgres-operator + name: postgres-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: zalando-postgres-operator + name: postgres-operator subjects: - kind: ServiceAccount -# note: the cluster role binding needs to be defined -# for every namespace the operator service account lives in. 
- name: zalando-postgres-operator + name: postgres-operator namespace: default + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: postgres-pod +rules: +# Patroni needs to watch and manage endpoints +- apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +# Patroni needs to watch pods +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - patch + - update + - watch +# to let Patroni create a headless service +- apiGroups: + - "" + resources: + - services + verbs: + - create +# to run privileged pods +- apiGroups: + - extensions + resources: + - podsecuritypolicies + resourceNames: + - privileged + verbs: + - use diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index a06abfc68..e3bc3e3e4 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -12,7 +12,7 @@ spec: labels: name: postgres-operator spec: - serviceAccountName: zalando-postgres-operator + serviceAccountName: postgres-operator containers: - name: postgres-operator image: registry.opensource.zalan.do/acid/postgres-operator:v1.3.1 diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index efd1a5396..695a4e9c5 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -45,7 +45,7 @@ configuration: # pod_priority_class_name: "" pod_role_label: spilo-role # pod_service_account_definition: "" - pod_service_account_name: zalando-postgres-operator + pod_service_account_name: postgres-pod # pod_service_account_role_binding_definition: "" pod_terminate_grace_period: 5m secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" diff --git a/manifests/user-facing-clusterroles.yaml b/manifests/user-facing-clusterroles.yaml index 
800aafdb9..64392a19a 100644 --- a/manifests/user-facing-clusterroles.yaml +++ b/manifests/user-facing-clusterroles.yaml @@ -11,7 +11,14 @@ rules: - postgresqls - postgresqls/status verbs: - - "*" + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch --- apiVersion: rbac.authorization.k8s.io/v1 @@ -48,4 +55,3 @@ rules: - get - list - watch - diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index f67d99c1d..3c49b9a13 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -161,11 +161,12 @@ func (c *Controller) initPodServiceAccount() { if c.opConfig.PodServiceAccountDefinition == "" { c.opConfig.PodServiceAccountDefinition = ` - { "apiVersion": "v1", - "kind": "ServiceAccount", - "metadata": { - "name": "operator" - } + { + "apiVersion": "v1", + "kind": "ServiceAccount", + "metadata": { + "name": "postgres-pod" + } }` } @@ -175,13 +176,13 @@ func (c *Controller) initPodServiceAccount() { switch { case err != nil: - panic(fmt.Errorf("Unable to parse pod service account definition from the operator config map: %v", err)) + panic(fmt.Errorf("Unable to parse pod service account definition from the operator configuration: %v", err)) case groupVersionKind.Kind != "ServiceAccount": - panic(fmt.Errorf("pod service account definition in the operator config map defines another type of resource: %v", groupVersionKind.Kind)) + panic(fmt.Errorf("pod service account definition in the operator configuration defines another type of resource: %v", groupVersionKind.Kind)) default: c.PodServiceAccount = obj.(*v1.ServiceAccount) if c.PodServiceAccount.Name != c.opConfig.PodServiceAccountName { - c.logger.Warnf("in the operator config map, the pod service account name %v does not match the name %v given in the account definition; using the former for consistency", c.opConfig.PodServiceAccountName, c.PodServiceAccount.Name) + c.logger.Warnf("in the operator configuration, the pod service account name %v 
does not match the name %v given in the account definition; using the former for consistency", c.opConfig.PodServiceAccountName, c.PodServiceAccount.Name) c.PodServiceAccount.Name = c.opConfig.PodServiceAccountName } c.PodServiceAccount.Namespace = "" @@ -223,9 +224,9 @@ func (c *Controller) initRoleBinding() { switch { case err != nil: - panic(fmt.Errorf("Unable to parse the definition of the role binding for the pod service account definition from the operator config map: %v", err)) + panic(fmt.Errorf("unable to parse the definition of the role binding for the pod service account definition from the operator configuration: %v", err)) case groupVersionKind.Kind != "RoleBinding": - panic(fmt.Errorf("role binding definition in the operator config map defines another type of resource: %v", groupVersionKind.Kind)) + panic(fmt.Errorf("role binding definition in the operator configuration defines another type of resource: %v", groupVersionKind.Kind)) default: c.PodServiceAccountRoleBinding = obj.(*rbacv1.RoleBinding) c.PodServiceAccountRoleBinding.Namespace = "" diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index e4e429abb..ec4af6427 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -96,7 +96,7 @@ type Config struct { DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16"` Sidecars map[string]string `name:"sidecar_docker_images"` // default name `operator` enables backward compatibility with the older ServiceAccountName field - PodServiceAccountName string `name:"pod_service_account_name" default:"operator"` + PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"` // value of this string must be valid JSON or YAML; see initPodServiceAccount PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""` PodServiceAccountRoleBindingDefinition string `name:"pod_service_account_role_binding_definition" default:""` diff --git 
a/ui/manifests/ui-service-account-rbac.yaml b/ui/manifests/ui-service-account-rbac.yaml index f0a6e8bb7..2e09797a0 100644 --- a/ui/manifests/ui-service-account-rbac.yaml +++ b/ui/manifests/ui-service-account-rbac.yaml @@ -61,7 +61,5 @@ roleRef: name: postgres-operator-ui subjects: - kind: ServiceAccount -# note: the cluster role binding needs to be defined -# for every namespace the operator-ui service account lives in. name: postgres-operator-ui namespace: default From d5660f65bbef956c69bc7d6ea46c7f519432917e Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Wed, 19 Feb 2020 12:58:24 +0100 Subject: [PATCH 23/31] [UI] add tab for monthly costs per cluster (#796) * add tab for monthly costs per cluster * sync run_local and update version number * lowering resources * some Makefile polishing and updated admin docs on UI * extend admin docs on UI * add api-service manifest for operator * set min limits in UI to default min limits of operator * reflect new UI helm charts in docs * make cluster name label configurable --- charts/postgres-operator-ui/Chart.yaml | 4 +- .../postgres-operator/templates/service.yaml | 5 +- docs/administrator.md | 72 ++++++++++++----- docs/operator-ui.md | 10 ++- docs/quickstart.md | 57 ++++++++++++- manifests/api-service.yaml | 12 +++ manifests/kustomization.yaml | 1 + ui/Makefile | 20 ++--- ui/app/package.json | 2 +- ui/app/src/new.tag.pug | 16 ++-- ui/app/src/postgresql.tag.pug | 3 + ui/app/src/postgresqls.tag.pug | 79 +++++++++++++++---- ui/manifests/deployment.yaml | 23 ++++-- ui/operator_ui/main.py | 12 +++ ui/operator_ui/spiloutils.py | 6 +- ui/requirements.txt | 14 ++-- ui/run_local.sh | 7 +- 17 files changed, 257 insertions(+), 86 deletions(-) create mode 100644 manifests/api-service.yaml diff --git a/charts/postgres-operator-ui/Chart.yaml b/charts/postgres-operator-ui/Chart.yaml index 4be7d8af1..4418675b6 100644 --- a/charts/postgres-operator-ui/Chart.yaml +++ b/charts/postgres-operator-ui/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 
name: postgres-operator-ui version: 0.1.0 -appVersion: 1.2.0 +appVersion: 1.3.0 home: https://github.com/zalando/postgres-operator description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience keywords: @@ -12,6 +12,8 @@ keywords: - patroni - spilo maintainers: +- name: Zalando + email: opensource@zalando.de - name: siku4 email: sk@sik-net.de sources: diff --git a/charts/postgres-operator/templates/service.yaml b/charts/postgres-operator/templates/service.yaml index 52990c5d4..38ea9a062 100644 --- a/charts/postgres-operator/templates/service.yaml +++ b/charts/postgres-operator/templates/service.yaml @@ -8,6 +8,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} name: {{ template "postgres-operator.fullname" . }} spec: + type: ClusterIP ports: - port: 8080 protocol: TCP @@ -15,7 +16,3 @@ spec: selector: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/name: {{ template "postgres-operator.name" . }} - sessionAffinity: None - type: ClusterIP -status: - loadBalancer: {} \ No newline at end of file diff --git a/docs/administrator.md b/docs/administrator.md index 2175ae595..3597b65ca 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -480,37 +480,71 @@ A secret can be pre-provisioned in different ways: ## Setting up the Postgres Operator UI -With the v1.2 release the Postgres Operator is shipped with a browser-based +Since the v1.2 release the Postgres Operator is shipped with a browser-based configuration user interface (UI) that simplifies managing Postgres clusters -with the operator. The UI runs with Node.js and comes with it's own Docker -image. +with the operator. -Run NPM to continuously compile `tags/js` code. Basically, it creates an -`app.js` file in: `static/build/app.js` +### Building the UI image -``` -(cd ui/app && npm start) -``` - -To build the Docker image open a shell and change to the `ui` folder. 
Then run: +The UI runs with Node.js and comes with it's own Docker +image. However, installing Node.js to build the operator UI is not required. It +is handled via Docker containers when running: ```bash -docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.2.0 . +make docker ``` -Apply all manifests for the `ui/manifests` folder to deploy the Postgres -Operator UI on K8s. For local tests you don't need the Ingress resource. +### Configure endpoints and options + +The UI talks to the K8s API server as well as the Postgres Operator [REST API](developer.md#debugging-the-operator). +K8s API server URLs are loaded from the machine's kubeconfig environment by +default. Alternatively, a list can also be passed when starting the Python +application with the `--cluster` option. + +The Operator API endpoint can be configured via the `OPERATOR_API_URL` +environment variables in the [deployment manifest](../ui/manifests/deployment.yaml#L40). +You can also expose the operator API through a [service](../manifests/api-service.yaml). +Some displayed options can be disabled from UI using simple flags under the +`OPERATOR_UI_CONFIG` field in the deployment. + +### Deploy the UI on K8s + +Now, apply all manifests from the `ui/manifests` folder to deploy the Postgres +Operator UI on K8s. Replace the image tag in the deployment manifest if you +want to test the image you've built with `make docker`. Make sure the pods for +the operator and the UI are both running. ```bash -kubectl apply -f ui/manifests +sed -e "s/\(image\:.*\:\).*$/\1$TAG/" manifests/deployment.yaml | kubectl apply -f manifests/ +kubectl get all -l application=postgres-operator-ui ``` -Make sure the pods for the operator and the UI are both running. For local -testing you need to apply proxying and port forwarding so that the UI can talk -to the K8s and Postgres Operator REST API. You can use the provided -`run_local.sh` script for this. 
Make sure it uses the correct URL to your K8s -API server, e.g. for minikube it would be `https://192.168.99.100:8443`. +### Local testing + +For local testing you need to apply K8s proxying and operator pod port +forwarding so that the UI can talk to the K8s and Postgres Operator REST API. +The Ingress resource is not needed. You can use the provided `run_local.sh` +script for this. Make sure that: + +* Python dependencies are installed on your machine +* the K8s API server URL is set for kubectl commands, e.g. for minikube it would usually be `https://192.168.99.100:8443`. +* the pod label selectors for port forwarding are correct + +When testing with minikube you have to build the image in its docker environment +(running `make docker` doesn't do it for you). From the `ui` directory execute: ```bash +# compile and build operator UI +make docker + +# build in image in minikube docker env +eval $(minikube docker-env) +docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.3.0 . + +# apply UI manifests next to a running Postgres Operator +kubectl apply -f manifests/ + +# install python dependencies to run UI locally +pip3 install -r requirements ./run_local.sh ``` diff --git a/docs/operator-ui.md b/docs/operator-ui.md index 99526bb5c..7912e8264 100644 --- a/docs/operator-ui.md +++ b/docs/operator-ui.md @@ -31,9 +31,13 @@ status page. ![pgui-waiting-for-master](diagrams/pgui-waiting-for-master.png "Waiting for master pod") Usually, the startup should only take up to 1 minute. If you feel the process -got stuck click on the "Logs" button to inspect the operator logs. From the -"Status" field in the top menu you can also retrieve the logs and queue of each -worker the operator is using. The number of concurrent workers can be +got stuck click on the "Logs" button to inspect the operator logs. 
If the logs +look fine, but the UI seems to got stuck, check if you are have configured the +same [cluster name label](../ui/manifests/deployment.yaml#L45) like for the +[operator](../manifests/configmap.yaml#L13). + +From the "Status" field in the top menu you can also retrieve the logs and queue +of each worker the operator is using. The number of concurrent workers can be [configured](reference/operator_parameters.md#general). ![pgui-operator-logs](diagrams/pgui-operator-logs.png "Checking operator logs") diff --git a/docs/quickstart.md b/docs/quickstart.md index 8cc5bc0c0..d2c88b9a4 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -52,6 +52,7 @@ cd postgres-operator kubectl create -f manifests/configmap.yaml # configuration kubectl create -f manifests/operator-service-account-rbac.yaml # identity and permissions kubectl create -f manifests/postgres-operator.yaml # deployment +kubectl create -f manifests/api-service.yaml # operator API to be used by UI ``` There is a [Kustomization](https://github.com/kubernetes-sigs/kustomize) @@ -104,7 +105,7 @@ kubectl create -f https://operatorhub.io/install/postgres-operator.yaml This installs the operator in the `operators` namespace. More information can be found on [operatorhub.io](https://operatorhub.io/operator/postgres-operator). -## Create a Postgres cluster +## Check if Postgres Operator is running Starting the operator may take a few seconds. Check if the operator pod is running before applying a Postgres cluster manifest. 
@@ -115,7 +116,61 @@ kubectl get pod -l name=postgres-operator # if you've created the operator using helm chart kubectl get pod -l app.kubernetes.io/name=postgres-operator +``` +If the operator doesn't get into `Running` state, either check the latest K8s +events of the deployment or pod with `kubectl describe` or inspect the operator +logs: + +```bash +kubectl logs "$(kubectl get pod -l name=postgres-operator --output='name')" +``` + +## Deploy the operator UI + +In the following paragraphs we describe how to access and manage PostgreSQL +clusters from the command line with kubectl. But it can also be done from the +browser-based [Postgres Operator UI](operator-ui.md). Before deploying the UI +make sure the operator is running and its REST API is reachable through a +[K8s service](../manifests/api-service.yaml). The URL to this API must be +configured in the [deployment manifest](../ui/manifests/deployment.yaml#L43) +of the UI. + +To deploy the UI simply apply all its manifests files or use the UI helm chart: + +```bash +# manual deployment +kubectl apply -f ui/manifests/ + +# or helm chart +helm install postgres-operator-ui ./charts/postgres-operator-ui +``` + +Like with the operator, check if the UI pod gets into `Running` state: + +```bash +# if you've created the operator using yaml manifests +kubectl get pod -l name=postgres-operator-ui + +# if you've created the operator using helm chart +kubectl get pod -l app.kubernetes.io/name=postgres-operator-ui +``` + +You can now access the web interface by port forwarding the UI pod (mind the +label selector) and enter `localhost:8081` in your browser: + +```bash +kubectl port-forward "$(kubectl get pod -l name=postgres-operator-ui --output='name')" 8081 +``` + +Available option are explained in detail in the [UI docs](operator-ui.md). + +## Create a Postgres cluster + +If the operator pod is running it listens to new events regarding `postgresql` +resources. 
Now, it's time to submit your first Postgres cluster manifest. + +```bash # create a Postgres cluster kubectl create -f manifests/minimal-postgres-manifest.yaml ``` diff --git a/manifests/api-service.yaml b/manifests/api-service.yaml new file mode 100644 index 000000000..616448177 --- /dev/null +++ b/manifests/api-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: postgres-operator +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + targetPort: 8080 + selector: + name: postgres-operator diff --git a/manifests/kustomization.yaml b/manifests/kustomization.yaml index a39627956..32d81d920 100644 --- a/manifests/kustomization.yaml +++ b/manifests/kustomization.yaml @@ -4,3 +4,4 @@ resources: - configmap.yaml - operator-service-account-rbac.yaml - postgres-operator.yaml +- api-service.yaml diff --git a/ui/Makefile b/ui/Makefile index e4eed45e5..f1cf16840 100644 --- a/ui/Makefile +++ b/ui/Makefile @@ -1,17 +1,6 @@ .PHONY: clean test appjs docker push mock -BINARY ?= postgres-operator-ui -BUILD_FLAGS ?= -v -CGO_ENABLED ?= 0 -ifeq ($(RACE),1) - BUILD_FLAGS += -race -a - CGO_ENABLED=1 -endif - -LOCAL_BUILD_FLAGS ?= $(BUILD_FLAGS) -LDFLAGS ?= -X=main.version=$(VERSION) - -IMAGE ?= registry.opensource.zalan.do/acid/$(BINARY) +IMAGE ?= registry.opensource.zalan.do/acid/postgres-operator-ui VERSION ?= $(shell git describe --tags --always --dirty) TAG ?= $(VERSION) GITHEAD = $(shell git rev-parse --short HEAD) @@ -32,8 +21,11 @@ appjs: docker run $(TTYFLAGS) -u $$(id -u) -v $$(pwd):/workdir -w /workdir/app node:10.1.0-alpine npm run build docker: appjs - docker build --build-arg "VERSION=$(VERSION)" -t "$(IMAGE):$(TAG)" . - @echo 'Docker image $(IMAGE):$(TAG) can now be used.' + echo `(env)` + echo "Tag ${TAG}" + echo "Version ${VERSION}" + echo "git describe $(shell git describe --tags --always --dirty)" + docker build --rm -t "$(IMAGE):$(TAG)" -f Dockerfile . 
push: docker docker push "$(IMAGE):$(TAG)" diff --git a/ui/app/package.json b/ui/app/package.json index 3fa66f7d0..d0528e0bd 100644 --- a/ui/app/package.json +++ b/ui/app/package.json @@ -1,6 +1,6 @@ { "name": "postgres-operator-ui", - "version": "1.0.0", + "version": "1.3.0", "description": "PostgreSQL Operator UI", "main": "src/app.js", "config": { diff --git a/ui/app/src/new.tag.pug b/ui/app/src/new.tag.pug index bd0cc764e..fe9d78226 100644 --- a/ui/app/src/new.tag.pug +++ b/ui/app/src/new.tag.pug @@ -408,7 +408,7 @@ new ref='cpuLimit' type='number' placeholder='{ cpu.state.limit.initialValue }' - min='1' + min='250' required value='{ cpu.state.limit.state }' onchange='{ cpu.state.limit.edit }' @@ -434,7 +434,7 @@ new onkeyup='{ memory.state.request.edit }' ) .input-group-addon - .input-units Gi + .input-units Mi .input-group .input-group-addon.resource-type Limit @@ -442,14 +442,14 @@ new ref='memoryLimit' type='number' placeholder='{ memory.state.limit.initialValue }' - min='1' + min='250' required value='{ memory.state.limit.state }' onchange='{ memory.state.limit.edit }' onkeyup='{ memory.state.limit.edit }' ) .input-group-addon - .input-units Gi + .input-units Mi .col-lg-3 help-general(config='{ opts.config }') @@ -519,10 +519,10 @@ new resources: requests: cpu: {{ cpu.state.request.state }}m - memory: {{ memory.state.request.state }}Gi + memory: {{ memory.state.request.state }}Mi limits: cpu: {{ cpu.state.limit.state }}m - memory: {{ memory.state.limit.state }}Gi{{#if restoring}} + memory: {{ memory.state.limit.state }}Mi{{#if restoring}} clone: cluster: "{{ backup.state.name.state }}" @@ -786,8 +786,8 @@ new return instance } - this.cpu = DynamicResource({ request: 100, limit: 1000 }) - this.memory = DynamicResource({ request: 1, limit: 1 }) + this.cpu = DynamicResource({ request: 100, limit: 500 }) + this.memory = DynamicResource({ request: 100, limit: 500 }) this.backup = DynamicSet({ type: () => 'empty', diff --git a/ui/app/src/postgresql.tag.pug 
b/ui/app/src/postgresql.tag.pug index 88e5e130b..be7173dbe 100644 --- a/ui/app/src/postgresql.tag.pug +++ b/ui/app/src/postgresql.tag.pug @@ -76,6 +76,9 @@ postgresql .alert.alert-danger(if='{ progress.requestStatus !== "OK" }') Create request failed .alert.alert-success(if='{ progress.requestStatus === "OK" }') Create request successful ({ new Date(progress.createdTimestamp).toLocaleString() }) + .alert.alert-info(if='{ !progress.postgresql }') PostgreSQL cluster manifest pending + .alert.alert-success(if='{ progress.postgresql }') PostgreSQL cluster manifest created + .alert.alert-info(if='{ !progress.statefulSet }') StatefulSet pending .alert.alert-success(if='{ progress.statefulSet }') StatefulSet created diff --git a/ui/app/src/postgresqls.tag.pug b/ui/app/src/postgresqls.tag.pug index 41d648737..250c175ec 100644 --- a/ui/app/src/postgresqls.tag.pug +++ b/ui/app/src/postgresqls.tag.pug @@ -45,12 +45,14 @@ postgresqls thead tr th(style='width: 120px') Team + th(style='width: 130px') Namespace + th Name th(style='width: 50px') Pods th(style='width: 140px') CPU th(style='width: 130px') Memory th(style='width: 100px') Size - th(style='width: 130px') Namespace - th Name + th(style='width: 120px') Cost/Month + th(stlye='width: 120px') tbody tr( @@ -58,19 +60,21 @@ postgresqls hidden='{ !namespaced_name.toLowerCase().includes(filter.state.toLowerCase()) }' ) td { team } - td { nodes } - td { cpu } / { cpu_limit } - td { memory } / { memory_limit } - td { volume_size } - td(style='white-space: pre') | { namespace } - td a( href='/#/status/{ cluster_path(this) }' ) | { name } + td { nodes } + td { cpu } / { cpu_limit } + td { memory } / { memory_limit } + td { volume_size } + td { calcCosts(nodes, cpu, memory, volume_size) }$ + + td + .btn-group.pull-right( aria-label='Cluster { qname } actions' @@ -124,12 +128,14 @@ postgresqls thead tr th(style='width: 120px') Team + th(style='width: 130px') Namespace + th Name th(style='width: 50px') Pods th(style='width: 140px') 
CPU th(style='width: 130px') Memory th(style='width: 100px') Size - th(style='width: 130px') Namespace - th Name + th(style='width: 120px') Cost/Month + th(stlye='width: 120px') tbody tr( @@ -137,20 +143,20 @@ postgresqls hidden='{ !namespaced_name.toLowerCase().includes(filter.state.toLowerCase()) }' ) td { team } - td { nodes } - td { cpu } / { cpu_limit } - td { memory } / { memory_limit } - td { volume_size } - td(style='white-space: pre') | { namespace } - td - a( href='/#/status/{ cluster_path(this) }' ) | { name } + td { nodes } + td { cpu } / { cpu_limit } + td { memory } / { memory_limit } + td { volume_size } + td { calcCosts(nodes, cpu, memory, volume_size) }$ + + td .btn-group.pull-right( aria-label='Cluster { qname } actions' @@ -223,6 +229,45 @@ postgresqls + '/' + encodeURI(cluster.name) ) + const calcCosts = this.calcCosts = (nodes, cpu, memory, disk) => { + costs = nodes * (toCores(cpu) * opts.config.cost_core + toMemory(memory) * opts.config.cost_memory + toDisk(disk) * opts.config.cost_ebs) + return costs.toFixed(2) + } + + const toDisk = this.toDisk = value => { + if(value.endsWith("Gi")) { + value = value.substring(0, value.length-2) + value = Number(value) + return value + } + + return value + } + + const toMemory = this.toMemory = value => { + if (value.endsWith("Mi")) { + value = value.substring(0, value.length-2) + value = Number(value) / 1000. + return value + } + else if(value.endsWith("Gi")) { + value = value.substring(0, value.length-2) + value = Number(value) + return value + } + + return value + } + + const toCores = this.toCores = value => { + if (value.endsWith("m")) { + value = value.substring(0, value.length-1) + value = Number(value) / 1000. 
+ return value + } + return value + } + this.on('mount', () => jQuery .get('/postgresqls') diff --git a/ui/manifests/deployment.yaml b/ui/manifests/deployment.yaml index c270cbe11..477e4d655 100644 --- a/ui/manifests/deployment.yaml +++ b/ui/manifests/deployment.yaml @@ -4,23 +4,23 @@ metadata: name: "postgres-operator-ui" namespace: "default" labels: - application: "postgres-operator-ui" + name: "postgres-operator-ui" team: "acid" spec: replicas: 1 selector: matchLabels: - application: "postgres-operator-ui" + name: "postgres-operator-ui" template: metadata: labels: - application: "postgres-operator-ui" + name: "postgres-operator-ui" team: "acid" spec: serviceAccountName: postgres-operator-ui containers: - name: "service" - image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.2.0 + image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.3.0 ports: - containerPort: 8081 protocol: "TCP" @@ -32,8 +32,8 @@ spec: timeoutSeconds: 1 resources: limits: - cpu: "300m" - memory: "3000Mi" + cpu: "200m" + memory: "200Mi" requests: cpu: "100m" memory: "100Mi" @@ -41,7 +41,9 @@ spec: - name: "APP_URL" value: "http://localhost:8081" - name: "OPERATOR_API_URL" - value: "http://localhost:8080" + value: "http://postgres-operator:8080" + - name: "OPERATOR_CLUSTER_NAME_LABEL" + value: "cluster-name" - name: "TARGET_NAMESPACE" value: "default" - name: "TEAMS" @@ -60,9 +62,14 @@ spec: "replica_load_balancer_visible": true, "resources_visible": true, "users_visible": true, + "cost_ebs": 0.119, + "cost_core": 0.0575, + "cost_memory": 0.014375, "postgresql_versions": [ + "12", "11", "10", - "9.6" + "9.6", + "9.5" ] } diff --git a/ui/operator_ui/main.py b/ui/operator_ui/main.py index f34d16492..5a3054f0e 100644 --- a/ui/operator_ui/main.py +++ b/ui/operator_ui/main.py @@ -76,6 +76,7 @@ ACCESS_TOKEN_URL = getenv('ACCESS_TOKEN_URL') TOKENINFO_URL = getenv('OAUTH2_TOKEN_INFO_URL') OPERATOR_API_URL = getenv('OPERATOR_API_URL', 'http://postgres-operator') 
+OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name') OPERATOR_UI_CONFIG = getenv('OPERATOR_UI_CONFIG', '{}') OPERATOR_UI_MAINTENANCE_CHECK = getenv('OPERATOR_UI_MAINTENANCE_CHECK', '{}') READ_ONLY_MODE = getenv('READ_ONLY_MODE', False) in [True, 'true'] @@ -84,6 +85,13 @@ SUPERUSER_TEAM = getenv('SUPERUSER_TEAM', 'acid') TARGET_NAMESPACE = getenv('TARGET_NAMESPACE') GOOGLE_ANALYTICS = getenv('GOOGLE_ANALYTICS', False) +# storage pricing, i.e. https://aws.amazon.com/ebs/pricing/ +COST_EBS = float(getenv('COST_EBS', 0.119)) # GB per month + +# compute costs, i.e. https://www.ec2instances.info/?region=eu-central-1&selected=m5.2xlarge +COST_CORE = 30.5 * 24 * float(getenv('COST_CORE', 0.0575)) # Core per hour m5.2xlarge / 8. +COST_MEMORY = 30.5 * 24 * float(getenv('COST_MEMORY', 0.014375)) # Memory GB m5.2xlarge / 32. + WALE_S3_ENDPOINT = getenv( 'WALE_S3_ENDPOINT', 'https+path://s3-eu-central-1.amazonaws.com:443', @@ -293,6 +301,9 @@ DEFAULT_UI_CONFIG = { 'dns_format_string': '{0}.{1}.{2}', 'pgui_link': '', 'static_network_whitelist': {}, + 'cost_ebs': COST_EBS, + 'cost_core': COST_CORE, + 'cost_memory': COST_MEMORY } @@ -1003,6 +1014,7 @@ def main(port, secret_key, debug, clusters: list): logger.info(f'App URL: {APP_URL}') logger.info(f'Authorize URL: {AUTHORIZE_URL}') logger.info(f'Operator API URL: {OPERATOR_API_URL}') + logger.info(f'Operator cluster name label: {OPERATOR_CLUSTER_NAME_LABEL}') logger.info(f'Readonly mode: {"enabled" if READ_ONLY_MODE else "disabled"}') # noqa logger.info(f'Spilo S3 backup bucket: {SPILO_S3_BACKUP_BUCKET}') logger.info(f'Spilo S3 backup prefix: {SPILO_S3_BACKUP_PREFIX}') diff --git a/ui/operator_ui/spiloutils.py b/ui/operator_ui/spiloutils.py index 7f080e3c9..33d07d88a 100644 --- a/ui/operator_ui/spiloutils.py +++ b/ui/operator_ui/spiloutils.py @@ -3,7 +3,7 @@ from datetime import datetime, timezone from furl import furl from json import dumps from logging import getLogger -from os import environ 
+from os import environ, getenv from requests import Session from urllib.parse import urljoin from uuid import UUID @@ -16,6 +16,8 @@ logger = getLogger(__name__) session = Session() +OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name') + def request(cluster, path, **kwargs): if 'timeout' not in kwargs: @@ -137,7 +139,7 @@ def read_pods(cluster, namespace, spilo_cluster): cluster=cluster, resource_type='pods', namespace=namespace, - label_selector={'version': spilo_cluster}, + label_selector={OPERATOR_CLUSTER_NAME_LABEL: spilo_cluster}, ) diff --git a/ui/requirements.txt b/ui/requirements.txt index f9bfdcfa0..2f1bd661a 100644 --- a/ui/requirements.txt +++ b/ui/requirements.txt @@ -1,14 +1,14 @@ Flask-OAuthlib==0.9.5 -Flask==1.0.2 -backoff==1.5.0 -boto3==1.5.14 -boto==2.48.0 +Flask==1.1.1 +backoff==1.8.1 +boto3==1.10.4 +boto==2.49.0 click==6.7 -furl==1.0.1 +furl==1.0.2 gevent==1.2.2 jq==0.1.6 json_delta>=2.0 kubernetes==3.0.0 -requests==2.20.1 +requests==2.22.0 stups-tokens>=1.1.19 -wal_e==1.1.0 \ No newline at end of file +wal_e==1.1.0 diff --git a/ui/run_local.sh b/ui/run_local.sh index 2951fe049..e331b2414 100755 --- a/ui/run_local.sh +++ b/ui/run_local.sh @@ -19,10 +19,15 @@ default_operator_ui_config='{ "nat_gateways_visible": false, "resources_visible": true, "users_visible": true, + "cost_ebs": 0.119, + "cost_core": 0.0575, + "cost_memory": 0.014375, "postgresql_versions": [ + "12", "11", "10", - "9.6" + "9.6", + "9.5" ], "static_network_whitelist": { "localhost": ["172.0.0.1/32"] From 54796945f6c5676e2860704a1d95c0d8abfce756 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Wed, 19 Feb 2020 14:19:55 +0100 Subject: [PATCH 24/31] added pinned werkzeug dep to UI requirements (#835) --- ui/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/ui/requirements.txt b/ui/requirements.txt index 2f1bd661a..5d987416c 100644 --- a/ui/requirements.txt +++ b/ui/requirements.txt @@ -12,3 +12,4 @@ kubernetes==3.0.0 requests==2.22.0 
stups-tokens>=1.1.19 wal_e==1.1.0 +werkzeug==0.16.1 From 742d7334a1fe69609c66b14878bfece896dd8f15 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Wed, 19 Feb 2020 15:01:01 +0100 Subject: [PATCH 25/31] use cluster-name as default label everywhere (#782) * use cluster-name as default label everywhere * fix e2e test --- charts/postgres-operator/values.yaml | 2 +- docs/user.md | 2 +- e2e/tests/test_e2e.py | 22 +++++++++++----------- manifests/configmap.yaml | 2 +- pkg/cluster/k8sres.go | 4 ++-- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 60a0a1f04..78624e0bd 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -55,7 +55,7 @@ configKubernetes: # additional labels assigned to the cluster objects cluster_labels: application:spilo # label assigned to Kubernetes objects created by the operator - cluster_name_label: version + cluster_name_label: cluster-name # annotations attached to each database pod # custom_pod_annotations: "keya:valuea,keyb:valueb" diff --git a/docs/user.md b/docs/user.md index f81e11ede..e1baf9ad1 100644 --- a/docs/user.md +++ b/docs/user.md @@ -65,7 +65,7 @@ our test cluster. 
```bash # get name of master pod of acid-minimal-cluster -export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,version=acid-minimal-cluster,spilo-role=master) +export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master) # set up port forward kubectl port-forward $PGMASTER 6432:5432 diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 2d81a0647..12106601e 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -65,7 +65,7 @@ class EndToEndTestCase(unittest.TestCase): ''' k8s = self.k8s - cluster_label = 'version=acid-minimal-cluster' + cluster_label = 'cluster-name=acid-minimal-cluster' # enable load balancer services pg_patch_enable_lbs = { @@ -113,7 +113,7 @@ class EndToEndTestCase(unittest.TestCase): Lower resource limits below configured minimum and let operator fix it ''' k8s = self.k8s - cluster_label = 'version=acid-minimal-cluster' + cluster_label = 'cluster-name=acid-minimal-cluster' _, failover_targets = k8s.get_pg_nodes(cluster_label) # configure minimum boundaries for CPU and memory limits @@ -172,7 +172,7 @@ class EndToEndTestCase(unittest.TestCase): k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml") k8s.wait_for_pod_start("spilo-role=master", self.namespace) - self.assert_master_is_unique(self.namespace, version="acid-test-cluster") + self.assert_master_is_unique(self.namespace, "acid-test-cluster") @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_scaling(self): @@ -180,7 +180,7 @@ class EndToEndTestCase(unittest.TestCase): Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime. 
''' k8s = self.k8s - labels = "version=acid-minimal-cluster" + labels = "cluster-name=acid-minimal-cluster" k8s.wait_for_pg_to_scale(3) self.assertEqual(3, k8s.count_pods_with_label(labels)) @@ -196,7 +196,7 @@ class EndToEndTestCase(unittest.TestCase): Add taint "postgres=:NoExecute" to node with master. This must cause a failover. ''' k8s = self.k8s - cluster_label = 'version=acid-minimal-cluster' + cluster_label = 'cluster-name=acid-minimal-cluster' # get nodes of master and replica(s) (expected target of new master) current_master_node, failover_targets = k8s.get_pg_nodes(cluster_label) @@ -334,9 +334,9 @@ class EndToEndTestCase(unittest.TestCase): "foo": "bar", } self.assertTrue(k8s.check_service_annotations( - "version=acid-service-annotations,spilo-role=master", annotations)) + "cluster-name=acid-service-annotations,spilo-role=master", annotations)) self.assertTrue(k8s.check_service_annotations( - "version=acid-service-annotations,spilo-role=replica", annotations)) + "cluster-name=acid-service-annotations,spilo-role=replica", annotations)) # clean up unpatch_custom_service_annotations = { @@ -346,14 +346,14 @@ class EndToEndTestCase(unittest.TestCase): } k8s.update_config(unpatch_custom_service_annotations) - def assert_master_is_unique(self, namespace='default', version="acid-minimal-cluster"): + def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"): ''' Check that there is a single pod in the k8s cluster with the label "spilo-role=master" To be called manually after operations that affect pods ''' k8s = self.k8s - labels = 'spilo-role=master,version=' + version + labels = 'spilo-role=master,cluster-name=' + clusterName num_of_master_pods = k8s.count_pods_with_label(labels, namespace) self.assertEqual(num_of_master_pods, 1, "Expected 1 master pod, found {}".format(num_of_master_pods)) @@ -438,7 +438,7 @@ class K8s: _ = self.api.custom_objects_api.patch_namespaced_custom_object( "acid.zalan.do", "v1", namespace, 
"postgresqls", "acid-minimal-cluster", body) - labels = 'version=acid-minimal-cluster' + labels = 'cluster-name=acid-minimal-cluster' while self.count_pods_with_label(labels) != number_of_instances: time.sleep(self.RETRY_TIMEOUT_SEC) @@ -448,7 +448,7 @@ class K8s: def wait_for_master_failover(self, expected_master_nodes, namespace='default'): pod_phase = 'Failing over' new_master_node = '' - labels = 'spilo-role=master,version=acid-minimal-cluster' + labels = 'spilo-role=master,cluster-name=acid-minimal-cluster' while (pod_phase != 'Running') or (new_master_node not in expected_master_nodes): pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index 6b00c30f4..4289a134c 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -10,7 +10,7 @@ data: cluster_domain: cluster.local cluster_history_entries: "1000" cluster_labels: application:spilo - cluster_name_label: version + cluster_name_label: cluster-name # custom_service_annotations: "keyx:valuez,keya:valuea" # custom_pod_annotations: "keya:valuea,keyb:valueb" db_hosted_zone: db.example.com diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index e6561e0f3..4468c8428 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1498,8 +1498,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) { ) labels := map[string]string{ - "version": c.Name, - "application": "spilo-logical-backup", + c.OpConfig.ClusterNameLabel: c.Name, + "application": "spilo-logical-backup", } podAffinityTerm := v1.PodAffinityTerm{ LabelSelector: &metav1.LabelSelector{ From e2a9b0391343e6c7251d72f5612306aa6af46a3e Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Thu, 20 Feb 2020 16:21:21 +0100 Subject: [PATCH 26/31] bump spilo version to latest release (#836) --- charts/postgres-operator/values-crd.yaml | 2 +- charts/postgres-operator/values.yaml | 2 +- 
manifests/complete-postgres-manifest.yaml | 2 +- manifests/configmap.yaml | 2 +- manifests/postgresql-operator-default-configuration.yaml | 2 +- pkg/util/config/config.go | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index 08c255a04..195a03380 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -24,7 +24,7 @@ configGeneral: # etcd connection string for Patroni. Empty uses K8s-native DCS. etcd_host: "" # Spilo docker image - docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16 + docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2 # max number of instances in Postgres cluster. -1 = no limit min_instances: -1 # min number of instances in Postgres cluster. -1 = no limit diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 78624e0bd..8b52a7d67 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -24,7 +24,7 @@ configGeneral: # etcd connection string for Patroni. Empty uses K8s-native DCS. etcd_host: "" # Spilo docker image - docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16 + docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2 # max number of instances in Postgres cluster. -1 = no limit min_instances: "-1" # min number of instances in Postgres cluster. 
-1 = no limit diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index 9e3b891c3..5ae817ca3 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -5,7 +5,7 @@ metadata: # labels: # environment: demo spec: - dockerImage: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16 + dockerImage: registry.opensource.zalan.do/acid/spilo-12:1.6-p2 teamId: "acid" volume: size: 1Gi diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index 4289a134c..aa7bef034 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -19,7 +19,7 @@ data: # default_cpu_request: 100m # default_memory_limit: 500Mi # default_memory_request: 100Mi - docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16 + docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2 # enable_admin_role_for_users: "true" # enable_crd_validation: "true" # enable_database_access: "true" diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index 695a4e9c5..bdb131fc5 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -5,7 +5,7 @@ metadata: configuration: # enable_crd_validation: true etcd_host: "" - docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16 + docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2 # enable_shm_volume: true max_instances: -1 min_instances: -1 diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index ec4af6427..0e88c60d7 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -93,7 +93,7 @@ type Config struct { WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to' EtcdHost string `name:"etcd_host" default:""` // 
special values: the empty string "" means Patroni will use K8s as a DCS - DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16"` + DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p2"` Sidecars map[string]string `name:"sidecar_docker_images"` // default name `operator` enables backward compatibility with the older ServiceAccountName field PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"` From 7b94060d1754f12ba2466b8011bb84ea09ac051d Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Fri, 21 Feb 2020 16:36:23 +0100 Subject: [PATCH 27/31] fix validation for S3ForcePathStyle (#841) --- charts/postgres-operator/crds/postgresqls.yaml | 2 +- manifests/postgresql.crd.yaml | 2 +- pkg/apis/acid.zalan.do/v1/crds.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index b4b676236..af535e2c8 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -94,7 +94,7 @@ spec: s3_secret_access_key: type: string s3_force_path_style: - type: string + type: boolean s3_wal_path: type: string timestamp: diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml index 276bc94b8..453916b26 100644 --- a/manifests/postgresql.crd.yaml +++ b/manifests/postgresql.crd.yaml @@ -58,7 +58,7 @@ spec: s3_secret_access_key: type: string s3_force_path_style: - type: string + type: boolean s3_wal_path: type: string timestamp: diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 4cfc9a9e6..28dfa1566 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -160,7 +160,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ Type: "string", }, "s3_force_path_style": { - Type: "string", + Type: 
"boolean", }, "s3_wal_path": { Type: "string", From b997e3682f2281188cacd0bf96ac4bca50d686f1 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Mon, 24 Feb 2020 15:14:14 +0100 Subject: [PATCH 28/31] be more permissive with standbys (#842) * be more permissive with standbys * reflect feedback and updated docs --- docs/administrator.md | 10 +- docs/reference/operator_parameters.md | 6 +- docs/user.md | 134 ++++++++++++++++++-------- pkg/cluster/k8sres.go | 10 +- 4 files changed, 109 insertions(+), 51 deletions(-) diff --git a/docs/administrator.md b/docs/administrator.md index 3597b65ca..9d877c783 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -11,11 +11,11 @@ switchover (planned failover) of the master to the Pod with new minor version. The switch should usually take less than 5 seconds, still clients have to reconnect. -Major version upgrades are supported via [cloning](user.md#clone-directly). The -new cluster manifest must have a higher `version` string than the source cluster -and will be created from a basebackup. Depending of the cluster size, downtime -in this case can be significant as writes to the database should be stopped and -all WAL files should be archived first before cloning is started. +Major version upgrades are supported via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster). +The new cluster manifest must have a higher `version` string than the source +cluster and will be created from a basebackup. Depending of the cluster size, +downtime in this case can be significant as writes to the database should be +stopped and all WAL files should be archived first before cloning is started. Note, that simply changing the version string in the `postgresql` manifest does not work at present and leads to errors. 
Neither Patroni nor Postgres Operator diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index ca972c22b..ad519b657 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -110,8 +110,10 @@ Those are top-level keys, containing both leaf keys and groups. * **min_instances** operator will run at least the number of instances for any given Postgres - cluster equal to the value of this parameter. When `-1` is specified, no - limits are applied. The default is `-1`. + cluster equal to the value of this parameter. Standby clusters can still run + with `numberOfInstances: 1` as this is the [recommended setup](../user.md#setting-up-a-standby-cluster). + When `-1` is specified for `min_instances`, no limits are applied. The default + is `-1`. * **resync_period** period between consecutive sync requests. The default is `30m`. diff --git a/docs/user.md b/docs/user.md index e1baf9ad1..91a010b9c 100644 --- a/docs/user.md +++ b/docs/user.md @@ -254,29 +254,22 @@ spec: ## How to clone an existing PostgreSQL cluster -You can spin up a new cluster as a clone of the existing one, using a clone +You can spin up a new cluster as a clone of the existing one, using a `clone` section in the spec. There are two options here: -* Clone directly from a source cluster using `pg_basebackup` -* Clone from an S3 bucket +* Clone from an S3 bucket (recommended) +* Clone directly from a source cluster -### Clone directly - -```yaml -spec: - clone: - cluster: "acid-batman" -``` - -Here `cluster` is a name of a source cluster that is going to be cloned. The -cluster to clone is assumed to be running and the clone procedure invokes -`pg_basebackup` from it. 
The operator will setup the cluster to be cloned to -connect to the service of the source cluster by name (if the cluster is called -test, then the connection string will look like host=test port=5432), which -means that you can clone only from clusters within the same namespace. +Note, that cloning can also be used for [major version upgrades](administrator.md#minor-and-major-version-upgrade) +of PostgreSQL. ### Clone from S3 +Cloning from S3 has the advantage that there is no impact on your production +database. A new Postgres cluster is created by restoring the data of another +source cluster. If you create it in the same Kubernetes environment, use a +different name. + ```yaml spec: clone: @@ -287,7 +280,8 @@ spec: Here `cluster` is a name of a source cluster that is going to be cloned. A new cluster will be cloned from S3, using the latest backup before the `timestamp`. -In this case, `uid` field is also mandatory - operator will use it to find a +Note, that a time zone is required for `timestamp` in the format of +00:00 which +is UTC. The `uid` field is also mandatory. The operator will use it to find a correct key inside an S3 bucket. You can find this field in the metadata of the source cluster: @@ -299,9 +293,6 @@ metadata: uid: efd12e58-5786-11e8-b5a7-06148230260c ``` -Note that timezone is required for `timestamp`. Otherwise, offset is relative -to UTC, see [RFC 3339 section 5.6) 3339 section 5.6](https://www.ietf.org/rfc/rfc3339.txt). - For non AWS S3 following settings can be set to support cloning from other S3 implementations: @@ -317,14 +308,35 @@ spec: s3_force_path_style: true ``` +### Clone directly + +Another way to get a fresh copy of your source DB cluster is via basebackup. To +use this feature simply leave out the timestamp field from the clone section. +The operator will connect to the service of the source cluster by name. 
If the +cluster is called test, then the connection string will look like host=test +port=5432), which means that you can clone only from clusters within the same +namespace. + +```yaml +spec: + clone: + cluster: "acid-batman" +``` + +Be aware that on a busy source database this can result in an elevated load! + ## Setting up a standby cluster -Standby clusters are like normal cluster but they are streaming from a remote -cluster. As the first version of this feature, the only scenario covered by -operator is to stream from a WAL archive of the master. Following the more -popular infrastructure of using Amazon's S3 buckets, it is mentioned as -`s3_wal_path` here. To start a cluster as standby add the following `standby` -section in the YAML file: +Standby cluster is a [Patroni feature](https://github.com/zalando/patroni/blob/master/docs/replica_bootstrap.rst#standby-cluster) +that first clones a database, and keeps replicating changes afterwards. As the +replication is happening by the means of archived WAL files (stored on S3 or +the equivalent of other cloud providers), the standby cluster can exist in a +different location than its source database. Unlike cloning, the PostgreSQL +version between source and target cluster has to be the same. + +To start a cluster as standby, add the following `standby` section in the YAML +file and specify the S3 bucket path. An empty path will result in an error and +no statefulset will be created. ```yaml spec: @@ -332,20 +344,62 @@ spec: s3_wal_path: "s3 bucket path to the master" ``` -Things to note: +At the moment, the operator only allows to stream from the WAL archive of the +master. Thus, it is recommended to deploy standby clusters with only [one pod](../manifests/standby-manifest.yaml#L10). +You can raise the instance count when detaching. Note, that the same pod role +labels like for normal clusters are used: The standby leader is labeled as +`master`. 
-- An empty string in the `s3_wal_path` field of the standby cluster will result - in an error and no statefulset will be created. -- Only one pod can be deployed for stand-by cluster. -- To manually promote the standby_cluster, use `patronictl` and remove config - entry. -- There is no way to transform a non-standby cluster to a standby cluster - through the operator. Adding the standby section to the manifest of a running - Postgres cluster will have no effect. However, it can be done through Patroni - by adding the [standby_cluster](https://github.com/zalando/patroni/blob/bd2c54581abb42a7d3a3da551edf0b8732eefd27/docs/replica_bootstrap.rst#standby-cluster) - section using `patronictl edit-config`. Note that the transformed standby - cluster will not be doing any streaming. It will be in standby mode and allow - read-only transactions only. +### Providing credentials of source cluster + +A standby cluster is replicating the data (including users and passwords) from +the source database and is read-only. The system and application users (like +standby, postgres etc.) all have a password that does not match the credentials +stored in secrets which are created by the operator. One solution is to create +secrets beforehand and paste in the credentials of the source cluster. +Otherwise, you will see errors in the Postgres logs saying users cannot log in +and the operator logs will complain about not being able to sync resources. +This, however, can safely be ignored as it will be sorted out once the cluster +is detached from the source (and it’s still harmless if you don’t plan to). + +You can also edit the secrets afterwards. Find them by: + +```bash +kubectl get secrets --all-namespaces | grep +``` + +### Promote the standby + +One big advantage of standby clusters is that they can be promoted to a proper +database cluster. This means it will stop replicating changes from the source, +and start accept writes itself. 
This mechanism makes it possible to move +databases from one place to another with minimal downtime. Currently, the +operator does not support promoting a standby cluster. It has to be done +manually using `patronictl edit-config` inside the postgres container of the +standby leader pod. Remove the following lines from the YAML structure and the +leader promotion happens immediately. Before doing so, make sure that the +standby is not behind the source database. + +```yaml +standby_cluster: + create_replica_methods: + - bootstrap_standby_with_wale + - basebackup_fast_xlog + restore_command: envdir "/home/postgres/etc/wal-e.d/env-standby" /scripts/restore_command.sh + "%f" "%p" +``` + +Finally, remove the `standby` section from the postgres cluster manifest. + +### Turn a normal cluster into a standby + +There is no way to transform a non-standby cluster to a standby cluster through +the operator. Adding the `standby` section to the manifest of a running +Postgres cluster will have no effect. But, as explained in the previous +paragraph it can be done manually through `patronictl edit-config`. This time, +by adding the `standby_cluster` section to the Patroni configuration. However, +the transformed standby cluster will not be doing any streaming. It will be in +standby mode and allow read-only transactions only. 
## Sidecar Support diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 4468c8428..e2251a67c 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1048,11 +1048,13 @@ func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 { cur := spec.NumberOfInstances newcur := cur - /* Limit the max number of pods to one, if this is standby-cluster */ if spec.StandbyCluster != nil { - c.logger.Info("Standby cluster can have maximum of 1 pod") - min = 1 - max = 1 + if newcur == 1 { + min = newcur + max = newcur + } else { + c.logger.Warningf("operator only supports standby clusters with 1 pod") + } } if max >= 0 && newcur > max { newcur = max From fb9ef11e4e0199ca0bde1d87f0b87285e8077917 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Mon, 24 Feb 2020 17:48:14 +0100 Subject: [PATCH 29/31] align UI pipeline with operator (#844) * align UI pipeline with operator --- delivery.yaml | 21 +++++++-------------- ui/Makefile | 13 +++++++++---- 2 files changed, 16 insertions(+), 18 deletions(-) diff --git a/delivery.yaml b/delivery.yaml index be35d3e27..144448ea9 100644 --- a/delivery.yaml +++ b/delivery.yaml @@ -66,20 +66,13 @@ pipeline: - desc: 'Build and push Docker image' cmd: | cd ui - image_base='registry-write.opensource.zalan.do/acid/postgres-operator-ui' - if [[ "${CDP_TARGET_BRANCH}" == 'master' && -z "${CDP_PULL_REQUEST_NUMBER}" ]] + IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"} + if [[ ${CDP_TARGET_BRANCH} == "master" && ${IS_PR_BUILD} != "true" ]] then - image="${image_base}" + IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-ui else - image="${image_base}-test" + IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-ui-test fi - image_with_tag="${image}:c${CDP_BUILD_VERSION}" - - if docker pull "${image}" - then - docker build --cache-from="${image}" -t "${image_with_tag}" . - else - docker build -t "${image_with_tag}" . 
- fi - - docker push "${image_with_tag}" + export IMAGE + make docker + make push diff --git a/ui/Makefile b/ui/Makefile index f1cf16840..e7d5df674 100644 --- a/ui/Makefile +++ b/ui/Makefile @@ -5,9 +5,13 @@ VERSION ?= $(shell git describe --tags --always --dirty) TAG ?= $(VERSION) GITHEAD = $(shell git rev-parse --short HEAD) GITURL = $(shell git config --get remote.origin.url) -GITSTATU = $(shell git status --porcelain || echo 'no changes') +GITSTATUS = $(shell git status --porcelain || echo 'no changes') TTYFLAGS = $(shell test -t 0 && echo '-it') +ifdef CDP_PULL_REQUEST_NUMBER + CDP_TAG := -${CDP_BUILD_VERSION} +endif + default: docker clean: @@ -24,11 +28,12 @@ docker: appjs echo `(env)` echo "Tag ${TAG}" echo "Version ${VERSION}" + echo "CDP tag ${CDP_TAG}" echo "git describe $(shell git describe --tags --always --dirty)" - docker build --rm -t "$(IMAGE):$(TAG)" -f Dockerfile . + docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)" -f Dockerfile . -push: docker - docker push "$(IMAGE):$(TAG)" +push: + docker push "$(IMAGE):$(TAG)$(CDP_TAG)" mock: docker run -it -p 8080:8080 "$(IMAGE):$(TAG)" --mock From b24da3201ceeeb8e23ffbb4fd99076822f388898 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Tue, 25 Feb 2020 09:50:54 +0100 Subject: [PATCH 30/31] bump version to 1.4.0 + some polishing (#839) * bump version to 1.4.0 + some polishing * align version for UI chart * update user docs to warn for standby replicas * minor log message changes for RBAC resources --- charts/postgres-operator-ui/Chart.yaml | 4 +-- charts/postgres-operator-ui/index.yaml | 29 ++++++++++++++++++ .../postgres-operator-ui-1.4.0.tgz | Bin 0 -> 3517 bytes charts/postgres-operator-ui/values.yaml | 2 +- charts/postgres-operator/Chart.yaml | 4 +-- charts/postgres-operator/index.yaml | 28 +++++++++++++++-- .../postgres-operator-1.4.0.tgz | Bin 0 -> 42200 bytes .../templates/clusterrole.yaml | 6 ++-- charts/postgres-operator/values-crd.yaml | 8 ++++- charts/postgres-operator/values.yaml | 8 ++++- 
docs/user.md | 11 ++++--- manifests/configmap.yaml | 2 ++ manifests/operator-service-account-rbac.yaml | 6 ++-- manifests/postgres-operator.yaml | 2 +- pkg/controller/controller.go | 2 +- pkg/controller/postgresql.go | 14 ++++----- pkg/util/config/config.go | 11 +++---- 17 files changed, 102 insertions(+), 35 deletions(-) create mode 100644 charts/postgres-operator-ui/index.yaml create mode 100644 charts/postgres-operator-ui/postgres-operator-ui-1.4.0.tgz create mode 100644 charts/postgres-operator/postgres-operator-1.4.0.tgz diff --git a/charts/postgres-operator-ui/Chart.yaml b/charts/postgres-operator-ui/Chart.yaml index 4418675b6..a6e46ab3e 100644 --- a/charts/postgres-operator-ui/Chart.yaml +++ b/charts/postgres-operator-ui/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: postgres-operator-ui -version: 0.1.0 -appVersion: 1.3.0 +version: 1.4.0 +appVersion: 1.4.0 home: https://github.com/zalando/postgres-operator description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience keywords: diff --git a/charts/postgres-operator-ui/index.yaml b/charts/postgres-operator-ui/index.yaml new file mode 100644 index 000000000..0cd03d6e5 --- /dev/null +++ b/charts/postgres-operator-ui/index.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +entries: + postgres-operator-ui: + - apiVersion: v1 + appVersion: 1.4.0 + created: "2020-02-24T15:32:47.610967635+01:00" + description: Postgres Operator UI provides a graphical interface for a convenient + database-as-a-service user experience + digest: 00e0eff7056d56467cd5c975657fbb76c8d01accd25a4b7aca81bc42aeac961d + home: https://github.com/zalando/postgres-operator + keywords: + - postgres + - operator + - ui + - cloud-native + - patroni + - spilo + maintainers: + - email: opensource@zalando.de + name: Zalando + - email: sk@sik-net.de + name: siku4 + name: postgres-operator-ui + sources: + - https://github.com/zalando/postgres-operator + urls: + - postgres-operator-ui-1.4.0.tgz + version: 
1.4.0 +generated: "2020-02-24T15:32:47.610348278+01:00" diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.4.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.4.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..8d1276dd16ab28ad5d176c810c86ad617c682359 GIT binary patch literal 3517 zcmV;u4MOrCiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH;Na@#nP`OT;3rEjZdvLQuE@<&)Tck6Y0Dc4RMmF;9}Z)?g0 zku3>h2;cyqY{$_(`xU?sk(6x7iN~4T3O^)}Xf%LEqr1^)A{9!HB`Qy(D2W!*%V_6l zLZtR@$&|f#iWCGvu-EU~{~!q3|G|ED|3%o_>-P6|_q$>EA_#YT-S7njPty9DlqpT* zi{Pu9B0;v=;nxFy# zV@c8pjR^zFHOdi*5k^7+fmrYva*A9#0144#NECWRdBjsFXEa906iUFG6eUF-qcagx zj9{X5s-jM3O!Xui`mvaHJ`qNEB05XF>s;aOwUCL591qG;$AhZegN!;J#7ty~$BCvh zG>}9~!A*fmDHG0=P_7B(C@mtK63QZjge#HB82?&yz)#Qt;KX$EfAUugaCP-pMXx-L zx(=r3Rn~X%{HmqrbJjMV|CQ^1Mp%aG$ppZf_21p?_uA`!cXxNY{+}c5 zz#ANqjA_Ux-7eQvOyN5_a513@s9EU$Jbd@FHxhD6w8jKRl%WB5gE1o#VMZh+Lxu{p zfFVLkl)}Ua1p&7rlb~^$GNMuWjw3N;G$uzP<2r)SadwQf7}0Si5i$yNO2+8e-^c-w zIHpR=+o;O2w?72+qt0;WPyEet4$X|lX(%z^{ZWNc)gT{i$}#@IkG8sA3Xo{Ua~ z9!ONkwd*)?NMh549XL-hrXz|9t|zD`DBSihhqB4ddedn}P?OdKe35onpA z;}l}=FpllO??Fr8yaBf!vB7gOK`U@Tj6hGYjYA6~Y<&xQG6F6%DAYiBQlK)EY>>SI zlH;KY#>frTFo8@_KDKyA8DqA*oX=9wH7C(136U(Q<5W2_Y(}S4+Z;fgW)bv)U}~SH zIJL9a3xePswNv_AhN=M^wt+(n+iw5(^rLc+&s0w6lXrvnXBUU37ZLnpLK#*n0%BwP zSWc;8B~w%>i7^5<$=A5MM3$l#B{Lf0 z=-bp(Ibxa8C{G3kV=APMz|9+IV5kW0=5{i7;3^xUgFv2Ohm;oo_&ZQ zqdwk{Y0A(Sa%_vhm;nI(GN-Aj?P;d+k{bG*9F3P_pOO;$ zw-)garj`1?lFpse_ZP?KzP{04)5bdczq{9O>Hqy8+~4Z|=SUX_C7W9PZWz7#0 zqQH=s^|Hq4L|3^68|LQrCg@b^k)<%1tlWQnq-p|xKzF;=xT z3x62iTEe_JJvXq%R#qv-Kl;we2xKlHb0o2_eg;+8nYpE{;hHjL4E)UOnTXkLna&rz z-HgtqVZI;_9aW?)3oG)LF!us%M1Q6hnEu0#CHiUeDX#UiB1?{?p;Z=rZM=PS2UAJ8 z9)bI3<^5T?Eo^zak72s|@-1d8jeb&W#2z!H&HdtOZE`Cj|3G=nvSd9|_&-*Y&_rwJ z1Rfehja7{_DEv-_O)a|?yq`lcGDNoN_-qyT-U+JnPo6IvmGIf@^qDa>+`qWsUb>H( zxo<6izj0cl|KofYCi_3RcXxHoAcrLO3m4FQ?$i$l 
zY!S)yL`eF{?$-UQ18cy~Lg$L|B!a?aa3&Z!Q`96OnnacB+qCqccv>akjJ@wbE=YX<{lYb-;C3wVJe=TS9zo5pRU8V2Xz@E|Voq)e20SXdGosp?(y$RlTZqy$@l! zyR%B>kF8@im;Xb`6UxV5pa#~-|6b7Y|8%=yzrU6L&yjv7`Tw_^S+;86N!5U0c!nc0 z{!&IgFfR_kvReN7Sy0*VSB$k)M>+oH;nI^#G+&%u+~-#OKE&%EKCO}e38qZkPLY32 zIM5pT-`{Wh|GGiH*WJqh=SX!C?;69YGYj29p1W_V*>@X~{NG#-*CgDUF4jCQR(7Ts z=OGL>;z30aIsgh8##+dno>OBR|6C*R6O#B#hFZ2z^sdGQCb33GpR}uA(%C#|BvZr9 zF-=_9fdP?ZiW(*Tg#TrM%6YK)B`x%Is>=o@h(-KZ*(tAJ8n>wHRwa-}xJ2NZK^2ym zedU)CE-;^WRuq=;Eij))OU0UuVLo4KW>AZ7S*o)LeyP(lKmV_dB_B^U%7KjzI0z2H z1_VGVg%+`35x5segL+pal7w;o#%}oVBS(K2v@( z%B0!v8up9Bv$w|=m#2sCj?V{&N5>mtZ$+fskN)`Z-T6H@KYOkB0RLKg1I4mmD)?m$ zwLY9&9=$*P?~}Kiw|=*(^+d$#l2Lx;Ms69vxg5ltqB~q>~ z^I*G6r6uKK*9ePn?%f4*|E@c~r&bYVt1f3$(IG=OkH%REJSAq2aLEKoE{B8}AuBhj z$BDii6OGs8w%(47?O9o&(0z|@saj#10A~v2Mt}<+*=2E*q10BWfnVqIV8A5d zW4s@-j1{dczgivpzHY;KcHr~Pw4Zjxlp{qTT$C3 zH;kc5(&gLcCHKn0t#WZRSaKkKiP!9jCN%$aWW}&dfdO&0a`_LIHk1EFyy6$Cfi?0! z*z0#&=RaY0Z~Ohnv!v$ve=8VtbtFJrO_T?}+q3>B)Hly{y1jD!`-V<*%!S0KXk*>_ z-|g-#egECve*f_-X$J;GYn0r6s-4d{Tu+e0FrzG?d<-dxugDlx^#!=fQftkCnjm8^ zW@4BhTT(uL1rnLPSi$ZRYwrn9oE_jew)=bdA(c3yH<;wEfj|A|!+XwdL2z5sbO}W=-*yYKkJPqJfw%NLVz$NZF!YQ{SY zL}t&LrS{tw<-GLOb?UtIhvdq6>Fa6gy!>Bh2R;%>MW*27&9QR)REl3Q){ajTB%K^i zieDXnrecv`=R2};R<3`_6TEr0`(MKT{?h#qy}j-J?^#kyu_M=#=B}nyoKeQtHtH1W zt#xmVd0X625P{nbx`7vTy>4(3?nm8T6zuy!(0ko|y>}4)B?zM+$PYYzkLzYE-i@xD zv5<6(N=Fcm@F02J>-O*<814<@(QY_8IOz4eqc|S+Uz6ZSYlV=+D+ElJOd$&#gl;3XyHq3-J zV-(ijb7NBTuA4@+_I)=C@cho3#q&MaZ~!#U+8=rUjj57JwnVXdBcA0Dc zVQyr3R8em|NM&qo0PMX7ToXzAIF4OoZ>VP*!GcIaLa4DIO+}>$h+?_8$!?OB&2HS? 
z08tc0?AUwndiLJ+?ES=k-r0Na?d<$N+fpDwK+k)>|DXHb$2aWGJoC)VGf$rx13}SR z5~0L|0U;rpAU#8L5J^i+kREgTPLEV7mHK*nv;QxZN^}0d)K})`B$NAk$$jK+Fqr?8NxUUe5sXl3 z5;f55t7x8DKon0cKusbLjZgr>VW5X_NQ+QHsn|{(ff_SXG(u8P(XDX#eo}&%OSWAk2tR-;)94z5czteZ1v4*1wO; zTlRDP|A+_GLs~>30)RxcC`FSdh547nvLYlb0ZU+lEodMplqQ&>kQ#;U5H$caq-FRC zOVa=tj2IS8V5r)p050L0NP>&nM6t{T!Y~t{42T-lm;i(Wh$JBsAT)r* zFxrS=v51;PXi6e-1^iE-H&QfEAq?j+vH&=C1OpTYv<_j{E@gn!2vdnfG!{;iXl6kY zl<~z!fr*EEr&bNJx(`%$Kj-mMg~Lq}CAxg#c6oV8nnCCOv}F0yFnC z8eo>Ghs1Yy%d^AzY16 z456BZDlaT0Kr90-0Zwp3BLYB=;!1NB1#l6|IBfMO{tZB(6uUqT2#FFfP$6_Og5b>f zQi$5f4jYLWAe00o(#MEUG{rFk2ql$V9R={we+>j>!qv6_m^Wqsaw!Kv4dK8*B1tS& zA|$gMff{0FkFfU8Y#=t?o6?|3_JGL-75EOGhdYnpg2myDiPoc zT0t`_Y&_-`Rggm;Ml_HSqd{n7EPx3{Q)7my{!Qlu>du6=vec z2sMPU@F5sRIdKCpmcIgsqEIc4z)UBYnG@O}RJ{>3OQQl*jAzJ@!8}e1PNQTXvtMnX zS^6dbvmPRADzR#W878hJGx}oY9f!R|N&twaA+?Tc34zo)8}7<2A|pi;dZmGYm5{Y~ zaY;l>kb>3oAdf`Eq~d<7c#7tjF2&Aah*=ae&5%YMKx(de6`~I>!<08M5`Y;PGnE?%n6kj+xowdhJ+}ZG^%MMi2#ydP~mtLV{jQDiegP67L;va zC0kg@!BpBVzCM`Pwy>Bj%uPRrQZ$1RzdUFiM6>Kzg#Z-SAtcJsuZAS_HmgMs;iv|o zXdV;3oV_LEf#69fN#HEg=H?C6Ba9}%@F>ctW|*YLhygvSUG@>_P5RGtZQfM%6izZ+YYF-2&m|X--IWl9E zcUZ+SB@IF9A!c=$75;ww?DrQNU=>Htj8G<-#Xu7V0wc60kcgPrwsNg?26m41tQ2Y~ z6A@-{+iK~Irp92Bd*zt*VNP4MqA-vIktnx>nL#BCMz-?=4v-{-G?{rNgNao&B`2Nq zkijyCf^ot$$aIEtcd(5ZSmTCOC}@aA80w`M`-ITsQp6~}+m_l&gJ2pFBuiB+8JS!h zNR?dd5x6l_OCe+uszxYJ1cwa-iqlk6z9nXD3-rjTrpz0DEwGgUq*fC~oaP9Hm5%_T zK>OIJNY8|zh>n70&5=K=PpC{5WkBEoJg?>klLxKjK`SkwbFYe=XymUQd#!K_!9t-1 z0_Mh%=`v8EIL!2;fZc?1BIvM_>`?Qq*zK|H#6cvp9!Wilvr2)6Ak_@*U?u=*n2ypq zloGfj-(hocrIv)$NKVb$W_6nt0#;Gu%~jr7w6BC%u@j{LN&%Ym6ZE(K67!Oms7VAy zn6-;hP1v@zBw;jw@KA0ICNly+46)pt8BxxDqktQu{-PvD%Z&Wwq$WfGoWR8-LC}Jt z+>~E+C`KqX6boGeTx3!?KbFZlgx0a6B#2(Ka2EJB10Q892{Z}C5G?`&JtrtQsFwG( ziiBCYOonK+4xxDd#ZR4}%khF5& z)f@G|npzDcLl~HeQ~PppcHU688bmK2zTYfZ9^t}6wjBvC9|mcWS$$#*ON?FDjP6V5 zt!yM-XBWIke0hb9@|CPZaF7h4wBSO?EoV5s2qPH6Df+DT%>2dZ&P-K{wk6{`?hrb| zRau_Q$S#4gb2=Fhnf=27SQMubG6}-$x)>muVO=zpqdh@40wFP>wgxL}`a-aYYfONx z0CFmqddjN2gx6whuOt7{k-tAZ%TWS?wy; 
zw}-$=2BZSODhZMTF>@zT6Z%~CG{ck)2F%2nj-0&D*&=I`QM^HCV`Q5(ft-eR3TIsR z^{_I!c>yNat3qqE$$;eOAFIjnItepl7BfgteJ%av%}m|`V&EtRIQH%20<_D!hVpEf zFOyfx)`p0AS_Q4kT6MANt1V(y9FglMj8nGeNfV2T(vLBm`b+w8WWZ2C|0V^;L+HCB zM1+3lfr*$YV;>z6qi$Fz5QPDZsEk@Ig9<;=7GD=lA()0=J)ul7h0TN}A*70+5Do&s z2rXqX)UC8U_OlWr*hcL7Wy~#VjJTR}m!Y(YGnRR|1uUwajldO@P77TKZ)L-h7!;g#LG>IZ+{Z3#*I+UVWg@eIOWz=X8YquT$RmoRo6;)|2&=^Z~JYXw8 zoUy?$9-1I6O)@jc5Ex@kV1!o5R_8#w*z#JI?{i|FQ@Q;&#YM^msel`lOffaIY6GxR`sgD9?~-6mTfD4-hk`Ap04|IW z6LasxoGs7UMG&pCKx05bDk2FH5a7XUPz<3=6piT3Oh{1g+<0@rLCZ=5MC%m5#gl2@ z6GlnCU1lgqM9U1D?G%K?1`8$&Lovyek}IYFh*7L&W*Nf7G7FIKpa@Q%VIUY;Il?22 zLV5|Lhx!sYluWVN0KDEK2nri7f!T&mG^!9K3h7y!(qcfy%7MzLPUKvhMvCK7lnyl* z7;3Uwg(zkc=*N}$p`_$Wu4aCLbqVSSC`7AAaGH+-b_wbz4&!VuW_)b{T9YH|uO|r{ z^d?lipCq@rX&@wF6AQ%;dG1pNT0SGQ68rg?KF%uUQ7VG8X%hl%xkoN;4 zW`@lW9w&qC0OAWPfQwuTq*CC{{EsDH$Vd}fw9`arro~Z)}bdKq0&6*%1tz%>lAH#Grhd3Ekfpv&;Lcjp&l{U!` zmb?!X*AUibfG6us_wr(j9s zVSZW#<%4sNu>l6AEW@sJDu`7H%t*5n%lR<*lrg@sj(kvtqrPMvs@9q35l0xor9-Ty znx#1u=i-8Pya|>_u4QVuNwIRCT0T)Owpj2T<0C^1^H3qI=RcW5X&S+qiHC5Tpbs}N zj1HSSq{Sgb5;US_tTpo>Fvze3&o|HZN{Bae)LlTD&ao_Wz_)f9X;7`Tc4}=LvV)p) zo~^(eP_7F(DA0u!!Zbv)PJac2VSKX;k>^@E0&@|-va3aDBr%SZ6`r0NC=n5}pTv*> zmGHhT36-Rl*oM7DqzY0i`1BS;jcOR>R&bui1P6s!916k;wDFB_zZ8IBG9rL7XeyQ5z75wJG;?zzm!?J{L?F=NvFN79(MchHF@9NFg-mnqomQ zMn41013(-c)Y^~ENlsQlHDVY?pu7<&;JP^s$A**G0IVyBDL`7Bg`Pb*7-9nf zi*W)*JY9tto*2SmF@)2o*c|AygzNkDXAp`bFofjYF5eR;MrzP(`?R?QPm$#}r2yS~ zG6+NxFd{^N3xxo~6~$U33L~CO@5K~?aY<;NBIfT{_D_OjJN#d#c;@-!&i@rim-38^ ziVKUC&?)pcw(+(6e=lFJ9QnV#azCG+`M*Eni6cM~LULhkh|ZnZ4s?(?x5tCAvlNWb zVpfEj#(-FCj$yUz*AGZy5DbARL=q{y1^xR2EkbjKoEQTxkij6aT~7=Irn1@Z=8_8u z3F|2U641ZD%m4Or`ak#Nwf>bl1T!EcC7})9v=!hh>)%VBYyJCq`TflQ{~=GmexB|i z3DqlDjYorG2yHSTE%nT>sC9?}xO?{RFJelG!cq*Zos~nEQnUNTM0`0h?^}1gip7?4 zY^JA#+i=FP3?PZ;HlHvhql8SNR7Ul})xcNI{zUb$MvVqd0WM+-Fa(F$-yEhPoYDrF z+u)Ka`WPXM(I#O=muLGR5p_X0=xlkKX@ltx1yqO{V$4o1=w@-GvOU+J2*#&xuqvC7 zn1fQljkkIEr~;!HQU*lbR3Zx30Fq;m91z~hrMLm))k>PPCWZ79hyoCcJdbTY23b%+ 
zEn|af%;PG2G>@%N#$kbc4L(L$AT?w=qG*}_`L)8+JSH3z8+l^E)#h!15j2n}PHTWm zBTC$ea>)UWYqua2=jeA1O3az!l<)bpHH#vxX4X8yZYZwCjPO^;LDJMb{I)RJ*#LQ= zZ$+UD2jKUtG1Ms}Ntqb|r&Z^xXKZ;Fhx1}hHr#K1XCg>tLvDi6JmVq(^87hG%XPAd>~q_v)V~*4%AR0F@OwcTaqvu*dY^hAru?uk=Uv@U{eA3 z*W7%vKe^m3tDkBD;~f|5OD!8i60?DG_J@H%iT%ax31ok=cslL?8^X*N!hEpP3fF=m zKobB%XfqyaLD=dL2*wb~oZ*r`Y=jJIyQAkSq^nxxlG^Vml0qXm#pc`D?J%M?k|=Fr zlfF@#94^*s<_nrzvBkznA&lf_WZCx-W%=a)-?Sb4EBxQrTb5)0x0h7*^ZbV&^ZYpe z&$Q%NPqnA!^1S({q<}DtV*<=CB3a0E# zG(;P@*FsA+PWK-rY-mE@?*z$Is@kP0uY z;#UqkDRd$i?wr6tAUMaid^R#^-mjETL22HNWJg@G4dqlA_z*EfA^^fD!fdBSn~y^I zAJivk0))hL4mIyovDw4Ih2`@r$yosTL0iDkde*p^EqT1pZMfmnW@Su8^5Q`ce`IYlEl&84-eF$mS? zz|GFMaA;bvv%<+5B})_*CUAts5H{hE9#sPaNhG1nvO{rg;ZXhB2=bf&xB>#lO#D`N z3hm41Gnw=FTrD~(w_k|cgoVQZCICrb7{@%=voF4RxO1tt4hJ_M%Yy9=k~0Sq#jX3n z@<0@4$efKwbG-&k#r&h}iKHNE#|kjNJUKm?+fa#e@$;NXCRPUve7o}PVEF9wToy+* z$Zwf5o*k+QoF)m3vfbN~4~!v$!HzaB?DeDy$ZK*;y9H|@Ws+k%wh51Ruu}2;&nYZO z0%w4H14=o^REyA7yVdNd9PAEpZns@NIiqFon|+DK`wgQ*gDDB<%&l-8WH2B&#jzK* z6P|pxa<$3rskKYwmJR>he*b(Al(iCKn?XXElda7Cvd!G*Hxd?TE%2^l2i(tkS@Jkp z?{UqQm@l@sk2M!M4z>K;STd~%jeQ+OE?vuP*8cz0n*P7{6x9?}2nociB+^5p-5*Wnuv1N|ER_wkeE%>VKE>HqyP51)8}^fAi- z>!T*EPnygr5G-t8C}N-6=fzX4vJ?IvXY$;zG1w6akRHo18~o9oAQhlEjNmlz;u)gd z((Gw~`7CRd+LXTv!s!wDEN;EWKc7{E-KhC)!D<6xKI^c1r}GcrYO8U6M~UAJmaDk< ze@uqu$^S|CyPO04)%&inozjoQt8$PWUXc(WQN zVT^2#{I{=4<|t!UCH<{oS~X$2IKL+u6^4-PcbX$}hyCS0=k98*&I${Q{hiCP^bM{k zmQXCmR`YMfmep^anQk>g!qFm33BE(@D~WK{2OwzuN_1z3bXeeXGw77@IZU$hmjk z(u|#L^;S#5Va1=e2mj=y|2vj?1#9Cg`oC9h{ol*W=coPuL!Pfm1L2~#5NjF;i3!c+ z4tBB0G4)K6x$t&`T?jv2#f3LSY|oV`pwAd^XIVL}mcx1M%~eXYxuyj;zJs7(vse(X zG>?h-bRza>`@E#Hs1IXMs>>whTcNY64S$xR7Y}4b5s{(bmOmU5ABn%Y6In{nteM^Z9O1KKMVM zEL?ao@U{4_uUwWh{^RTY^Zc(L^4P|Ia~up}KNB>km=KZxf986AyRiA;IgYv4zl0eo zRErZN@}1iF+WPmA<<$RtWYVAhpC9tDr#R3E$=#B`O*^-hOl3qd_O>(wq)vpi2z&8% zoDO9#e`S>zKI?~(URlBp0HGUxiT8{`E?TBv!0ydB~1fP``$MUWp+pNmLEb}|uM z1QD+zh(s3!P?NBj6}A{=w#N8N1asnm3tPp6y%Y9*)@+0wrRWZh%XH-SQ37DVj3k6P z8U-$bjpAA(29XX5ab}H69dA)m12g#eAh`d$zA{xLSRNcZneWw0mAt^>;+4d 
zyX1&{!CfkD;7%v!?=EI<-mtp=lD#CB+sX{si!!6xdsHO0AtCF_HLTj^tzI!3)XxVF zY&o_`n_JGTFU__hfIRTAqM_W^YFi=5D{ErQHkPlU82Qy3OY@DHqw7np@QXlkak$k5 z?E-epWNQ&JH+m8L-GY`Ihs>LrSz&7DDu6tY)uXu5`hIu;tsonU;`giDz!A8CM7T;L z4%H(R4e1T6!^1Q!o6Vg7aOaM&2WSJ~#6i~fU?cn}%tt@iV1xm|gQCOZ z<*{lVqBrL%ap-Y3ABlu$K6mG$!Ya*xS@BkXS{S0$%iIeS@OL)SC}Ndw!*UO+T!dW2 z#61XZ)LR#0nsCf=IL>Hh(}e?{GdgF%xay%4%WAM+%X(c}Rpb_>0*GY*xHA8? z1TaboR|wh^{FhrG5z2(Cg-e6;+j9~zKqy(zS`?D=qE#hL@Ryh7dz$z=^KGtgaxgTx zD3y7vMWlVY&9e{51es`+1ZG?QD{~>U`M^hA^NqNpWd?vbmz2GBRk#8tcd5Mk#XRio zE`_Y0GaUTQT@u7JOz0sr7YF!iUxey8^B?nt7sxlA)uH zAtXv8@K>oBB1y<(=PODh`oc^#f1@fhvU(y3F<r9fj3~|LWqoZO{M!WVC{?AP zy)wWmo9kT*Tt5fpD`~<&V1(Ah-icQjXb7H!k_3BYjnbSJUnoeNH=6~-9h_5ODE72Y z;SmEDf`kzgfmy0Bxi~6ifJg|#5G;2A21y~AQ>n%viYnwJ68!yQg=5b4Y&Od&f!_sO zp}Un65Zg`u+?H=el14~9ibFJ_)RK@Iai9u*6_nNTQ`+t`DF_e)iV;c;#rbj^*0!&m z3Dqqp0u`L&TT++*JEq2+Cj~1l=VlaS7#-!%<+~?&sdD_S1WL8RXkXj^YUh9u08oG? zzq<8+o9R<~z|GuxNP58iWFGSVO#_=YY?5cYyutB5GqzBxrX`mT_W=#Y6 zSKY&+Fm3+#vth|9wbyci7Z$TvS+S)|hETfD+8L`j zBN)P}&EEux73LfW*8Q9W6pU@*FgfUki?RihL_H9w!*5zDd~^u=y4^)>+~9suY-# z9No%a(9Y#~$9{ozhFGVHecUfwE|CVUbo#BVqo&T z(*E1w6>`k;xRw6fK^2k)^K;+*cYsNK`!os{-+!|zl-k3+shcOK0`;Jp<#^S!pLhQp zdJw<2#9o_)ur5?at&-n7QfOfzoGQ=HQo-O@?RxnxUCQff|H5`K$F2R}0>*AvzYZby z9sC7WRBbNAFSwfP;DURHfWEgo)$Wb_vIPvH(1~Wnyh*9u4N1~j?axI0gvojm` z|87-Z%u(i|q0&o7_*P7$6bKK;RUr!Vo@pmrwW%>1a3M zlNlokqg92&RxMg5K<3>j4?nh3Me*j!-gcrq>L~Sryrxmk;x=^@qGX7i9;h=oKjAC;>w{TbC*E$qpBuNCP z0cFC~ywqnV*FvV|U_h=ev4tf<=lK+ObHo1!bDNn-;{W{4Da|=pAoy4Fn61wPbTC5p zM~Q=Lk4ad6H}lA^bkMPh#9!SR%MwBiggZ=RwdV=IjTi>0Fhl`p(um~v=)gtr73Yez zuASOlCJyM?15TCofP28HUfrc)Z{|-gpYBqzPtT@3BtN#0Ctt{mc`NTO6?^r}sp+dR zJCUb6ELZyz&&{K?^!WNj0*m!taJ%greAeIWi2p9mX-clt5TqJmqdiKBHWk9RM9G!O z5LUQ&5iRijZ!|EwOT_^_n>7`?Nm*h{m35cOyn0&x?k@A{$(CX0Pu8<(_c*$z8}o~M z$NcdI)-Q2o&>)W$ws$2JFVtZh$px85M0dNIzIs}l2 z7D+L1n`#m68bd}{ikU$HT!<04#k2_la=APJ#Bw*W0w0NQQ$Xt=nq`ux21O_Uu>;wc zz!3mZ0Kx&G(NG9&*}Y2>pmSV^T};AKAiaU%MY4bZU$M+fEDMN}`6}cA3LihQ)L$W$ z+N~Bz&wWrMU00SLD 
zvNK@5=axujG%27rQZ##=Dx$_95`h7Vv(wFn1wbNV5|mK}6!W*lwx4VOFuV|(acqtW zvtQToR-6bndTxEzSwSJhaDnr00THXy#}{xnP|oajX3k*5Kq6|0MsgC^zaga^IL_^o zUT)wF`XLT;^Z)no#jI&8)Qm2!R-Oua|~ZD zUD^KVePw@?t&lKq7n6UdPz19eVAX#Yr!r`jIu-Qef_)T5e{*%QCPB$o8cFAE%o* z+E$Sd`DY(mbgtvL^Iuu8rN5I=mz2ZP!8en~FeT;oOnen>?#XlSOMpb~aK}r4#5<2~ zK6!leb@)vtb^J-@_>-5NT{jR3=@B~5M3jG%m6CRp!F-UWJq@ogyBzYOF2R>+=Mx)^ z&G{kE6P$6o_NOAYN zTb)lG65ZKMTtEj=K#yV=N-;Jat;=oC3Q&2m0T@C?8LlG%tc4G$X(NPjmsn5$A`$M= z8qNSz6R=~5*7%VK8-)T$ih)EZ20Tb6j2O&Vlscmp0T2!Y9YK<8pa{b4+0YzRoS+%L zXaYI}GiZz$gNDUN4G@wfNT5e33eqA}bI!Uaa31U2t3v=w;FyWUly_S2#;g`iLU3Nc zEci_6Xxcz2JUwlri7=sNud-Dm23ok&mQQ5jPw^A03BAEcBVsd?5Swj5Pggwx;V7;Z z6B@Cj^;Eb75CLt$FM@Ehp3^zNdIFefP$8f%LK4gxQ6Ye#_1xNs0F%~1oWDR#kX%fV zJ<5&eTK0#Q;|brix)TT^P(!#tcS5)bq5uR87=+@2_QQ07eajvJCuSX@4xtKlj1U|tppf1Ls{nZc{0M~&t8=+6`S#S=-0WwsPJzQF4kj!wHBAf>5qqmvYCAXM5J;^91g%5J+zw=i@vK7n)5Sn#GC&kXV)kfT41yD) zgJNT2I>y@liW7O)j*QB?K7S6e%I?RIn6RL@u+U%!E$3M`GAhs7TwRi|OVdKjF;N}E z@@(5-eW7)S5cm`6?0yT6Y!?<29_OItylO^9tg_VcbC8D+nIUT5YO zTxH>s-L0QIbrrPlci4(vc!j*c%6M+$kD>_@(jsiAv*0O^Mpblv^t69_|4;7yUnmYE zDc`>7%j)^c{V(29dCvP^y#2i8KlgwAh$qLzPFA~+b5RDjCn-k-!0q5NzmiG3C0u3? zFYy$>#Y^fX6-&LuUeY+3pTf&a;VqZ=`gwZ?$bIDg&7?Agl#f{3+ge1KPe$h zv+^nH4M==WHU&r{j$76oid(rIxn@u;LeUB!ll%L~Ah}wN$o&wNzs4U@!*V|#janv! 
z<*>iXTdkJM{Cw0>4FV$p-rh1;6#xbJdBMJrm&(_h>!yy-BMP&dLW|Nmqe`MC^q%Y{ z1(@)(w~vjrDUt~iw(JJuOYwi&lobOtMi^l+&SvHc6}W^c{x8;OWq-4|ZPtl^Vu0u& zl)H)o=X@cpdE6vsw%i4#7E`ht?`k{eGqR% z1xcmeGQ?jB`T0s=Umsr|xu4w2=O+pNOC%__PlEFNlAu>UNl@nFFY^!hFG&!VN&RF} zwVzre3($BYUcSDFpP$rQ;{(f(0DoAmQTxhN2qF)V1^9S*$-R8lav!yqmm2bey)<4b z&Ht}S&|EE1Ya$4LFS%E1E%Soun@y!eRG&g<`OSy_CEqN9{hhTcA$1PE+_Ld3nTkKvi zweK2LzY`sXJwZD6p6g-idG+0?_K`6kH*a3Dr`EVo?_wRD$|2oOe|ftK?UJN6o~-}ib~x7^dRgC5Pf+;z$B+LyPjx|8*JVhfL| zfs{UDg6p)3#ik&4=I+0W6O|5J4($d{`E_;*KB(fH?%>R<1bpDpnlX+4o-}j8xxdqP z4RJd<`p(6HS&OTYqTrHFfp^ub8*RBj8@zju){c0=-z0;?QSF^iy%F0$ut`=0IW8*5- z7jbx2r3!<*GJ%k=uY+MO~l z8nvF%ozeYl*`z($8Qy-*hw3znigcRgKQgG@T6tvEQ?SR)uHmo38+Fg>Q2AWd<@Hw_ zdF|TBbxy*E8zqA0{^Iw!?$~ML&PsOet5<7SXqTgsM;iAEN!_-(K*{qSl?dAI|7OwW z@t!?K{Nr&aXxy@Q(e061RrY0Ge<@P#Lez7%?oMxabi}j)aNlE3N-NsFzSn$8K0a$!;?+_H7w>&X{eDH4_hc)#@|2PF(NiYYz53Rd-)SFxfT2X`HCm zl+JBVJ*ZH3^RjWtPRClUDn6r+IIf%;_w=9IPPu>KtRqWIhcZf@NISl5-JugPjp_Rj zGp~bPWik?u{8q;4+9ap9B`X9UXb8_Zxou;mKBHe1cNx0yx5jf?wmz|KOxz67OcJfT zqfhPaYvvQK8{HSx8#_-9jkqy;&)Bv@=VcBz9oRT?Z?BmlM4gUZ8m^=UH5fYoWcAOR zj;$J3T63twey5IyBn<~;Z5+8(hCCZD>9pEss&9~Hcl3uktBT!!d^UB3#@YXoF7tBF z3K+gT;Gc;NHl*v6*<-Hkt1pWXo!Af>ieGeA$C3kU|8cWl*K_r5RGac5<#ciUWoDvB z$fW&ON1K{fKU}s_%wH*W1~%(DIR0W}y;sNDQ1=g~?t6J)-;0m(;njbMXm_!`a#Ciw z8uHbNud6RUU88lU#}a<1}d~RP|Sn z%&hR)LvOW6+P(gO4LPyn!LUR94`nt8nc3Q> z?65e0TDre+;O8my%pp7C{yK3wwC1Rq?<;SSYr^+s29yrC)$RJYjvL$8E;r7rY2(XX z$ByvqLIyQ(TG=k(p==h^c&uS$)XU;AvNyj}Y2@rW+}~^U>>e|8hiMUF=c7OK9A(~U*0upq39&5Gxbwx&k^zW>v z`mlWFLG|J7zr}1E-f14<-sxv?tj^EFWtPf z+nriF1Cl)Y3|g@Cc)P8j`fM3?z4FB>!OhP+?9un*H2%^pnKs<$32X^xVGW4hc!3O7S%7IkJm2jqN%a@ zTKu%tZC*SLJ-BpdxoN!;+cpUdIJ>QAbkc&Cw@-C9ICrdxm{8NxfSAyPU*{F6mc6xv zo02HLf6$ikyK1F2TeP#!;}^gD?rdzQOz(E~V&lhG%hV|nc=pug2wnO-)BH0t?i@Oj z*vu{a^wj!3lO7(-?69@hKIbt#yASYR<@_q6d%ce%BGTrY(mOX3D_&3PUbb!t*O4{O zEE-bxI=yOG$F!A2nk~B1H*3P=F%>ts6(C>EWpmeUi@v?C(a3|+ zHrHA%PhGZUy^~9It-h$igDJ?-eF^s#HS8{)kf7f==xMohH^2DB^Vd}V6Kt0hp|jrK zJUe{JhkHx!rS82<#CP3vRpxeawfwI<%k1vmxl2a- 
z;dLvd1r%F+=txc_SnGX*S@3WjlM@g!$ zj;&tF{caRr-j+*=5+#lQ zy8HQ?9jgZ{jYvD3T(9sEITx*@*)F=m!(*|>wt#vMpEG?-9agJtb{J|k<^dBypT-RhH>aqZhQX+M49 z1!V0>+59f0s<(@qR_);G2N~_lc3!(;tG|fM(uor@uQYYpy?DyUze%DKdC&3_<&!4!|Dl9wcV_faPFFRs4O6t8Ub~`Q>jQ|-O`0O_x(Qj^oh93 zlilxro~>Fu?>KxJIWrR9HYO1FE@#FdnDdO9C!W@^yX6L^|UWF zDyOVb{kl@#zj9Hx*r1vha3iP^kb(To3xvY`Z z3k;j9$+-B-koIuZ`$@;o&#o`=oS%HGYF4>tyF9P%J72xyHe+P0-=GS|*Z&&ZutD*a zJq~3l#0~r2z7tY$^=d`;5%ijq##e!}UNpRQ@imsTbk!ceKeDgv`twuT?;k(?o%AK} z0A>0+ttd5q%#oq9hG$#~JCxRLyvOOxvDd@1AG+^}*}EdDR=~6lf1;uH!ip+aKe~16 zk+#nk_qMf<^?kah%*b;0KR@=Jt3F!s;FPh~PEVU$xy7Z0b6r{{D$fr%#Z+%j7_Z^R*k1g6X8T#Q=*#S2yC?;?DtK{3md#1H5_gTMXWPFLBW%sJ? zt-5>NpvTu#rG`kKA8Gz^0OAy(pZMZ_AI}tdktYWh7Q1<+->uU3%NrkEzQ68Tog?L> z!B^Egii|J2@b?9yr%j^8t)^@gA6qG2Tlq!R*q{$(jqvOJcW-syzhTjc!Dmi2slIyj zmMv|hXM-=6p}+Lrp5`;Vk#6;sONPtauXUK!b>&Rivjh4Tb0XTl>aT}8hyts4wSW1p zWYYbXlB3r$!WR~~Fkg8qdXm$Ce_Y8XeU@oWT}IZKIJ$3}JIxahFV6CPx+QRW@!e|Jzv+ebk}8FQeWf!52}`?ZMfLabCPk+)`f#Ss;=2|Ozsjqb6zhucjULfAQ&Mn=Mb0YYa`7o)4(JW%i$YN<>XK@!{5%{i~mh zZt^tduy5n3aWZAl!#3H=F3cPhqMf+8d)F;ky+$1ub$CAYl*h~u3rgbqu8bcxWgEQy zxz?EZE>rXQhNt$eW(CiU^*L-nl+~ueXKIQIQ(iQ8mqH~vx8A*S2J<|2a zgA3Ra%^R`L;L#V3#g04K|9$YLtHJc+=>uvtT6KF(Z*2LSlnoo-+?4#$b>iTuQ*Sp; z4f%E7(6_tq?eL_E;cK&pef)TG>f`Jid%yhoC2icjUms_0evp0U#qmFjZLL_bb)!#{ zqT1jW9wa6Q>z){jJL%g#9&^BN+oca9@ZrU-UY=0t@xZu^L*{S3FnRN`lx;!IgwX|d zX*hOp-StZ^o3_PV@SJ`6*O4*sRO7bj8;AFEy^7tLr&UHA7Li(73)Kc{8C z-_Yn|8u1QmmR`stXTua}qIxlu! ztJsG-<>~DX_bfW0;h>}b`h>R^ih4}!SaITx_u_?FtK zEgSdP_4~RzhfCc&d~?RBg^Kr6yHdz>=c%iE{L@b1u??7dH4e#q{Xvi5v-#TVXTgl4(!YM+1)<+pt(KkArgAGGX<2Ngfe ztlaZ&%}7`0jPPTacfalnANB9_wu2(!*WtILSB!44xBH*YPFqHSfob=BD@|P5>c}|* zQU7$j^j(K%3lA^;XF{#W!#kRH2wIk>pAGp8M?@ zOP8y<@VAHU4gW0Ze&WlAtOLIc8L7V9bzk!EU*C+Rt{9Bxl`qz9ji|MHb(>Wur$jtB z6|!*r#EZsbtA07(^SJT$)>+r|zdL^p{P6rs;MJWKuGO8B@vP#tUGBRF@0j)QZROoY zYLo8y?frirzOZyn*1n2O)~>MiEpUD0u{Y0}6**T? 
zvidjW$DM7~p1*VGC3LWdonYoUYN_rOq=KPfS zzHfUjesxHHeeIX^*DtNO@}}9ZcZM|Gl7PM}J#|a+sm_}&G=1^rpHZ6!UVQ2P$BC)y zQ_tTmk-VkHrI*sbhLo9HVQHt$^;-8^ydreofKsQ7HCOid_%d3R3|$^Ns#l4>k0s*x zx~-k(x2#T%4AHGAwGQueqL^3Z4$c>%zhwUI+4g3xg zZ?dody*EBP@W|QA*;{C-;Z5Woui&j4%V0Y$JbSwR^WNlHo9?Am3~#g~1|1NkzwDh| zacHSkpOk~2yw5IWx_`0b@l88!oGZU$$KjM&qcYy&8V`jwHgd$4R$0aGOglPhbICt4 zo0RETb^E;2ZR8D>MqSx-Q~ofO~iBwXk5z}vBDi9MIR4^6!(xmNAd@muv$#~m#? zY0-?~RbV=M`R!fvl2e`MBrG8^>u+AAC9mn9eRA)s8%f#OgIiqsl$M?O zKKsu$$ioLttB!3iGxt@XLe|T)=Ejy8&TE_R{Aba?dbj8LHUd|M_VX-Lq`T<1&0<4csD==CqY$n`+{*q+`qL#O*!`n~tv*4bm8p4f5k^&mX!a^lN9F?3q( zWyN-FUfn0;jb>)7LHO@^d_aljes`)p?s>7}+_JTlr#&xr>3`m|HFWU)y;m=^lXqRV zKtExx^?zxjd!4(aURjhb*>t@A+?eMFuOGa=qG7c<6-pi-_^iq7>jvpxjjLW3 z{`)ksVO4tj^?I>oR);tIYs|%~4Mz1U@wnODN*;|Leb#*GKJ}kx-P?WK;`Lzejv>`e zuYO(sWfS?O^So!%od31lnPnU}9Ue={%>)NJnV+VIzT*YsATJp-f z&a<1|8meqpZe+{-i(;d$PI>`}CIp-c>okLUJ+e}g3_9t5c+=!#CF}+vmC6?Rr2qb=Al2n`<@Q!u@yh?cc2~-K#e@yP}g*cJ^KQ(_u~v z{_9}a|ASBd;=l5LG5+iABlFE2|MmVE|NSA)x5R%-rB)03$88=7N|zqKvBCU|aXW64 z;h?R&VUePX#%28S{I4CecXVIBrQhSHjRLBa?x|?^40K-9q11Ru#Eju?yZo9KO)nk% zW%ukWyDlX*D0AeW{<{NL1nzpAwbLYjb@xO+;_6QA;)JXNt>eSHHyGT%gET89oR7V%8?hfcXb&SrrS4a(6|djS`QdI`i^u-*CWlg z+_}0fdwi)4Rc3?zRlFCMsMWiKW|QJe+zRjIl}c1dC>GK+;I(`BBIj{#F=eIFO1D5+ zsi>55=Dv>OCbwTq>nfI8H2MzMU-VF$PQ{U;#TPrxn;lWHmWUkJc^X>wTv7Sqgao}| z?TG7t`0hH^x_kP^UJcvapx2ffj|`y^g!nJZZiC_`YUC zo=K2O_9y~t+9a!yfuSGFp=h1`u&K%)6 z>{TG$Q14ZFQh3=<{)b9_uIAKQbarC&Fm;_Jhr9l^zg)>9>&Fx=I_b}G>E+tE_-|}b z*GsW4efm7-=i&`@)LjBDED?ztRgWyCE?KlLO%2}H7Rea%w;Gy&Sh)Zvom%MVem;Y5X_}$d$-V?Jn)W2S%@#yr4OZ#b67_?peI<9!#Kcebaj9=LV>*(CBkD^&bm6AK0PnD}s zYxtv@!*3+5s~IwJOrI*HN2R$AxDC_|+%g9%st&F-?@^r%r}ngtykDl2IKIlQdCuof zZeBO;hVpIafb9#tq?LxHwr#h$m2xuCPKyjLm!8ph`0-(WRd1Ek40qkSvby}awAiCj zug}am^nA)Em9%w9aov^+x($Ey;Z(JAMSHACPndSFeei&?WLA2Ks-=e=GR*#U`Llqf zn)LQxMac|#JvJT45n+Mhy8-MM?KKGz`ua+&peLQ82=bL)#X23_t zPVq(8xi_1?;Y_)xAf_1=?z z7l-y7nKG@xm7&ubG~IdX!RgvPA=BipJ)6e;lX19r=-VT)8^`Pk`*JIxJt1H8>d|pk 
z7g6A3-=VL&Rf*X;xv^VT6Swh(*-t)5js}Z|kBlf&e(dv?vzx@$E@OIIGB~(3$R6vT zIpL3Kp|_5nYS8#a_o0;36+E9u%Td<0VT!+h<>w-xEf4by`<#jk5ATm4B~Pt<>~#ndgd@fAryCr?ypHQY)A2 zq?Lw*Y}dA&x<>M(%>86|{N+bohn6;u=!6#8z4%7oIgQr)HiKbho%Nds?LF;0Bs5KY z-Lx&ZYl+lh`i9dRdj0LME_$MoY}}A)!+xnYrQMfNVS6$)Wdr4qq*7H zxuI{YcZFC`rhJ_fApB7KTb-(fQNJaxe7$_v)`XX7vf?Ky&1yO0Kv&-)Z%&?!+IwF% zxb20SrOv;so^S-NRI(-_J>@&{m7B+dvXe$PtX?PL<$aOcqBkk8&U`*zKkB{m;_kn$ zXB;^9SGDR>5AIUc_B@*%;u?JAc6RB}ZC=HHNvj&=eqvNc#_0~l) zNfa zd2Jc>L8a&n1?Z?e$G5@vhXs0Qz-%l8ENy`@d!^8GBLPXA1sDSXl0YF2I2v$ff&~G3 zI~tXkgcQCkL74z+g@y(yMVt{Z0(%sIh&neb4509i;6Nrpl0XlEjpgxV0fvwlu`R$L z^QBz}nlL)XRr=@{Td9oC0tFg;l669=gF2#ZR`B_i;prF?0gB6h;>6YEZJ`mgG6cmL zfnQx#S(o*5gC?|F7>e`t+l>-Mu@PW`+*t7Wbd@&~WHaj+;Q)d%kT(1!_#PBy}C@nizzAk~GJF8mt$0Y9Bz zp(gA<&2j&`p8WQI(R{+tBz&dgfS20;H5%9bKQu(GiFmvJzlkS1TfRT=`majRXL*Jo zKO97#FdJv48KMu;X;tZskpQ2e)YUjcw<=T-m!fOj1N0WX*mfuNWnX;HD}V8n)AT3KXPj@q6$2u5aHskb9wj(sIX zO2-JU)e=Oe%-BfbSP`=ltr^;8MT4M3b`e*biOja%Vx#-j~*EK`FUv%b;E*L-5TJb76^c3F!hZfvi8Y;8fcP)22MCsadv5O%m|^z3>Pt>!kq>O9;O&Nd5-cwvTIT=70=(&rrd+?c(z#EWuzg2D;P^lvsJ z6CB-|;TxBgM|MKtOA#dl4@C9ix^YK|p{ZW9p5@oFaKOV#hgd*3#CjECl(Ew6Y@})Y zuP7P?l>bGdDViGa68T>p7Ut&v7pe|x6#7>Fe-qDZk^enK|CR#1yww0+hXz2CFc}&} z9|y$i@|g|=0G^82D-J$u)Axb~4yhpm@D!J#3V%S+=m^vdCFYOz8X)owB=NFf|KzsquAk^}oi;i}K{l|0Qke6`es} z#{V0s!`#n*!Zl%U`Tv`EM8iAm?brk4jxxZ{;V9$hz9Bf-zQdQsE4R;&r^>9;|IeAc zG;CHH*NI$9CvrXAD{(r$!*o~7Lb^?`=yVHm072o9;y`GArIFK60n5rx%L-OOa;Pd` zak=AT0jo$Z%>`~j=J{8^I>;5e;P7S6iv=Ae(M8RpxbjstseJiAgTI!0059SHA)z4= z?*2dF>bK|rZ{%^6{|yW-6aRVAVlc`CK^-f`nS($by`5jx+Gar2iXb7YYE9C5;K$QR za3G=-Kaqm#NChA#R7SeGQYk0bSESsx&+B;dTmLw;lC<3dsaL!X__FiAun-UbpGIMC z&;Q=YBbVqftCe+X^D&OucaiJ<-%4mj6KPp1G{~H-SuIeb`y!&RGQz}yaRzgH*|@&0 zku6Fm9oJU`)MPV{;K`h-#aD&XAb|KF_jJ}~5tU3A@iqXtf`B@zR1p_dON@m{{4V0> zrj+_M04k6JSThzH9>Lly&L$jwKd+-vug9bXK|t8vWWywlk*kBcce&54j6?2gEQcv; z!gX=JPBJPAA$jk#K;}OCD}MIusz+y~g@t~5C_k@Y7(#^NybmY7Q94(@+%riwLgGCj zKfZ+6A@oWmU3LI{`v!aYC|z{`efw%XK697}IX$~Hk|eug;ZehR|3Z!&q%vM@y-NL$ 
zPE2SO*IE}Jm846EijQ@C2LRn0F1*I?YpqHRCMBk%woZ;s(IqCuCP$?vChG(gIwnV@ z#w8}`5?i*7jZS@yc*xh2%Q2EC!8$%V3058AllmQVmm(a>I}5sh@z$0{?Eo@%Cny7P}`w zN0AKj-SU0kY4!lhIqnG;-0&?{b-i?7NK3c~1IuJ}G@~;atiAzw+Ql6R$nSUOhk;j1 zxd;Up!h;mhWG)SxAey#VXbNes9+}`LB>IMpIJqqip{ht0#$k}T@k3_ zELKHeKZQT&gfRqdV?kU@EUQpi8M+rVa0(THLo8S*$I!k0cXoZ}$JR!S255%a~T2TgO7}cJYyir5EO+qr6j?=9K{4N@P)f$ zvUA7^jF~UHhYY`Xf>OpwK_+F`@sn+Wj3;{W1VvbE7C_l7ddTpII$~6!&D0UUR~(l7 zk#&i2DMKsDg`&g@gf4I-ZXjq71Wlz97RYj##fo%mny=^Zw8l}aF}=%$9RP$&swm_Z z02wmf1D)_lAw30Sloe7@NpYPtDF!pNIL@jC^l0R|u8>Z1dfNyF;#%1;Q@U^^N&3^t z5M?ht7bXB!h9^djfU@&e#64tZQ69u3JdmVjLX7fOT^7c*;8h(_UJHt=YaCdwwdCtkR#8}# zTA)+|;LrcJ8NdmaM;%wV75tZ5aE(P<(4M5D4^o)kqRfTb<%5m&3CAnqzLpsPJ_0v-l*+GP{Nonhu~7Bhj{{!0 zOK#zgy&#isU)-W0`>gI2FHm4QDKH+gh|7iU3X-tJsLLMCR&TybKEg!%#K+qzmcj9e zCjBaaJY!B56`CV7b)f+el##($jHr%^1UAb}wHE z4qA#b)nY_v6fv95fR5m?h3x@F;}8YbCMd|VFIxxyHUaxOsw`};+(VTYx_-U^%!>jI zQW*q8Qz)*v!!5E%kURsV7Z!>)sZ)BCz|SSzx}UF7R*b<&5|W+;cqAof9-V>2Shk3h z=o;p|DAnC{HfL0u*8;A{-Es$nx+&hjeN`lV1ivpVD6e7i3k?JX%r~-vEyrhV@6wr< zc>9Is_)gcg{~S~E%nVSNVf5whP~e{AC(3!obuTgp#J9p-SLK&obGtTfbOd9 zc(yu7)3-s326gJ^+pZM*{mr)Q5|sXxn+|~8@rDgrI1p&qpoRD`3RAi>0VKjQ5N!g@)!MuS?-+$$OHQ!oZQcuz9OeXi9QUzN89R zq%01F>YNWaIxEJR3lGehW2>-L(`|aFBQOgR_6l8_q)oDPGKH<3$#=d;wcVWyy;Upp z)aI9*rw9X+Z(^QThgZZg&nLsrtAp~)1@szVvade+Qf{GFmSA$a2Q~-@79^MMY(uwK z4YI@fx`{8il-|y>%_4}!@(g)J=2`n%x+LO+;TD1t)J-ETSd@!yALMb= zA=5Mu=as;C?dr=RSTT#v=b@M@2w(E0$dHSw%ZtjHV-$qmyg56dm7zIk;AI;}FWzA5 zhyt86Rp{{(>RSl``2x=5zb!60=uu~53MZF)y0jM<|9mAODO;XTTznZ&d?nQ@G$6O! 
zdxfem`RBiuu@Hcv*(u7dCsu#c_6}LIZ`Rx)He58(O04|fVJdM&=09pFk)gxuxsWLo zqY#~C6z;`5mcq-~nG5{?w0Gj$=ih#O`2Wh%1-yC$u>AdhHR`Z1kNA)3xA%YF%=7R0 z|H?_|f5Pe4i72-tV>Bws>s0(i@#4HN0tl!1=lP?As$XaQN zCG;c|eRoBN&*=7E9X|OnQt`rR94>27?f{v8qays24((-`#XVba4u;BE%n{*3@-Kdg zb8CXbsg?Pb!w^BF zf#6^DW0uE*@-af*M@fL|jY-JAJ9y+vI_O)|#7L6Pgm?-fgk+5J>B#EMphDS564R5; zEYtGGho$8uHY&Nwl9Q%ZM)v5A=cv2m-SM2zv=C(&|7U1;T8J{dM}zLFH(MxJEELMW z)ue?eLwmR@Oa{`+xm!L40Phno%%gnwc=<#ErxB#^>hCL9>|gm9;AiU- zNf-&p0t^jgADV(F$t(qEqmhN2^Y$)BgN~`uUNMQy!Yo#v7pWs7Bb4e;r8+WI9ii1k zYQq~TLmF#CLcCT>5K}}DtdKxgT9R^PJSy6})46z<3~V+6FAN<>=O=IQ4uP}llCM4M zmLJZKEyd(uIHkK2TWf(||fjA&2G~MWQ1*Ai}B#iQ)_`g-I{Y1x~aYeX{ z;}X%udtKjGapIBhfIKWsVId?7ZUe6ZBE@X!RLI*v?%wVEoZ-;QKm*1>cLMu2c4;5y z4_-;n6F854v`^ps|2}+4IgO?GIN!}@LA~m^t_9qyL`qbZ#cBS<!Lce|Fnq#BA+HD)=%E3# zu|k4ESt!xyq>c{a0%nW_7J?)Rl%|_Ad+M_iRB_t?JRuW2*HHna;bR8QhLJ)La~5C> z6p|JQ3ZQ|;eM6Mv$3S!n1z?tyfh-RmWYRVg=QT>R%>)5P;lNBY3_2*nC~rEn0F|OS zfHE|iA!#+*NFEKRjT%6Pp&4L-EQ^^S+eFarX-dS}b9G2yQ95lNQ_*N4Dzhdc15%=V zS@@YUbDWjc1_!&ICgQXKr7|=?D<|bN6#c=(D1J%+;s8Ssp%lCn& zE`eNF2w0nuhrntz+ zCS)c|=E8F!B%;6FHs5ZW|AB4uwNb3EAUFSg2LvccL#ve5-aiJ3f{ z1suBszn?J}P8c`-- ziq5}wLSp{)#aO_4uOFk6W1~`IV_N!XIp4YoiTTzR>N2!fnwDBlPHZ2WZ`(fWORf8a zAV!7r`YkS@O>A;ps*jfQtC^6PUtN+m5#HoLC1{`6IxfLSd%kNWB>Ju=i)rcgdva`2 z`?zS7^3g}TFN2Vf_%axd3%6dsrgTh-P41Wyo9v_g{A(v9das{f+}gs6EP}(i{K;2V zA^NCiSqT|NGyCK^2Nd=M9{?m7K{>{hp{ue>vFzeMXeaW7e zrFgviJfGZxjiUDxRc~o0SGs^9Iw=>P?g}>Bks0Z;1?RA zj#P#;R;o3r>PA|PMym-`HPVEKgsQ_rH4Q^TwISk(wYRH9tRphI2$4lbI6=#20c9ff8H6J$9pz||p*)FFmO2BSLC7zRTlB4DFNAz{XFTn!@|;|8N4 zLam2T6RD024-M6XMi?~VhR{$0)(8(ZhU$$77^F-D1+~CLb5=nYFw+*Obr>#81ZTGC zRR-D;jIJ%z-QNQFkfk++-N zHBbql1tWyy6qH~c`Ao!3ulCh;PU z@5_7uxE#qV3qNEwhV&RNM+qe&MHOf2gGyw$Qm6@K$IOpD!!(+3O=M$_hFoxC1-( zniFL2aqh{{wh774)~{c*z3RA_u;T5Dm4V%ky?DHw>ZvN*pu^;4-&J9gE2U^Yn}5vw z=aBgtcsrFn-m&V*9|O;}n69>Dm!4ej{MqB0Ps+S^q+Hn>=Xdm6_0PQve_QWuxp;2J z-4{D{ytwf9g)KX*d4KnOQlre>(gS{>>qvgRR6;Z zKV}py-z(PQnqfuV4QEPpPZ)B=e`?^2x;xr0%qgKs*Jy72wRq>@2fc%ni9AiasiQh> 
z+p}Uvq^^5FjTs$|4!M+mbjXA3D)VBl_U;_o@LKiK>ZOxDj;hkWUWG~v+P674_{#L2 ze@q;X4}P?;!=^D$&gY!oI`#cx<7*Gi&0P7-rR&FsBnM~wm5~>*r^+z4>yz#a=KLJi zXuz{IXG@+eyRP5T6-Q?HKZ?D4es&%^`O29yi;HdS_u}5&vX9?y_b{*L#tqwM<(0cX zY-0T10mY8y1dci}vcum?J0BWQQ;h|8N)1VA)n!O**E*LfjVjgiw~w!ux?lIR$@TXS zZyU4rlc?*5u5A6b{`6`pOZ^WrE)*|SWpbU;Ayvoj#@}iG?(yl$&*J-sG~GYp%Ba@U z(~g(U*q%2ytWo73s@F|SC^ob4h^RKJGzk@t;z5_X#666wn|8Qe`JXF(Q)}tIKmF_a z&+hv4V#$_sK5g{8#@MOjj;prrtXcJon9loEzZ(O~W^~)=hh@(Fy=2tp#*Y>}|1!Ay z@V|ntMvYtYB&jW|T4Cp?3l9{!t zScfq2J%M5R!s@d;mrGyXqp4nN)}xQU8-2;TsLbON1DZy6{c!b&!<#bjvfJ~jYHNN!nSQo= zNu58o?7C?FwaE=*UaS5ok4xdR$84%+obW?v)!u`ur}k>Ha!;S5HFlN*lladwJ*~dH_>J4gbCwz_H~!r` z>P(NaB=t??UlVGt8EDq!jXArsmO5T>XiZEEb*i!U1ih=EDQ6*c*Fuc0Sm>^Zqlecy zH*r*%Dw-APe^y#}tjcE{>T%(%>prX>s4um{)H`Tq&HjT+)Cw`|PJQr6lL7vn*UtZ8 z+k;_QYx_^nirfF~_ym8`ncDc%Q~f(VT3)PE{SLF*^%@PPb#LeY$K)ylHuTcQZ>)W6 z|AI=}>s4Iv;N2nhb_Q1UPg%VLH!ZsK(ZMQvmfDlP=)?R|K^<}CQj2?oo6gh>=(4@o z#GzQl&x1xCj+-^~a?^~>^C}$NUA|Y#Hj!6LetshIV9va0tF>dv6_Ms4hqm1O;)lLJ zjH(?yeWu^~PF9Bt9sStbX)qg}RmfhcymeF{}G@^ADGs zMok}>ivM}<2@`WU^^pArHnwNxxc7z(PP<#G;ntdUz{oa-{n~_RCe>H}v~TW(`)g9; zJ|?E`of|pw?Cu{f)qXMj-5)>xr}Kf0>8(FL->>nHcgQNswAaVuHP|~B8y)Q0>%t#b zKHQNUv|-IS#flpHnQkBZef9Cu$&0)049aZ#;z`}6+79aKQ~voNczCT^O~$mG5mIwY zgYXVoxbN$QHS9OvfBMagJ%+uTzfN8|tixOw*x`7|nGZ{@J25@ugKVPLkl5va%)Hz* z{nK2t3Dmn2mpr{IUh$nN>{pXw7QV+WD*5fO6cBiD$-}nq&SAHHR_43zV|I00cXauBh~FS9?KzO~m+q5h-pWpr%vqhe$laVbzgKh`w5 zJsB66HMo2F#@Qo>&1gQQ_oBgTudSv_E$+Qz?BP{A8I(udH%k#$A!8mlxJO*& z?e!Y}eY^cs{kMI7h4-!BblpAW(A4+VOj_TNXhu)Wo{_aKW@YcpF8x2<@z<0gq2Jw_ zRQ=bHVPhk|P1b+Ec>UsTSF3&(nGw``z`Vr=+iV1tW~s>w;+2p5N-TOS7RCQe2 zjCINDS7~cz*ZXL=qW;jiW9MqB-?l&AIJI{Z|G?hm7I29Q^}s($PKSC`h2~e)3lgZv zuTMN%G@?QHqmpZmjjGmS);~upjX0K_oT9$|?SsWf1Lt+?-R1jgQP<-ZkGqv{YE_*j zw?0`rOHr$&rITrXXJeK1=Q~YZ(dz!)m_3WXD>Jocdh7ZvB9Cu+Hz{M@gDXeVtd-k; z0_}wTZe(&y*U#s^^KstBk^wrp#I6AwzT8?hr{RL{df&eP>GzdwZFB>>9Y0m?_PJ8k z-)V9D=$Ux)z`6FXewu#uhkfY{1M-edsTDr))}B%AHul_Ec}$PAevOw`emFR-=Ck4P zxnJ1_c5JBB{y8zNbd8e!BdYwgU`UM%-10Bl=PrAv;exAu4v(KSrrer<;>)`BT6xrN 
z<@%fno4+5J?(Yomw7uqz*~GLO_`0vdqAR3c9`aGC$(q{f+uQ#5*L#bGZ|XSs{G(>8 z6!WT%Jv?jwAj62qcJ;CEI=9{y9k`+9-;;LD=y0^2-=lqP@2xynqUoirM7hJu>?2xj zznn30^lDIZx~w~=4mh=Tpf-&v7YVfm${vsaUWSJr0U%zC%$rCZNVUrssNvqsCy8Bw?NfzH&X@TSwAG_Z}@(`xiPv)N{yhBoDCz!4*_y4-g;-M`Yw%zATJAyRarDFegHQUGd69mj;e@J<%>xpKJ!^X5Q&1(LC4o;`F@1OW zm{t|X4{f;MR#|RwgZ^<<8=t*5DlM#I=fQ1<)hL@AS$yHeN5_UQzIJiQ-*a`Xr_H~N zRXWsr0F|{{Rpytet*ZV!>ek+2y}x0Ik*Z27QYw8Ec&0;O#K5w>*00DuX&W=+p?2!5 zjz>TGxZ2Xl;E(>eP~Y#O5erIe_f_iD#IfJ0zh87U#E=m&Frn7zdwW|x%lPu+>4{&G zhiYfp9)D~7V4OkK{>F&W{jZ<>p!Kx6XR(r34qa?wx&j8zoH(*|w^eKRCO_^Pmw}l( z9<3HVI;Nb%yg z9ksstt#pq=@dKVSPb!z0Gp|Lj>M75eC+nXbut&5xUvIm%NwT>^!&2M6S)DoJ^x^K2 z6JmFdpAw`!(yaKHn~k3IEE`AG^Bq zyKHv7oqb)1?EsoFKn_08kq4?QJ zGr3K3!sjg>x#HQ`L$;Rw+nJ@wKVw6NkC-*9dA z{X96RV*JRxTONI`dS}p_g!8|csvNviX3t`sIxAHbFn9g@VzVb0sn_B8uwjJXgm_tMH`S1Y_fd;imKcBi!d zBy88IV;NtJ>omFbjUf|bKEsDy?ECQNrFz@m-hV0WeTZ(V9V_=%s;BSPXw|KRa^-FI znduia7dNlkJ)p)<{*${ZlE-uz|HSy^{ZoVShOLZzwYU9|O&J&f|Z?Yw%G#XI$;J2Tr)Tv!!S@$0fYn65S zT)7%uZp8|1+Hh{=5!=HSGw;{AeCkg!WAXCsjeg8KyY-iUa=(A}&ub)Vb zb+rS1?rtwNqRfrwww3T(7vJ;-46Yk&W9h{|k=g#i=#V?)BzWl+B4{g7nxv~0u^?hYRTAnj( zdFRX0^S_@rdg??@*?jU^<$-0&RpswjOo@70+J^tR>)PeCU27H$AN14F`ju9U-msxn z$nln^N^viGZO#p!RoA@Y>}l(n&F9-U#GQ@{5TlfCnZ>JvuyX?3+p`rd_yBkpc!F|EY575qx%AC{j@<2;nf>A(Hv8bg z&HauVDnH8k=;p1!uWOZEkQlTUXv4s9I^C9NjVZ>f;f2*0uW({5-nY#4X)V`rUZE=0v|%f%h-2 zY4v^uY;tzXCiMvQoQG5QHopJQgH{{vWL6nEFyusJ`3ae zqCej`^zpXqTY}l*)T+EMo;^D<<#yi19WQ=)kvs1C=eP6L-^}~z{=r|0Z!A~tv%3FG zOl(D+yqTWa(tO8SqL`)i?J>I>Z94sQI5n*JxijNGy4^o@?U1k5pPaOQN!F&Q%CyZ7 z_p38@P>pXFpRsRBJ{dgg*ykgr*VQLv>#{RVi%+dyXlvN{{6Ou-PA{sQ`?*H&piOZv zTGYB5yfUw8wcD1fdC^Uq|8Y3Cq3yHh-+cKr-v6NDs|wFzzdZ5N3+=RSBNK8jxBMQY zExL5eI<8{c#aVS~WORAnsNIBx?>yeHWBWaT|G03-+27k9Z&tb55 ze;T>!!=)d8Q89x0YW>u#q^{J{VpEhdQxEOV42VHIU zNpcvqp;}I}dfm5vzxwLl_b=_eH2vs&?cY{f*}KftKm^Mw^>Fd~xij zXLpa-p0>EwbzbELQ!>9R`{aj5c{}btc$(KUYt@Ooa`(exY2!EMbG;*?)5a1e_fci|M)kO%CkO{@4o-H2?hF1KYau8&Vhj`Rwzbv0tW}6AnMa=Lcg8PRz<6zhA%Y1-oVG 
z^23!Yj_kK#V6Ds#%2b^H^{uwnzZRt(dhzt|?oWq|Fr4YKGjrJIk4CU(t+o@o`>Qv` zS6#89)$${g<8K~~p8w^9Q?>)kKRwaopzX@WnddFvSAO2&=^rmzocpfq`5Lna-z#^1 zYv8s)TV~#RTmeTUxZ8f4ja_H{nVo1n-8rzG2qyE?2AF`&TPwx5gPSBEs%(3N=b!ITY| zM?0=N+2H=8zecX_f9gTtkB6pwn{(n?$;=JiPd^Cxbx5g6Wfyl?U-Prr`A8y&QH*4m| z!H+3pkk*uvFnmMv!zHdx-9K^tdq0k?z+&vs(9w@wS>SM=t+W@@Cm>jkoqR+4}6Lr_Igjwxe?AoU;oCs@5H> zH7EIxJs0*|SX$@f>SfH}#Kuo4MS(%X0lM z*JAeXs`Jn3ceX9*{K;q4?A^u=>b9^#qhYz3%dSFl2)KFUyIM;h;4H|fBKx2pJ^IjM?HZ1?4~%EfeBYuZ1}X!}v0j@)(aCwImj z-_mU#5&hXx|Ac!hMm9cM?3_5)V)OpfhhhH@AJ6k&;R=<~z~HaT z2`K;b-_WoyO{m-XZ+Jvl=-cz(H}N>4_%qN%upDF8I)15~?jiEH$qcm8EWy!yQOrQ# zUVSq-%%lYwYLx~#16ge(nIz(-9@jf>AaEKy*5iqNO>~WAc?4Xw(+;K9T4z6D%a05B3)k zBPf#+z2bW$#8PCHG^NCpRvK6Md*I3stiK~Xt;L=!4sAPV7fK{x7N}6Tg?|Crefd}I zg&^7dUkc%hQ!2)oAg2X>l1o&y{N_`1ats^xb1(^hA28mL9+D?wkjS5E=9s`5xWC3j@ zzjuGYuQ?82lpRno(;55DO5+GBZR3RdOTvHvngXoZ;=Ft-CG9p_bfW*P6Ex%uINxDP z41Os1a0(HN1qUe=h_zTbJFu~k1??KMq8Am2iB3`Rz&Qhsa{Fn4pC1C5VkJmg45=qf zu|kTlCuYIOcg+N&Rf7Dnlm^_YREKKSstBc39ib3!w2C)35w%M(wRjSC4{xmkN;PN( zC|VdD1&XGnb5zj+rCI<Cz;f0T}~ru@D?jxdOF8 z_iU&-juvx|03$}SP$ABQl?NnVlM|Q+;9O^(6(9;lT(ZmwUjUSXWBQ4|1PT%V;4qV) z(3o?67}~}Wlo-xa;-P@GVwn_b-pbGh$g+;{;YZFO1h6Lg)sRUu=_qJn7{B_Rt6OxZ z)br%;n09pSMO^nCr7JN04(~Zfj7t6oj#iD#uii0tge*$3?7L2?t(ETLxdL~%$fL34 z`=*BumaVsoVcSs1Y%64B&>X&yBIZtqP62%}=PigyNC7x);pu`l@@rcnbC8J~${nLi z$h}0Rj7CCDtz&7Bv;kf3@|Ruy5mOfcjAaQE1##iNJan`p`0TSXsBXCqq_NUEJT(0U zm@Xh?7v)8W@^O+AimWA+W>ZLPF1=Io@7%i}%c8)yh2f}4h_0*4d9kqOgygG2_Ij5P zW&VQGDy9{j_c7-yz4%Hmkh;LGS+_8ie4C7o0+>O-L=TNL14U*_u)-dJE_8|P-Xln- zgXeR!$;3Cn?;jY&5kShZ&+i=kU^&R&D&!gvA-bpJm$-o(W>QQ`9#a-_{Qu$!Dk=jb z_(29CfM$T&6&g;kj19>%I=u}?o3(Ri5{E(JF9y${AZg0Pf0jl;z@Ft7i!dM3&L`dE zbwWogN{R`KWDBVQe*F`ak-=Dwu^BiU0|7(xhzJZ0qi|rwSQa@SD};;V^1>*O8mp7# zZkH#wP`VaYqQb(olLU(b6o@MZ1t8`*xE@NknRpJv&=!{kWWgxHD5NFucb1c_B7fwB zF2C!;zkyT+!O#@q`NI4O0sRmQV{Itjwb4cb3t7GXED(f-SQOG0`0z&Bm&;b87G0Mi7LjgypLkjIJn1BHP4dtiJK_cTfd4^vn9BrjZ+GIx&k@>bfSb#!^awuQsI;j>4fDZgzbpc-V<1z8` 
z7*avnj>oKm=qn>RkbyCTu)6s{rL8v9d72Wrs>DorOk~x>?1UD~ieycJl0+3qh^`=A zbU=+*kqU)G6b|DcPtGi_1)+>6u3{Z4cH3E7XO$E%nn7fS=*sTrV*~QwnjnBDV=)Ie z8!Sb!XB&tup=^ud<|D|u;$=sFH#iu>lLuoVD72lSVDd&Aue@PKel?;{DB=neVLOWJ zOblj#?t-NYu{ky{Bxi^^8gRJ#d)Yo^)UA_1X@Ot1B!2W&IVezNU?2{8QX*MZHfLqh z2@3CvexWQweTCsfzd>A#Kqr|z2U7|m_xYg}R2CKlNLTqJ_AfAfj0F@;DH)pPB+;*d zNErl4>x`_J&qy7j5hy0p3^_9zIBDN->?{(i;h__}>`Y`3Bs4)BSOnfCz;wQs?|=?r zDN_tLFRU+dE+w%<+@r(@G?Ko}i>0U*u5-N80>3~%X=4!B4@Q#CIxH`~N~$V!1_yG; zy?pzgySY$9dY44w$?{N-{wUCp5Mj{)!0I)1jVMIQ5;+1tjWD4Gt4%^fys21lP*Pe! zpbin$7*SIx6jCk;6M?!lA%|2H)ZCID^4P@_{0xXoj}=k@2-L~a7MbuYN^Fu*LM*R! z*>*F{kOG+)!AZ)uCqohTJsgry5c81y#Q()hPkdG9KC2c-&L?!9Q}X-LiTYI^Z(5j(_9C^+*{#s6uW0*5a-p!>JB49M-f{L_R69gD zn1`d&JK}G6>P&@bRkR-D+pJi|ce`@Q(=olG;m<`lkoD6QEoYlzA90FxlSiFGKsGNa z_f=&J z809oO2`3&P0Lx_g8YY7k$i(ReJiZp%MsZSyB&Eq|xsnu7R7?x}_$^E+yi*D)o1kxC zoY@JD2ZibB3@9NWf;SQ*WbG^mEe^&b$w>jC#ON|AJ6b;bZ4C#Vt%f<@4i8T{eD^E+Z)klzaiF*9` zA39c}6KWO|Sm&trfQ2Sr6BSR6I!Cov#tI4yKfbO2<>ZLWxC~8!UbJ3RhdtFe#~OA` zUL+pLNirfUO|_VT2Ix^q1UW$5&3R>s1y+*|w^^(hN%Ev17S{>Ib*^VWelI`#5$h|3 z`hL=QMG4%l9kheE7|&)r623VOq~t>_keJvJ2C_C1nV&sjImXvwR|>zCHimRSlr(x^ zV@N?#p=eGF9vSRidz4hgnFk^&Ey>|!Ac{#j1M8r4&jUuG*$vEtmpmM1P@=54RNq_O zUCzC?E>(4BXxWLPqOQjkf_rq0vZ5O`@nl`r$FApSVjz0Z;2Mo)qq1gQAJLphqVBGb zBWK<7Rn>j;bPs}?v-yYS54vwx{l2fh`rg%N>KF5$IBVI`+=8zZUprHL@Gt$Ve%3A9 zCUhPzou;wdcXW1cy5=YE&zW%Yt&>jq>gg{`UD`SKjgAw3`R1%;FLwTL)DAvj^ukpq zo;YswmO~4#{Zi|xOV@uiw0_5vl}T*}W>1^>>(@FTe`oj3<@ayBH zf82gXuKk4Ln(vKYHR@i(Y?2=ld+UAj#N37DQ-A%? 
z%5Oh*-9T@v+}g6{;NtZUFFNCv;@%Ad*UumO@elWoC(mE;y(@QJ^|1fV9n%;5?1kGV zpML$F&gmZ>%3bv4%$1M-?O!gbv_3g$=jdBjUj6F4W$!((bq!y!d3#ISqd$JT@||6a z?znqi`PGwW>}$HV@`H&(cmC^|ec%1b!Y$KIT6~3n*0Qs{kUeK`+PEA4e8OMx8>jtXy=_e_i`oTjsLug&!|l(7U?g?U`}TI5Brs=EkNamrQ*0 zl%DH1ytjSX8{K0jtZ7GQL`GI-B>YY=%a$oO^X3!AKR*4Q_a{$1XWEz#*WGdF#w9au-@SdZ`>hkFJho)h zhK^B_U!B08e+=zMFa7(H9b=|WdWdhG_Pu3y?7r05-TA_ApZ?km=imo7lY{R~ef^UK z-`_v+q}SM~1fxW+nb^`ZZH=8g|`?!i}{{QiV} z=>b^Yf0hnAiA?kBr`?lcC|p8oKgPj&VNlTW(f-OKLTwU8bkezfr5EgyeS z`Q*`|Uw!@4-~H!hqeqSU^was?1Di%=M*p$@>eaDV^s4>)Z|XK=k+r$=hq?s#@PGfU zt-XEj@W21oeopJ$V}Jkc|KmkHTv8Wtb5f#0@=88_mrJfn%&9vGbLvt=P68&VWH1!l z)Tw(Z_Z*ee4ktV4J`sPKwDDmHqK$ zm^sRt9cGJWoB1{gqE5v#FBGMRum9lp<`x%+GWNBL9~b*61c455no1k zjy8vZqwXH<>g^V2kjgSTFCczL&+-fAT!3BbL^t3Q9~WF=+1#-P;^%oJc}NryusMma zUqL<@L=Kj?fCWJTa}+8IC32}pjX)6dktln}BOc?G1YVRq%iLj~Koe;@uwb9qYPI?v zbyQ>=_2|n&b7-*_$O;Mt5opCcVQv&f-Ccb-0GGC7m6-rJHz>%yfVvDh%g;7JTHr$r;!*hc{;14 zD;klO{?;-yEXgdj-aIN|SCEX^i2#5Eb%>3*_8Ul6Z@SQkdWesbeCdKA3^LwNxX7cq zVEBn)n>&OHp!xB(haxfg^M$z?w=up%R(%f(hc}Kg7)}D0+S+SGdl&16-t6dmE1I1L z0pT!4$rjdDAhEYDR1o#4bP5@y2d2>CD}fE?$t}j3eTNir=t|VRx(_+b z1HE8HFzZOo(O{YIlyqtOYGgCU(lhh1X}HYBuENKTLv?&Yu5!o-3qeahabOX!ZxJ0< zxP;hBha*%yAs<~29my&mUsT+vF{>bx1xAQzIi(dPMG0i3$;E5P!n5H^z)6?^TG&w} z$pGP%DmSccE)&m zq{uiiewGRy9*C28dYEn{VL9dQB`cMH0E7BINx;ueWK_vXQiuxS`n1YZ|3t}9#R--vo@KB{PRv~m$oKAw05kur(>{?N{%rC*$ zkrXud0*poPM-=olL|O*QqF7N+P-#U(0PLUoMUI8!p)EsB5NE)Mc5v8tP=JL{cW^7H z>cN&8Y-voZ+8#H$+3LYrHCS)EON9iEjK#<@mdb+{2%>(Oa0=R7@1 z)X}qCVKE%avZV%&xc9LSikxs9#w76q7fYg3uI^6M)WaOq9a{(LRMs&|>mXDIPC@&x zBI6#0v@5PCHXGF#$_7qBCu0!gIoOG00drZY0$xYas&N5;J1gKRK&6>llT7sk1Yt4y zc7H`AP}P%4Q6!cG)LB3Q#ELa(oiNKzW?Mm2doixmOzBmnVT1rw<26wesIee?fZ9Yf z6W<9K^`)3)D1qTe!AGaL@WdF0nuM@zv(T4XQB*KOSfF$J&RgAlVaKYKQI@XlLFFHX zN|HQ84#K!_EldM#8E7kkP9z#_;czyNj7~i&+7J>A4Va_l2unr<>N~KGqgtm>HW{ADS6O$QF{s4O z)WfF!te>x>+=&%In~ zi7YF@MS(gF<&kd(bPB}^9iT2L5eIoXm}t!8c{=5Yv(rH=?J^O@p_#pm@<5Xml;{BQ zlimi@0cw_G#n1An;mn385^H~tT{oE{pk9dD0#L{4nl&jjfPQWIq-R9WdYIE|CYtcv 
z)pNcsCKG_fxZ<1#s%M?GTXqme)mTGoLyTeA(5d86K$&^4VWw_0rQuOAWeP^aS9?p# zDw@XnJj_+spo%{OTE^T_Yxby5z0gBxO$|6GhFj4oVvVaeU4WDj+uF^#6|7uztL`o6Xb$~bQ1q)DJAk}Ks z@c?D2wc;}DpaOQWZzpY!l?*$b=9yN;?$v$hQs!$rjo(0xk zP>n<4SOA$vP9bNSKRKIeJsMts2Z3ABMW#*whEr4(oG`^-RaqXV26apGO{NaNv|hDd zRpmyMWhEfP)~eDxld!<ebdqLDh7b4olXI8JTXnLW#FG zO2^VUO=_LhY^6M-UZdlSVRcKAyKIJvYACl^s{?3*Nmq`Fk7(s1F%qr%Fq~vzOnr7T zkai@RNFC&mLRcyRD@M*q(@YBD7L65k!?1kSb`B6;V1j4_7fdp>OM!6_piTRGa99lH{%`>_)k0q7%^*OV2Rms{5q4SR}SmHp7&P4!w)})oIXbE;C?MZzohMMr8fEgt_K&dKV7Vzt- z0?sTbDz1|bXVkDz!}6#BLReNm%j(izmq}Qb6EuLr1vWq;1`K?WxZ4P6igCnXSD^#&~N>x_yO(3L@9>0x<9!mJHDjNNQ&0k%F!3ScwEFdBm6IaB*G7%IsoYcY%DX0n_TJ z+WnmQ=n_^iZIjySGqz!BW49CA{h?7nbxafGh$7l*R}Vx5HBuornP?74vp_+~azZb_ zt_!MQhFf~LRee0z@Y&~sW`0)BZ;0g8p}pM-QbEhRYsrW?#=BYlq+(GgIkQZQTC12@ z2qz+R)iFgaRxDqylGX3CA$Ma$qq(eTuf+BUl~U7ho~b8CIMYtWEWt4K=lj7@}U<7z!{|W1%rZokmj%QCK*jX)x2MF}@sO zbbjy3v`50BEJMgKC!bC6!6@Vgsx7X4e}Fx!>ZcWnr%U=duo2jCRQn|*5VHqSj-b0( z>a+mHf^R7ou!`PLoEo=mx{u0nI81NUN^{hr^<=P2?Q$%0pFoHylhn2-?Vvs)*e=(r z`jaEuWFxc`qXU@Q@6Dl4#EDs%Ir9NiFA&wXALLX@;=^uXKYfrhTT2G=%-X{$SU+cy z*icu+Qc&z}bpDtw;gm>+Z*hzqP|bX`!l!7$XEK^jk_K};^!vphbJQ9V*kAex}8Pd!p> zN6uMS;;Eln9Krl3vYOd(=W$qDt_HwNAF1(JueEnLjM|Rls#M4E2!TdwIIcQe!|^k~ zV#ajSju)_+`n-fVlCpn`y5@4g< z9s0CmBpMj?RgL>>V8B^xeh(Pg)G!R~|8r{S@}kO61d+N~sGz_~0BknG8i%HjI&?Op ziiXg2wI4F&d2k3UtMy4zLng!1O?2I<){emrLZuF#`lL5D(T}KC zpBmdbd~DpZ?TCIB5ucQ*6F+rm-iUgS;-`^`_iEM{*?>7&pE;ztF*;_=V&BPPU(#%R z7MuLgV)GC%w3fMqn?~3lk|zl1Qqh+&h2~81UgXjGi-CdF%puXK{pVr;LSMENQin9d w>a&8l`ZtHo8Tj0%J~FNr{Kx*8@3Ggh*Rj{JSIz5x0{{U3|GrFW?*Nbi0N<|8z5oCK literal 0 HcmV?d00001 diff --git a/charts/postgres-operator/templates/clusterrole.yaml b/charts/postgres-operator/templates/clusterrole.yaml index 9a4165797..7b3dd462d 100644 --- a/charts/postgres-operator/templates/clusterrole.yaml +++ b/charts/postgres-operator/templates/clusterrole.yaml @@ -63,9 +63,9 @@ rules: - secrets verbs: - create - - update - delete - get + - update # to check nodes for node readiness label - apiGroups: - "" 
@@ -102,9 +102,9 @@ rules: - delete - get - list - - watch - - update - patch + - update + - watch # to resize the filesystem in Spilo pods when increasing volume size - apiGroups: - "" diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index 195a03380..b5d561807 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -1,7 +1,7 @@ image: registry: registry.opensource.zalan.do repository: acid/postgres-operator - tag: v1.3.1 + tag: v1.4.0 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. @@ -100,8 +100,14 @@ configKubernetes: pod_management_policy: "ordered_ready" # label assigned to the Postgres pods (and services/endpoints) pod_role_label: spilo-role + # service account definition as JSON/YAML string to be used by postgres cluster pods + # pod_service_account_definition: "" + # name of service account to be used by postgres cluster pods pod_service_account_name: "postgres-pod" + # role binding definition as JSON/YAML string to be used by pod service account + # pod_service_account_role_binding_definition: "" + # Postgres pods are terminated forcefully after this timeout pod_terminate_grace_period: 5m # template for database user secrets generated by the operator diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 8b52a7d67..07ba76285 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -1,7 +1,7 @@ image: registry: registry.opensource.zalan.do repository: acid/postgres-operator - tag: v1.3.1 + tag: v1.4.0 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. 
@@ -93,8 +93,14 @@ configKubernetes: pod_management_policy: "ordered_ready" # label assigned to the Postgres pods (and services/endpoints) pod_role_label: spilo-role + # service account definition as JSON/YAML string to be used by postgres cluster pods + # pod_service_account_definition: "" + # name of service account to be used by postgres cluster pods pod_service_account_name: "postgres-pod" + # role binding definition as JSON/YAML string to be used by pod service account + # pod_service_account_role_binding_definition: "" + # Postgres pods are terminated forcefully after this timeout pod_terminate_grace_period: 5m # template for database user secrets generated by the operator diff --git a/docs/user.md b/docs/user.md index 91a010b9c..295c149bd 100644 --- a/docs/user.md +++ b/docs/user.md @@ -359,13 +359,16 @@ stored in secrets which are created by the operator. One solution is to create secrets beforehand and paste in the credentials of the source cluster. Otherwise, you will see errors in the Postgres logs saying users cannot log in and the operator logs will complain about not being able to sync resources. -This, however, can safely be ignored as it will be sorted out once the cluster -is detached from the source (and it’s still harmless if you don’t plan to). -You can also edit the secrets afterwards. Find them by: +When you only run a standby leader, you can safely ignore this, as it will be +sorted out once the cluster is detached from the source. It is also harmless if +you don’t plan it. But, when you created a standby replica, too, fix the +credentials right away. WAL files will pile up on the standby leader if no +connection can be established between standby replica(s). You can also edit the +secrets after their creation. 
Find them by: ```bash -kubectl get secrets --all-namespaces | grep +kubectl get secrets --all-namespaces | grep ``` ### Promote the standby diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index aa7bef034..0300b5495 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -63,7 +63,9 @@ data: pod_label_wait_timeout: 10m pod_management_policy: "ordered_ready" pod_role_label: spilo-role + # pod_service_account_definition: "" pod_service_account_name: "postgres-pod" + # pod_service_account_role_binding_definition: "" pod_terminate_grace_period: 5m # postgres_superuser_teams: "postgres_superusers" # protected_role_names: "admin" diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml index 80fcd89ef..e5bc49f83 100644 --- a/manifests/operator-service-account-rbac.yaml +++ b/manifests/operator-service-account-rbac.yaml @@ -64,9 +64,9 @@ rules: - secrets verbs: - create - - update - delete - get + - update # to check nodes for node readiness label - apiGroups: - "" @@ -103,9 +103,9 @@ rules: - delete - get - list - - watch - - update - patch + - update + - watch # to resize the filesystem in Spilo pods when increasing volume size - apiGroups: - "" diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index e3bc3e3e4..63f17d9fa 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: postgres-operator containers: - name: postgres-operator - image: registry.opensource.zalan.do/acid/postgres-operator:v1.3.1 + image: registry.opensource.zalan.do/acid/postgres-operator:v1.4.0 imagePullPolicy: IfNotPresent resources: requests: diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 3c49b9a13..140d2bc4e 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -224,7 +224,7 @@ func (c *Controller) initRoleBinding() { switch { case err != nil: - 
panic(fmt.Errorf("unable to parse the definition of the role binding for the pod service account definition from the operator configuration: %v", err)) + panic(fmt.Errorf("unable to parse the role binding definition from the operator configuration: %v", err)) case groupVersionKind.Kind != "RoleBinding": panic(fmt.Errorf("role binding definition in the operator configuration defines another type of resource: %v", groupVersionKind.Kind)) default: diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index 8e8f9ae85..96d12bb9f 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -505,11 +505,11 @@ func (c *Controller) submitRBACCredentials(event ClusterEvent) error { namespace := event.NewSpec.GetNamespace() if err := c.createPodServiceAccount(namespace); err != nil { - return fmt.Errorf("could not create pod service account %v : %v", c.opConfig.PodServiceAccountName, err) + return fmt.Errorf("could not create pod service account %q : %v", c.opConfig.PodServiceAccountName, err) } if err := c.createRoleBindings(namespace); err != nil { - return fmt.Errorf("could not create role binding %v : %v", c.PodServiceAccountRoleBinding.Name, err) + return fmt.Errorf("could not create role binding %q : %v", c.PodServiceAccountRoleBinding.Name, err) } return nil } @@ -520,16 +520,16 @@ func (c *Controller) createPodServiceAccount(namespace string) error { _, err := c.KubeClient.ServiceAccounts(namespace).Get(podServiceAccountName, metav1.GetOptions{}) if k8sutil.ResourceNotFound(err) { - c.logger.Infof(fmt.Sprintf("creating pod service account in the namespace %v", namespace)) + c.logger.Infof(fmt.Sprintf("creating pod service account %q in the %q namespace", podServiceAccountName, namespace)) // get a separate copy of service account // to prevent a race condition when setting a namespace for many clusters sa := *c.PodServiceAccount if _, err = c.KubeClient.ServiceAccounts(namespace).Create(&sa); err != nil { - return 
fmt.Errorf("cannot deploy the pod service account %v defined in the config map to the %v namespace: %v", podServiceAccountName, namespace, err) + return fmt.Errorf("cannot deploy the pod service account %q defined in the configuration to the %q namespace: %v", podServiceAccountName, namespace, err) } - c.logger.Infof("successfully deployed the pod service account %v to the %v namespace", podServiceAccountName, namespace) + c.logger.Infof("successfully deployed the pod service account %q to the %q namespace", podServiceAccountName, namespace) } else if k8sutil.ResourceAlreadyExists(err) { return nil } @@ -545,14 +545,14 @@ func (c *Controller) createRoleBindings(namespace string) error { _, err := c.KubeClient.RoleBindings(namespace).Get(podServiceAccountRoleBindingName, metav1.GetOptions{}) if k8sutil.ResourceNotFound(err) { - c.logger.Infof("Creating the role binding %v in the namespace %v", podServiceAccountRoleBindingName, namespace) + c.logger.Infof("Creating the role binding %q in the %q namespace", podServiceAccountRoleBindingName, namespace) // get a separate copy of role binding // to prevent a race condition when setting a namespace for many clusters rb := *c.PodServiceAccountRoleBinding _, err = c.KubeClient.RoleBindings(namespace).Create(&rb) if err != nil { - return fmt.Errorf("cannot bind the pod service account %q defined in the config map to the cluster role in the %q namespace: %v", podServiceAccountName, namespace, err) + return fmt.Errorf("cannot bind the pod service account %q defined in the configuration to the cluster role in the %q namespace: %v", podServiceAccountName, namespace, err) } c.logger.Infof("successfully deployed the role binding for the pod service account %q to the %q namespace", podServiceAccountName, namespace) diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 0e88c60d7..fee65be81 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -91,12 +91,11 @@ type Config struct { Scalyr 
LogicalBackup - WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to' - EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS - DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p2"` - Sidecars map[string]string `name:"sidecar_docker_images"` - // default name `operator` enables backward compatibility with the older ServiceAccountName field - PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"` + WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to' + EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS + DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p2"` + Sidecars map[string]string `name:"sidecar_docker_images"` + PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"` // value of this string must be valid JSON or YAML; see initPodServiceAccount PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""` PodServiceAccountRoleBindingDefinition string `name:"pod_service_account_role_binding_definition" default:""` From 51909204fd7085f5b890cc4689221fece54ccecc Mon Sep 17 00:00:00 2001 From: Hengchu Zhang Date: Fri, 28 Feb 2020 08:13:58 -0500 Subject: [PATCH 31/31] Change `logging_rest_api.api_port` to `8080` instead of `8008` (#848) The documentation states that the default operator REST service is at port `8080`, but the current default CRD based configuration is `8008`. Changing the default config to match documentation. 
--- manifests/postgresql-operator-default-configuration.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index bdb131fc5..33838b2a9 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -110,7 +110,7 @@ configuration: log_statement: all # teams_api_url: "" logging_rest_api: - api_port: 8008 + api_port: 8080 cluster_history_entries: 1000 ring_log_lines: 100 scalyr: