From a660d758a5850530bb7f7d3e1a049dd01f2bdda2 Mon Sep 17 00:00:00 2001 From: Vito Botta Date: Mon, 10 Feb 2020 12:48:24 +0200 Subject: [PATCH 1/6] Add region setting for logical backups to non-AWS storage (#813) * Add region setting for logical backups to non-AWS storage --- charts/postgres-operator/crds/operatorconfigurations.yaml | 2 ++ charts/postgres-operator/values-crd.yaml | 2 ++ charts/postgres-operator/values.yaml | 2 ++ docker/logical-backup/dump.sh | 1 + docs/reference/operator_parameters.md | 5 ++++- manifests/configmap.yaml | 1 + manifests/operatorconfiguration.crd.yaml | 2 ++ manifests/postgresql-operator-default-configuration.yaml | 1 + pkg/apis/acid.zalan.do/v1/crds.go | 3 +++ pkg/apis/acid.zalan.do/v1/operator_configuration_type.go | 1 + pkg/cluster/k8sres.go | 4 ++++ pkg/controller/operator_config.go | 1 + pkg/util/config/config.go | 1 + 13 files changed, 25 insertions(+), 1 deletion(-) diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index 52d03df9c..9725c2708 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -243,6 +243,8 @@ spec: type: string logical_backup_s3_endpoint: type: string + logical_backup_s3_region: + type: string logical_backup_s3_secret_access_key: type: string logical_backup_s3_sse: diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index 61cab3d06..1f9b5e495 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -204,6 +204,8 @@ configLogicalBackup: logical_backup_s3_access_key_id: "" # S3 bucket to store backup results logical_backup_s3_bucket: "my-bucket-url" + # S3 region of bucket + logical_backup_s3_region: "" # S3 endpoint url when not using AWS logical_backup_s3_endpoint: "" # S3 Secret Access Key diff --git a/charts/postgres-operator/values.yaml 
b/charts/postgres-operator/values.yaml index deb506329..1be5851d2 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -195,6 +195,8 @@ configLogicalBackup: logical_backup_s3_access_key_id: "" # S3 bucket to store backup results logical_backup_s3_bucket: "my-bucket-url" + # S3 region of bucket + logical_backup_s3_region: "" # S3 endpoint url when not using AWS logical_backup_s3_endpoint: "" # S3 Secret Access Key diff --git a/docker/logical-backup/dump.sh b/docker/logical-backup/dump.sh index 673f09038..2d9a39e02 100755 --- a/docker/logical-backup/dump.sh +++ b/docker/logical-backup/dump.sh @@ -40,6 +40,7 @@ function aws_upload { [[ ! -z "$EXPECTED_SIZE" ]] && args+=("--expected-size=$EXPECTED_SIZE") [[ ! -z "$LOGICAL_BACKUP_S3_ENDPOINT" ]] && args+=("--endpoint-url=$LOGICAL_BACKUP_S3_ENDPOINT") + [[ ! -z "$LOGICAL_BACKUP_S3_REGION" ]] && args+=("--region=$LOGICAL_BACKUP_S3_REGION") [[ ! -z "$LOGICAL_BACKUP_S3_SSE" ]] && args+=("--sse=$LOGICAL_BACKUP_S3_SSE") aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}" diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index d6dde8c0e..7a8acc232 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -461,8 +461,11 @@ grouped under the `logical_backup` key. S3 bucket to store backup results. The bucket has to be present and accessible by Postgres pods. Default: empty. +* **logical_backup_s3_region** + Specifies the region of the bucket which is required with some non-AWS S3 storage services. The default is empty. + * **logical_backup_s3_endpoint** - When using non-AWS S3 storage, endpoint can be set as a ENV variable. + When using non-AWS S3 storage, endpoint can be set as a ENV variable. The default is empty. * **logical_backup_s3_sse** Specify server side encription that S3 storage is using. 
If empty string diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index 7d11198da..d26c83edf 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -40,6 +40,7 @@ data: # logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" # logical_backup_s3_access_key_id: "" # logical_backup_s3_bucket: "my-bucket-url" + # logical_backup_s3_region: "" # logical_backup_s3_endpoint: "" # logical_backup_s3_secret_access_key: "" # logical_backup_s3_sse: "AES256" diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index 509d9aefc..7bd5c529c 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -219,6 +219,8 @@ spec: type: string logical_backup_s3_endpoint: type: string + logical_backup_s3_region: + type: string logical_backup_s3_secret_access_key: type: string logical_backup_s3_sse: diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index f13a1eed9..efd1a5396 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -88,6 +88,7 @@ configuration: # logical_backup_s3_access_key_id: "" logical_backup_s3_bucket: "my-bucket-url" # logical_backup_s3_endpoint: "" + # logical_backup_s3_region: "" # logical_backup_s3_secret_access_key: "" logical_backup_s3_sse: "AES256" logical_backup_schedule: "30 00 * * *" diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 4d5a6f024..bc33f11f6 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -909,6 +909,9 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "logical_backup_s3_endpoint": { Type: "string", }, + "logical_backup_s3_region": { + Type: "string", + }, "logical_backup_s3_secret_access_key": { Type: "string", }, diff --git 
a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index 1e6a3b459..35c51e08d 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -157,6 +157,7 @@ type OperatorLogicalBackupConfiguration struct { Schedule string `json:"logical_backup_schedule,omitempty"` DockerImage string `json:"logical_backup_docker_image,omitempty"` S3Bucket string `json:"logical_backup_s3_bucket,omitempty"` + S3Region string `json:"logical_backup_s3_region,omitempty"` S3Endpoint string `json:"logical_backup_s3_endpoint,omitempty"` S3AccessKeyID string `json:"logical_backup_s3_access_key_id,omitempty"` S3SecretAccessKey string `json:"logical_backup_s3_secret_access_key,omitempty"` diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index aed0c6e83..4c2f50296 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1589,6 +1589,10 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { Name: "LOGICAL_BACKUP_S3_BUCKET", Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket, }, + { + Name: "LOGICAL_BACKUP_S3_REGION", + Value: c.OpConfig.LogicalBackup.LogicalBackupS3Region, + }, { Name: "LOGICAL_BACKUP_S3_ENDPOINT", Value: c.OpConfig.LogicalBackup.LogicalBackupS3Endpoint, diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index d0357d222..98b56a298 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -106,6 +106,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.LogicalBackupSchedule = fromCRD.LogicalBackup.Schedule result.LogicalBackupDockerImage = fromCRD.LogicalBackup.DockerImage result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket + result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region result.LogicalBackupS3Endpoint = fromCRD.LogicalBackup.S3Endpoint result.LogicalBackupS3AccessKeyID = 
fromCRD.LogicalBackup.S3AccessKeyID result.LogicalBackupS3SecretAccessKey = fromCRD.LogicalBackup.S3SecretAccessKey diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 339f06ce0..e4e429abb 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -76,6 +76,7 @@ type LogicalBackup struct { LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"` LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup"` LogicalBackupS3Bucket string `name:"logical_backup_s3_bucket" default:""` + LogicalBackupS3Region string `name:"logical_backup_s3_region" default:""` LogicalBackupS3Endpoint string `name:"logical_backup_s3_endpoint" default:""` LogicalBackupS3AccessKeyID string `name:"logical_backup_s3_access_key_id" default:""` LogicalBackupS3SecretAccessKey string `name:"logical_backup_s3_secret_access_key" default:""` From ba60e15d073e0b254df142cf71adb5725d6b1a16 Mon Sep 17 00:00:00 2001 From: Jonathan Juares Beber Date: Mon, 10 Feb 2020 12:03:25 +0100 Subject: [PATCH 2/6] Add ServiceAnnotations cluster config (#803) The [operator parameters][1] already support the `custom_service_annotations` config.With this parameter is possible to define custom annotations that will be used on the services created by the operator. The `custom_service_annotations` as all the other [operator parameters][1] are defined on the operator level and do not allow customization on the cluster level. A cluster may require different service annotations, as for example, set up different cloud load balancers timeouts, different ingress annotations, and/or enable more customizable environments. This commit introduces a new parameter on the cluster level, called `serviceAnnotations`, responsible for defining custom annotations just for the services created by the operator to the specifically defined cluster. 
It allows a mix of configuration between `custom_service_annotations` and `serviceAnnotations` where the latest one will have priority. In order to allow custom service annotations to be used on services without LoadBalancers (as for example, service mesh services annotations) both `custom_service_annotations` and `serviceAnnotations` are applied independently of load-balancing configuration. For retro-compatibility purposes, `custom_service_annotations` is still under [Load balancer related options][2]. The two default annotations when using LoadBalancer services, `external-dns.alpha.kubernetes.io/hostname` and `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` are still defined by the operator. `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` can be overridden by `custom_service_annotations` or `serviceAnnotations`, allowing a more customizable environment. `external-dns.alpha.kubernetes.io/hostname` can not be overridden once there is no differentiation between custom service annotations for replicas and masters. It updates the documentation and creates the necessary unit and e2e tests to the above-described feature too. 
[1]: https://github.com/zalando/postgres-operator/blob/master/docs/reference/operator_parameters.md [2]: https://github.com/zalando/postgres-operator/blob/master/docs/reference/operator_parameters.md#load-balancer-related-options --- .../postgres-operator/crds/postgresqls.yaml | 4 + docs/administrator.md | 11 + docs/reference/cluster_manifest.md | 5 + docs/reference/operator_parameters.md | 5 +- e2e/README.md | 1 + e2e/tests/test_e2e.py | 51 ++- manifests/complete-postgres-manifest.yaml | 2 + ...res-manifest-with-service-annotations.yaml | 20 ++ manifests/postgresql.crd.yaml | 4 + pkg/apis/acid.zalan.do/v1/crds.go | 8 + pkg/apis/acid.zalan.do/v1/postgresql_type.go | 1 + pkg/apis/acid.zalan.do/v1/util_test.go | 101 +++++- .../acid.zalan.do/v1/zz_generated.deepcopy.go | 7 + pkg/cluster/cluster_test.go | 322 ++++++++++++++++++ pkg/cluster/k8sres.go | 60 ++-- 15 files changed, 565 insertions(+), 37 deletions(-) create mode 100644 manifests/postgres-manifest-with-service-annotations.yaml diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index 198afe119..b4b676236 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -266,6 +266,10 @@ spec: pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' # Note: the value specified here must not be zero or be higher # than the corresponding limit. + serviceAnnotations: + type: object + additionalProperties: + type: string sidecars: type: array nullable: true diff --git a/docs/administrator.md b/docs/administrator.md index 5b8769edb..2e86193c0 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -376,6 +376,17 @@ cluster manifest. In the case any of these variables are omitted from the manifest, the operator configuration settings `enable_master_load_balancer` and `enable_replica_load_balancer` apply. 
Note that the operator settings affect all Postgresql services running in all namespaces watched by the operator. +If load balancing is enabled two default annotations will be applied to its +services: + +- `external-dns.alpha.kubernetes.io/hostname` with the value defined by the + operator configs `master_dns_name_format` and `replica_dns_name_format`. + This value can't be overwritten. If any changing in its value is needed, it + MUST be done changing the DNS format operator config parameters; and +- `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` with + a default value of "3600". This value can be overwritten with the operator + config parameter `custom_service_annotations` or the cluster parameter + `serviceAnnotations`. To limit the range of IP addresses that can reach a load balancer, specify the desired ranges in the `allowedSourceRanges` field (applies to both master and diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md index bf6df681b..7b049b6fa 100644 --- a/docs/reference/cluster_manifest.md +++ b/docs/reference/cluster_manifest.md @@ -122,6 +122,11 @@ These parameters are grouped directly under the `spec` key in the manifest. A map of key value pairs that gets attached as [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to each pod created for the database. +* **serviceAnnotations** + A map of key value pairs that gets attached as [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + to the services created for the database cluster. Check the + [administrator docs](https://github.com/zalando/postgres-operator/blob/master/docs/administrator.md#load-balancers-and-allowed-ip-ranges) + for more information regarding default values and overwrite rules. * **enableShmVolume** Start a database pod without limitations on shm memory. 
By default Docker diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 7a8acc232..e3893ea31 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -388,8 +388,9 @@ In the CRD-based configuration they are grouped under the `load_balancer` key. `false`. * **custom_service_annotations** - when load balancing is enabled, LoadBalancer service is created and - this parameter takes service annotations that are applied to service. + This key/value map provides a list of annotations that get attached to each + service of a cluster created by the operator. If the annotation key is also + provided by the cluster definition, the manifest value is used. Optional. * **master_dns_name_format** defines the DNS name string template for the diff --git a/e2e/README.md b/e2e/README.md index 1d611bcd0..f1bc5f9ed 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -44,3 +44,4 @@ The current tests are all bundled in [`test_e2e.py`](tests/test_e2e.py): * taint-based eviction of Postgres pods * invoking logical backup cron job * uniqueness of master pod +* custom service annotations diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index fc87c9887..5f34dcb16 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -211,8 +211,8 @@ class EndToEndTestCase(unittest.TestCase): schedule = "7 7 7 7 *" pg_patch_enable_backup = { "spec": { - "enableLogicalBackup": True, - "logicalBackupSchedule": schedule + "enableLogicalBackup": True, + "logicalBackupSchedule": schedule } } k8s.api.custom_objects_api.patch_namespaced_custom_object( @@ -234,7 +234,7 @@ class EndToEndTestCase(unittest.TestCase): image = "test-image-name" patch_logical_backup_image = { "data": { - "logical_backup_docker_image": image, + "logical_backup_docker_image": image, } } k8s.update_config(patch_logical_backup_image) @@ -247,7 +247,7 @@ class EndToEndTestCase(unittest.TestCase): # delete the logical backup cron job 
pg_patch_disable_backup = { "spec": { - "enableLogicalBackup": False, + "enableLogicalBackup": False, } } k8s.api.custom_objects_api.patch_namespaced_custom_object( @@ -257,6 +257,37 @@ class EndToEndTestCase(unittest.TestCase): self.assertEqual(0, len(jobs), "Expected 0 logical backup jobs, found {}".format(len(jobs))) + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_service_annotations(self): + ''' + Create a Postgres cluster with service annotations and check them. + ''' + k8s = self.k8s + patch_custom_service_annotations = { + "data": { + "custom_service_annotations": "foo:bar", + } + } + k8s.update_config(patch_custom_service_annotations) + + k8s.create_with_kubectl("manifests/postgres-manifest-with-service-annotations.yaml") + annotations = { + "annotation.key": "value", + "foo": "bar", + } + self.assertTrue(k8s.check_service_annotations( + "version=acid-service-annotations,spilo-role=master", annotations)) + self.assertTrue(k8s.check_service_annotations( + "version=acid-service-annotations,spilo-role=replica", annotations)) + + # clean up + unpatch_custom_service_annotations = { + "data": { + "custom_service_annotations": "", + } + } + k8s.update_config(unpatch_custom_service_annotations) + def assert_master_is_unique(self, namespace='default', version="acid-minimal-cluster"): ''' Check that there is a single pod in the k8s cluster with the label "spilo-role=master" @@ -322,6 +353,16 @@ class K8s: pod_phase = pods[0].status.phase time.sleep(self.RETRY_TIMEOUT_SEC) + def check_service_annotations(self, svc_labels, annotations, namespace='default'): + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + if len(svc.metadata.annotations) != len(annotations): + return False + for key in svc.metadata.annotations: + if svc.metadata.annotations[key] != annotations[key]: + return False + return True + def wait_for_pg_to_scale(self, number_of_instances, namespace='default'): body = { @@ 
-330,7 +371,7 @@ class K8s: } } _ = self.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body) + "acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body) labels = 'version=acid-minimal-cluster' while self.count_pods_with_label(labels) != number_of_instances: diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index 2478156d6..9e3b891c3 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -32,6 +32,8 @@ spec: # spiloFSGroup: 103 # podAnnotations: # annotation.key: value +# serviceAnnotations: +# annotation.key: value # podPriorityClassName: "spilo-pod-priority" # tolerations: # - key: postgres diff --git a/manifests/postgres-manifest-with-service-annotations.yaml b/manifests/postgres-manifest-with-service-annotations.yaml new file mode 100644 index 000000000..ab3096740 --- /dev/null +++ b/manifests/postgres-manifest-with-service-annotations.yaml @@ -0,0 +1,20 @@ +apiVersion: "acid.zalan.do/v1" +kind: postgresql +metadata: + name: acid-service-annotations +spec: + teamId: "acid" + volume: + size: 1Gi + numberOfInstances: 2 + users: + zalando: # database owner + - superuser + - createdb + foo_user: [] # role for application foo + databases: + foo: zalando # dbname: owner + postgresql: + version: "11" + serviceAnnotations: + annotation.key: value diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml index 3b0f652ea..276bc94b8 100644 --- a/manifests/postgresql.crd.yaml +++ b/manifests/postgresql.crd.yaml @@ -230,6 +230,10 @@ spec: pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' # Note: the value specified here must not be zero or be higher # than the corresponding limit. 
+ serviceAnnotations: + type: object + additionalProperties: + type: string sidecars: type: array nullable: true diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index bc33f11f6..4cfc9a9e6 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -383,6 +383,14 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, }, }, + "serviceAnnotations": { + Type: "object", + AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ + Schema: &apiextv1beta1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "sidecars": { Type: "array", Items: &apiextv1beta1.JSONSchemaPropsOrArray{ diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go index 515a73ff0..07b42d4d4 100644 --- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -60,6 +60,7 @@ type PostgresSpec struct { LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"` StandbyCluster *StandbyDescription `json:"standby"` PodAnnotations map[string]string `json:"podAnnotations"` + ServiceAnnotations map[string]string `json:"serviceAnnotations"` // deprecated json tags InitContainersOld []v1.Container `json:"init_containers,omitempty"` diff --git a/pkg/apis/acid.zalan.do/v1/util_test.go b/pkg/apis/acid.zalan.do/v1/util_test.go index a1e01825f..28e9e8ca4 100644 --- a/pkg/apis/acid.zalan.do/v1/util_test.go +++ b/pkg/apis/acid.zalan.do/v1/util_test.go @@ -456,18 +456,84 @@ var postgresqlList = []struct { PostgresqlList{}, errors.New("unexpected end of JSON input")}} -var annotations = []struct { +var podAnnotations = []struct { about string in []byte annotations map[string]string err error }{{ - about: "common annotations", - in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"podAnnotations": {"foo": "bar"},"teamId": "acid", "clone": {"cluster": 
"team-batman"}}}`), + about: "common annotations", + in: []byte(`{ + "kind": "Postgresql", + "apiVersion": "acid.zalan.do/v1", + "metadata": { + "name": "acid-testcluster1" + }, + "spec": { + "podAnnotations": { + "foo": "bar" + }, + "teamId": "acid", + "clone": { + "cluster": "team-batman" + } + } + }`), annotations: map[string]string{"foo": "bar"}, err: nil}, } +var serviceAnnotations = []struct { + about string + in []byte + annotations map[string]string + err error +}{ + { + about: "common single annotation", + in: []byte(`{ + "kind": "Postgresql", + "apiVersion": "acid.zalan.do/v1", + "metadata": { + "name": "acid-testcluster1" + }, + "spec": { + "serviceAnnotations": { + "foo": "bar" + }, + "teamId": "acid", + "clone": { + "cluster": "team-batman" + } + } + }`), + annotations: map[string]string{"foo": "bar"}, + err: nil, + }, + { + about: "common two annotations", + in: []byte(`{ + "kind": "Postgresql", + "apiVersion": "acid.zalan.do/v1", + "metadata": { + "name": "acid-testcluster1" + }, + "spec": { + "serviceAnnotations": { + "foo": "bar", + "post": "gres" + }, + "teamId": "acid", + "clone": { + "cluster": "team-batman" + } + } + }`), + annotations: map[string]string{"foo": "bar", "post": "gres"}, + err: nil, + }, +} + func mustParseTime(s string) metav1.Time { v, err := time.Parse("15:04", s) if err != nil { @@ -517,21 +583,42 @@ func TestWeekdayTime(t *testing.T) { } } -func TestClusterAnnotations(t *testing.T) { - for _, tt := range annotations { +func TestPodAnnotations(t *testing.T) { + for _, tt := range podAnnotations { t.Run(tt.about, func(t *testing.T) { var cluster Postgresql err := cluster.UnmarshalJSON(tt.in) if err != nil { if tt.err == nil || err.Error() != tt.err.Error() { - t.Errorf("Unable to marshal cluster with annotations: expected %v got %v", tt.err, err) + t.Errorf("Unable to marshal cluster with podAnnotations: expected %v got %v", tt.err, err) } return } for k, v := range cluster.Spec.PodAnnotations { found, expected := v, 
tt.annotations[k] if found != expected { - t.Errorf("Didn't find correct value for key %v in for podAnnotations: Expected %v found %v", k, expected, found) + t.Errorf("Didn't find correct value for key %v in for podAnnotations: Expected %v found %v", k, expected, found) + } + } + }) + } +} + +func TestServiceAnnotations(t *testing.T) { + for _, tt := range serviceAnnotations { + t.Run(tt.about, func(t *testing.T) { + var cluster Postgresql + err := cluster.UnmarshalJSON(tt.in) + if err != nil { + if tt.err == nil || err.Error() != tt.err.Error() { + t.Errorf("Unable to marshal cluster with serviceAnnotations: expected %v got %v", tt.err, err) + } + return + } + for k, v := range cluster.Spec.ServiceAnnotations { + found, expected := v, tt.annotations[k] + if found != expected { + t.Errorf("Didn't find correct value for key %v in for serviceAnnotations: Expected %v found %v", k, expected, found) } } }) diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index dc07aa2cf..aaae1f04b 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -514,6 +514,13 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { (*out)[key] = val } } + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.InitContainersOld != nil { in, out := &in.InitContainersOld, &out.InitContainersOld *out = make([]corev1.Container, len(*in)) diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index 85d014d8a..9efbc51c6 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -355,6 +355,12 @@ func TestPodAnnotations(t *testing.T) { database: map[string]string{"foo": "bar"}, merged: map[string]string{"foo": "bar"}, }, + { + subTest: "Both Annotations", + operator: 
map[string]string{"foo": "bar"}, + database: map[string]string{"post": "gres"}, + merged: map[string]string{"foo": "bar", "post": "gres"}, + }, { subTest: "Database Config overrides Operator Config Annotations", operator: map[string]string{"foo": "bar", "global": "foo"}, @@ -382,3 +388,319 @@ func TestPodAnnotations(t *testing.T) { } } } + +func TestServiceAnnotations(t *testing.T) { + enabled := true + disabled := false + tests := []struct { + about string + role PostgresRole + enableMasterLoadBalancerSpec *bool + enableMasterLoadBalancerOC bool + enableReplicaLoadBalancerSpec *bool + enableReplicaLoadBalancerOC bool + operatorAnnotations map[string]string + clusterAnnotations map[string]string + expect map[string]string + }{ + //MASTER + { + about: "Master with no annotations and EnableMasterLoadBalancer disabled on spec and OperatorConfig", + role: "master", + enableMasterLoadBalancerSpec: &disabled, + enableMasterLoadBalancerOC: false, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: make(map[string]string), + }, + { + about: "Master with no annotations and EnableMasterLoadBalancer enabled on spec", + role: "master", + enableMasterLoadBalancerSpec: &enabled, + enableMasterLoadBalancerOC: false, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + { + about: "Master with no annotations and EnableMasterLoadBalancer enabled only on operator config", + role: "master", + enableMasterLoadBalancerSpec: &disabled, + enableMasterLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: make(map[string]string), + }, + { + about: "Master with no annotations and EnableMasterLoadBalancer defined only on operator 
config", + role: "master", + enableMasterLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + { + about: "Master with cluster annotations and load balancer enabled", + role: "master", + enableMasterLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{"foo": "bar"}, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + "foo": "bar", + }, + }, + { + about: "Master with cluster annotations and load balancer disabled", + role: "master", + enableMasterLoadBalancerSpec: &disabled, + enableMasterLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{"foo": "bar"}, + expect: map[string]string{"foo": "bar"}, + }, + { + about: "Master with operator annotations and load balancer enabled", + role: "master", + enableMasterLoadBalancerOC: true, + operatorAnnotations: map[string]string{"foo": "bar"}, + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + "foo": "bar", + }, + }, + { + about: "Master with operator annotations override default annotations", + role: "master", + enableMasterLoadBalancerOC: true, + operatorAnnotations: map[string]string{ + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + 
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + }, + { + about: "Master with cluster annotations override default annotations", + role: "master", + enableMasterLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{ + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + }, + { + about: "Master with cluster annotations do not override external-dns annotations", + role: "master", + enableMasterLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com", + }, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + { + about: "Master with operator annotations do not override external-dns annotations", + role: "master", + enableMasterLoadBalancerOC: true, + clusterAnnotations: make(map[string]string), + operatorAnnotations: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com", + }, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + // REPLICA + { + about: "Replica with no annotations and EnableReplicaLoadBalancer disabled on spec and OperatorConfig", + role: "replica", + enableReplicaLoadBalancerSpec: &disabled, + enableReplicaLoadBalancerOC: false, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: make(map[string]string), + }, + { + 
about: "Replica with no annotations and EnableReplicaLoadBalancer enabled on spec", + role: "replica", + enableReplicaLoadBalancerSpec: &enabled, + enableReplicaLoadBalancerOC: false, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + { + about: "Replica with no annotations and EnableReplicaLoadBalancer enabled only on operator config", + role: "replica", + enableReplicaLoadBalancerSpec: &disabled, + enableReplicaLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: make(map[string]string), + }, + { + about: "Replica with no annotations and EnableReplicaLoadBalancer defined only on operator config", + role: "replica", + enableReplicaLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + { + about: "Replica with cluster annotations and load balancer enabled", + role: "replica", + enableReplicaLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{"foo": "bar"}, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + "foo": "bar", + }, + }, + { + about: "Replica with cluster annotations and load balancer disabled", + role: "replica", + enableReplicaLoadBalancerSpec: &disabled, + enableReplicaLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: 
map[string]string{"foo": "bar"}, + expect: map[string]string{"foo": "bar"}, + }, + { + about: "Replica with operator annotations and load balancer enabled", + role: "replica", + enableReplicaLoadBalancerOC: true, + operatorAnnotations: map[string]string{"foo": "bar"}, + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + "foo": "bar", + }, + }, + { + about: "Replica with operator annotations override default annotations", + role: "replica", + enableReplicaLoadBalancerOC: true, + operatorAnnotations: map[string]string{ + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + clusterAnnotations: make(map[string]string), + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + }, + { + about: "Replica with cluster annotations override default annotations", + role: "replica", + enableReplicaLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{ + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800", + }, + }, + { + about: "Replica with cluster annotations do not override external-dns annotations", + role: "replica", + enableReplicaLoadBalancerOC: true, + operatorAnnotations: make(map[string]string), + clusterAnnotations: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com", + }, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + 
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + { + about: "Replica with operator annotations do not override external-dns annotations", + role: "replica", + enableReplicaLoadBalancerOC: true, + clusterAnnotations: make(map[string]string), + operatorAnnotations: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com", + }, + expect: map[string]string{ + "external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com", + "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600", + }, + }, + // COMMON + { + about: "cluster annotations append to operator annotations", + role: "replica", + enableReplicaLoadBalancerOC: false, + operatorAnnotations: map[string]string{"foo": "bar"}, + clusterAnnotations: map[string]string{"post": "gres"}, + expect: map[string]string{"foo": "bar", "post": "gres"}, + }, + { + about: "cluster annotations override operator annotations", + role: "replica", + enableReplicaLoadBalancerOC: false, + operatorAnnotations: map[string]string{"foo": "bar", "post": "gres"}, + clusterAnnotations: map[string]string{"post": "greSQL"}, + expect: map[string]string{"foo": "bar", "post": "greSQL"}, + }, + } + + for _, tt := range tests { + t.Run(tt.about, func(t *testing.T) { + cl.OpConfig.CustomServiceAnnotations = tt.operatorAnnotations + cl.OpConfig.EnableMasterLoadBalancer = tt.enableMasterLoadBalancerOC + cl.OpConfig.EnableReplicaLoadBalancer = tt.enableReplicaLoadBalancerOC + cl.OpConfig.MasterDNSNameFormat = "{cluster}.{team}.{hostedzone}" + cl.OpConfig.ReplicaDNSNameFormat = "{cluster}-repl.{team}.{hostedzone}" + cl.OpConfig.DbHostedZone = "db.example.com" + + cl.Postgresql.Spec.ClusterName = "test" + cl.Postgresql.Spec.TeamID = "acid" + cl.Postgresql.Spec.ServiceAnnotations = tt.clusterAnnotations + cl.Postgresql.Spec.EnableMasterLoadBalancer = tt.enableMasterLoadBalancerSpec + cl.Postgresql.Spec.EnableReplicaLoadBalancer = 
tt.enableReplicaLoadBalancerSpec + + got := cl.generateServiceAnnotations(tt.role, &cl.Postgresql.Spec) + if len(tt.expect) != len(got) { + t.Errorf("expected %d annotation(s), got %d", len(tt.expect), len(got)) + return + } + for k, v := range got { + if tt.expect[k] != v { + t.Errorf("expected annotation '%v' with value '%v', got value '%v'", k, tt.expect[k], v) + } + } + }) + } +} diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 4c2f50296..e6561e0f3 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1230,14 +1230,6 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *ac } func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) *v1.Service { - var dnsName string - - if role == Master { - dnsName = c.masterDNSName() - } else { - dnsName = c.replicaDNSName() - } - serviceSpec := v1.ServiceSpec{ Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}}, Type: v1.ServiceTypeClusterIP, @@ -1247,8 +1239,6 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) serviceSpec.Selector = c.roleLabelsSet(false, role) } - var annotations map[string]string - if c.shouldCreateLoadBalancerForService(role, spec) { // spec.AllowedSourceRanges evaluates to the empty slice of zero length @@ -1262,18 +1252,6 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges) serviceSpec.Type = v1.ServiceTypeLoadBalancer - - annotations = map[string]string{ - constants.ZalandoDNSNameAnnotation: dnsName, - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - } - - if len(c.OpConfig.CustomServiceAnnotations) != 0 { - c.logger.Debugf("There are custom annotations defined, creating them.") - for customAnnotationKey, customAnnotationValue := range 
c.OpConfig.CustomServiceAnnotations { - annotations[customAnnotationKey] = customAnnotationValue - } - } } else if role == Replica { // before PR #258, the replica service was only created if allocated a LB // now we always create the service but warn if the LB is absent @@ -1285,7 +1263,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) Name: c.serviceName(role), Namespace: c.Namespace, Labels: c.roleLabelsSet(true, role), - Annotations: annotations, + Annotations: c.generateServiceAnnotations(role, spec), }, Spec: serviceSpec, } @@ -1293,6 +1271,42 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) return service } +func (c *Cluster) generateServiceAnnotations(role PostgresRole, spec *acidv1.PostgresSpec) map[string]string { + annotations := make(map[string]string) + + for k, v := range c.OpConfig.CustomServiceAnnotations { + annotations[k] = v + } + if spec != nil && spec.ServiceAnnotations != nil { + for k, v := range spec.ServiceAnnotations { + annotations[k] = v + } + } + + if c.shouldCreateLoadBalancerForService(role, spec) { + var dnsName string + if role == Master { + dnsName = c.masterDNSName() + } else { + dnsName = c.replicaDNSName() + } + + // Just set ELB Timeout annotation with default value, if it does not + // have a custom value + if _, ok := annotations[constants.ElbTimeoutAnnotationName]; !ok { + annotations[constants.ElbTimeoutAnnotationName] = constants.ElbTimeoutAnnotationValue + } + // External DNS name annotation is not customizable + annotations[constants.ZalandoDNSNameAnnotation] = dnsName + } + + if len(annotations) == 0 { + return nil + } + + return annotations +} + func (c *Cluster) generateEndpoint(role PostgresRole, subsets []v1.EndpointSubset) *v1.Endpoints { endpoints := &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ From be6c8cd5737f21a03ed7d671ddd31bc1e9abbef9 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Mon, 10 Feb 2020 16:41:51 +0100 Subject: [PATCH 3/6] 
specify cluster in e2e taint test (#823) --- e2e/tests/test_e2e.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 5f34dcb16..e3f00da9c 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -68,8 +68,8 @@ class EndToEndTestCase(unittest.TestCase): _, failover_targets = k8s.get_pg_nodes(cluster_label) # configure minimum boundaries for CPU and memory limits - minCPULimit = '250m' - minMemoryLimit = '250Mi' + minCPULimit = '500m' + minMemoryLimit = '500Mi' patch_min_resource_limits = { "data": { "min_cpu_limit": minCPULimit, @@ -176,7 +176,7 @@ class EndToEndTestCase(unittest.TestCase): # patch node and test if master is failing over to one of the expected nodes k8s.api.core_v1.patch_node(current_master_node, body) k8s.wait_for_master_failover(failover_targets) - k8s.wait_for_pod_start('spilo-role=replica') + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) new_master_node, new_replica_nodes = k8s.get_pg_nodes(cluster_label) self.assertNotEqual(current_master_node, new_master_node, From 00f00af2e84786eb4a87a35f2b987f5f946af3c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fredrik=20=C3=98strem?= Date: Tue, 11 Feb 2020 17:16:38 +0100 Subject: [PATCH 4/6] Fix MasterPodMoveTimeout field that cannot be unmarshalled (#816) * Update operator_configuration_type.go * Update operator_config.go --- pkg/apis/acid.zalan.do/v1/operator_configuration_type.go | 2 +- pkg/controller/operator_config.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index 35c51e08d..ded5261fb 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -67,7 +67,7 @@ type KubernetesMetaConfiguration struct { // TODO: use namespacedname PodEnvironmentConfigMap string 
`json:"pod_environment_configmap,omitempty"` PodPriorityClassName string `json:"pod_priority_class_name,omitempty"` - MasterPodMoveTimeout time.Duration `json:"master_pod_move_timeout,omitempty"` + MasterPodMoveTimeout Duration `json:"master_pod_move_timeout,omitempty"` EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"` PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"` PodManagementPolicy string `json:"pod_management_policy,omitempty"` diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index 98b56a298..c6f10faa0 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -66,7 +66,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.NodeReadinessLabel = fromCRD.Kubernetes.NodeReadinessLabel result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName result.PodManagementPolicy = fromCRD.Kubernetes.PodManagementPolicy - result.MasterPodMoveTimeout = fromCRD.Kubernetes.MasterPodMoveTimeout + result.MasterPodMoveTimeout = time.Duration(fromCRD.Kubernetes.MasterPodMoveTimeout) result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity result.PodAntiAffinityTopologyKey = fromCRD.Kubernetes.PodAntiAffinityTopologyKey From 744c71d16bd93b2d94b74fabc5e1f37a1eaa58c2 Mon Sep 17 00:00:00 2001 From: Jonathan Juares Beber Date: Thu, 13 Feb 2020 10:55:30 +0100 Subject: [PATCH 5/6] Allow services update when changing annotations (#818) The current implementations for `pkg.util.k8sutil.SameService` considers only service annotations change on the default annotations created by the operator. Custom annotations are not compared and consequently not applied after the first service creation. This commit introduces a complete annotations comparison between the current service created by the operator and the new one generated based on the configs. Also, it adds tests on the above-mentioned function. 
--- e2e/tests/test_e2e.py | 11 +- ...res-manifest-with-service-annotations.yaml | 20 -- pkg/util/k8sutil/k8sutil.go | 41 ++- pkg/util/k8sutil/k8sutil_test.go | 310 ++++++++++++++++++ 4 files changed, 348 insertions(+), 34 deletions(-) delete mode 100644 manifests/postgres-manifest-with-service-annotations.yaml create mode 100644 pkg/util/k8sutil/k8sutil_test.go diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index e3f00da9c..e92aba11f 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -270,7 +270,16 @@ class EndToEndTestCase(unittest.TestCase): } k8s.update_config(patch_custom_service_annotations) - k8s.create_with_kubectl("manifests/postgres-manifest-with-service-annotations.yaml") + pg_patch_custom_annotations = { + "spec": { + "serviceAnnotations": { + "annotation.key": "value" + } + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations) + annotations = { "annotation.key": "value", "foo": "bar", diff --git a/manifests/postgres-manifest-with-service-annotations.yaml b/manifests/postgres-manifest-with-service-annotations.yaml deleted file mode 100644 index ab3096740..000000000 --- a/manifests/postgres-manifest-with-service-annotations.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: "acid.zalan.do/v1" -kind: postgresql -metadata: - name: acid-service-annotations -spec: - teamId: "acid" - volume: - size: 1Gi - numberOfInstances: 2 - users: - zalando: # database owner - - superuser - - createdb - foo_user: [] # role for application foo - databases: - foo: zalando # dbname: owner - postgresql: - version: "11" - serviceAnnotations: - annotation.key: value diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index 118d1df53..c7b2366b0 100644 --- a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -9,7 +9,6 @@ import ( batchv1beta1 "k8s.io/api/batch/v1beta1" clientbatchv1beta1 
"k8s.io/client-go/kubernetes/typed/batch/v1beta1" - "github.com/zalando/postgres-operator/pkg/util/constants" v1 "k8s.io/api/core/v1" policybeta1 "k8s.io/api/policy/v1beta1" apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -136,21 +135,37 @@ func SameService(cur, new *v1.Service) (match bool, reason string) { } } - oldDNSAnnotation := cur.Annotations[constants.ZalandoDNSNameAnnotation] - newDNSAnnotation := new.Annotations[constants.ZalandoDNSNameAnnotation] - oldELBAnnotation := cur.Annotations[constants.ElbTimeoutAnnotationName] - newELBAnnotation := new.Annotations[constants.ElbTimeoutAnnotationName] + match = true - if oldDNSAnnotation != newDNSAnnotation { - return false, fmt.Sprintf("new service's %q annotation value %q doesn't match the current one %q", - constants.ZalandoDNSNameAnnotation, newDNSAnnotation, oldDNSAnnotation) - } - if oldELBAnnotation != newELBAnnotation { - return false, fmt.Sprintf("new service's %q annotation value %q doesn't match the current one %q", - constants.ElbTimeoutAnnotationName, oldELBAnnotation, newELBAnnotation) + reasonPrefix := "new service's annotations doesn't match the current one:" + for ann := range cur.Annotations { + if _, ok := new.Annotations[ann]; !ok { + match = false + if len(reason) == 0 { + reason = reasonPrefix + } + reason += fmt.Sprintf(" Removed '%s'.", ann) + } } - return true, "" + for ann := range new.Annotations { + v, ok := cur.Annotations[ann] + if !ok { + if len(reason) == 0 { + reason = reasonPrefix + } + reason += fmt.Sprintf(" Added '%s' with value '%s'.", ann, new.Annotations[ann]) + match = false + } else if v != new.Annotations[ann] { + if len(reason) == 0 { + reason = reasonPrefix + } + reason += fmt.Sprintf(" '%s' changed from '%s' to '%s'.", ann, v, new.Annotations[ann]) + match = false + } + } + + return match, reason } // SamePDB compares the PodDisruptionBudgets diff --git a/pkg/util/k8sutil/k8sutil_test.go b/pkg/util/k8sutil/k8sutil_test.go new file mode 
100644 index 000000000..12288243e --- /dev/null +++ b/pkg/util/k8sutil/k8sutil_test.go @@ -0,0 +1,310 @@ +package k8sutil + +import ( + "strings" + "testing" + + "github.com/zalando/postgres-operator/pkg/util/constants" + + v1 "k8s.io/api/core/v1" +) + +func newsService(ann map[string]string, svcT v1.ServiceType, lbSr []string) *v1.Service { + svc := &v1.Service{ + Spec: v1.ServiceSpec{ + Type: svcT, + LoadBalancerSourceRanges: lbSr, + }, + } + svc.Annotations = ann + return svc +} + +func TestServiceAnnotations(t *testing.T) { + tests := []struct { + about string + current *v1.Service + new *v1.Service + reason string + match bool + }{ + { + about: "two equal services", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeClusterIP, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeClusterIP, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: true, + }, + { + about: "services differ on service type", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeClusterIP, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's type "LoadBalancer" doesn't match the current one "ClusterIP"`, + }, + { + about: "services differ on lb source ranges", + current: newsService( + map[string]string{ + 
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"185.249.56.0/22"}), + match: false, + reason: `new service's LoadBalancerSourceRange doesn't match the current one`, + }, + { + about: "new service doesn't have lb source ranges", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{}), + match: false, + reason: `new service's LoadBalancerSourceRange doesn't match the current one`, + }, + { + about: "services differ on DNS annotation", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "new_clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: 'external-dns.alpha.kubernetes.io/hostname' changed from 'clstr.acid.zalan.do' to 'new_clstr.acid.zalan.do'.`, + }, + { 
+ about: "services differ on AWS ELB annotation", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: "1800", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: 'service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout' changed from '3600' to '1800'.`, + }, + { + about: "service changes existing annotation", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "bar", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "baz", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: 'foo' changed from 'bar' to 'baz'.`, + }, + { + about: "service changes multiple existing annotations", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "bar", + "bar": "foo", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: 
constants.ElbTimeoutAnnotationValue, + "foo": "baz", + "bar": "fooz", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + // Test just the prefix to avoid flakiness and map sorting + reason: `new service's annotations doesn't match the current one:`, + }, + { + about: "service adds a new custom annotation", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "bar", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: Added 'foo' with value 'bar'.`, + }, + { + about: "service removes a custom annotation", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "bar", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: Removed 'foo'.`, + }, + { + about: "service removes a custom annotation and adds a new one", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "bar", + }, + 
v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "bar": "foo", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: Removed 'foo'. Added 'bar' with value 'foo'.`, + }, + { + about: "service removes a custom annotation, adds a new one and change another", + current: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "foo": "bar", + "zalan": "do", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + "bar": "foo", + "zalan": "do.com", + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + reason: `new service's annotations doesn't match the current one: Removed 'foo'. Added 'bar' with value 'foo'. 
'zalan' changed from 'do' to 'do.com'`, + }, + { + about: "service add annotations", + current: newsService( + map[string]string{}, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + new: newsService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeLoadBalancer, + []string{"128.141.0.0/16", "137.138.0.0/16"}), + match: false, + // Test just the prefix to avoid flakiness and map sorting + reason: `new service's annotations doesn't match the current one: Added `, + }, + } + for _, tt := range tests { + t.Run(tt.about, func(t *testing.T) { + match, reason := SameService(tt.current, tt.new) + if match && !tt.match { + t.Errorf("expected services to do not match: '%q' and '%q'", tt.current, tt.new) + return + } + if !match && tt.match { + t.Errorf("expected services to be the same: '%q' and '%q'", tt.current, tt.new) + return + } + if !match && !tt.match { + if !strings.HasPrefix(reason, tt.reason) { + t.Errorf("expected reason '%s', found '%s'", tt.reason, reason) + return + } + } + }) + } +} From 3b10dc645dd3f4112df7d15042f335b86c01b3fc Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Thu, 13 Feb 2020 16:24:15 +0100 Subject: [PATCH 6/6] patch/update services on type change (#824) * use Update when disabling LoadBalancer + added e2e test --- e2e/tests/test_e2e.py | 56 ++++++++++ manifests/operator-service-account-rbac.yaml | 1 + pkg/cluster/resources.go | 103 +++++-------------- pkg/cluster/sync.go | 2 +- 4 files changed, 86 insertions(+), 76 deletions(-) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index e92aba11f..2d81a0647 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -58,6 +58,55 @@ class EndToEndTestCase(unittest.TestCase): k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml") k8s.wait_for_pod_start('spilo-role=master') + 
@timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_enable_load_balancer(self): + ''' + Test if services are updated when enabling/disabling load balancers + ''' + + k8s = self.k8s + cluster_label = 'version=acid-minimal-cluster' + + # enable load balancer services + pg_patch_enable_lbs = { + "spec": { + "enableMasterLoadBalancer": True, + "enableReplicaLoadBalancer": True + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs) + # wait for service recreation + time.sleep(60) + + master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master') + self.assertEqual(master_svc_type, 'LoadBalancer', + "Expected LoadBalancer service type for master, found {}".format(master_svc_type)) + + repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica') + self.assertEqual(repl_svc_type, 'LoadBalancer', + "Expected LoadBalancer service type for replica, found {}".format(repl_svc_type)) + + # disable load balancer services again + pg_patch_disable_lbs = { + "spec": { + "enableMasterLoadBalancer": False, + "enableReplicaLoadBalancer": False + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs) + # wait for service recreation + time.sleep(60) + + master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master') + self.assertEqual(master_svc_type, 'ClusterIP', + "Expected ClusterIP service type for master, found {}".format(master_svc_type)) + + repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica') + self.assertEqual(repl_svc_type, 'ClusterIP', + "Expected ClusterIP service type for replica, found {}".format(repl_svc_type)) + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_min_resource_limits(self): ''' @@ -362,6 +411,13 @@ class K8s: pod_phase = pods[0].status.phase 
time.sleep(self.RETRY_TIMEOUT_SEC) + def get_service_type(self, svc_labels, namespace='default'): + svc_type = '' + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + svc_type = svc.spec.type + return svc_type + def check_service_annotations(self, svc_labels, annotations, namespace='default'): svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items for svc in svcs: diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml index a37abe476..4761c145e 100644 --- a/manifests/operator-service-account-rbac.yaml +++ b/manifests/operator-service-account-rbac.yaml @@ -114,6 +114,7 @@ rules: - delete - get - patch + - update # to CRUD the StatefulSet which controls the Postgres cluster instances - apiGroups: - apps diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index c94a7bb46..d6c2149bf 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -366,6 +366,11 @@ func (c *Cluster) createService(role PostgresRole) (*v1.Service, error) { } func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error { + var ( + svc *v1.Service + err error + ) + c.setProcessName("updating %v service", role) if c.Services[role] == nil { @@ -373,70 +378,6 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error } serviceName := util.NameFromMeta(c.Services[role].ObjectMeta) - endpointName := util.NameFromMeta(c.Endpoints[role].ObjectMeta) - // TODO: check if it possible to change the service type with a patch in future versions of Kubernetes - if newService.Spec.Type != c.Services[role].Spec.Type { - // service type has changed, need to replace the service completely. - // we cannot use just patch the current service, since it may contain attributes incompatible with the new type. 
- var ( - currentEndpoint *v1.Endpoints - err error - ) - - if role == Master { - // for the master service we need to re-create the endpoint as well. Get the up-to-date version of - // the addresses stored in it before the service is deleted (deletion of the service removes the endpoint) - currentEndpoint, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("could not get current cluster %s endpoints: %v", role, err) - } - } - err = c.KubeClient.Services(serviceName.Namespace).Delete(serviceName.Name, c.deleteOptions) - if err != nil { - return fmt.Errorf("could not delete service %q: %v", serviceName, err) - } - - // wait until the service is truly deleted - c.logger.Debugf("waiting for service to be deleted") - - err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, - func() (bool, error) { - _, err2 := c.KubeClient.Services(serviceName.Namespace).Get(serviceName.Name, metav1.GetOptions{}) - if err2 == nil { - return false, nil - } - if k8sutil.ResourceNotFound(err2) { - return true, nil - } - return false, err2 - }) - if err != nil { - return fmt.Errorf("could not delete service %q: %v", serviceName, err) - } - - // make sure we clear the stored service and endpoint status if the subsequent create fails. 
- c.Services[role] = nil - c.Endpoints[role] = nil - if role == Master { - // create the new endpoint using the addresses obtained from the previous one - endpointSpec := c.generateEndpoint(role, currentEndpoint.Subsets) - ep, err := c.KubeClient.Endpoints(endpointSpec.Namespace).Create(endpointSpec) - if err != nil { - return fmt.Errorf("could not create endpoint %q: %v", endpointName, err) - } - - c.Endpoints[role] = ep - } - - svc, err := c.KubeClient.Services(serviceName.Namespace).Create(newService) - if err != nil { - return fmt.Errorf("could not create service %q: %v", serviceName, err) - } - - c.Services[role] = svc - - return nil - } // update the service annotation in order to propagate ELB notation. if len(newService.ObjectMeta.Annotations) > 0 { @@ -454,18 +395,30 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error } } - patchData, err := specPatch(newService.Spec) - if err != nil { - return fmt.Errorf("could not form patch for the service %q: %v", serviceName, err) - } + // now, patch the service spec, but when disabling LoadBalancers do update instead + // patch does not work because of LoadBalancerSourceRanges field (even if set to nil) + oldServiceType := c.Services[role].Spec.Type + newServiceType := newService.Spec.Type + if newServiceType == "ClusterIP" && newServiceType != oldServiceType { + newService.ResourceVersion = c.Services[role].ResourceVersion + newService.Spec.ClusterIP = c.Services[role].Spec.ClusterIP + svc, err = c.KubeClient.Services(serviceName.Namespace).Update(newService) + if err != nil { + return fmt.Errorf("could not update service %q: %v", serviceName, err) + } + } else { + patchData, err := specPatch(newService.Spec) + if err != nil { + return fmt.Errorf("could not form patch for the service %q: %v", serviceName, err) + } - // update the service spec - svc, err := c.KubeClient.Services(serviceName.Namespace).Patch( - serviceName.Name, - types.MergePatchType, - patchData, "") - if err != nil { 
- return fmt.Errorf("could not patch service %q: %v", serviceName, err) + svc, err = c.KubeClient.Services(serviceName.Namespace).Patch( + serviceName.Name, + types.MergePatchType, + patchData, "") + if err != nil { + return fmt.Errorf("could not patch service %q: %v", serviceName, err) + } } c.Services[role] = svc diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index fa4fc9ec1..053db9ff7 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -116,7 +116,7 @@ func (c *Cluster) syncServices() error { c.logger.Debugf("syncing %s service", role) if err := c.syncEndpoint(role); err != nil { - return fmt.Errorf("could not sync %s endpont: %v", role, err) + return fmt.Errorf("could not sync %s endpoint: %v", role, err) } if err := c.syncService(role); err != nil {