From 2bb3bdeeb44611c099a68fc4cccdde43271ecfc6 Mon Sep 17 00:00:00 2001
From: Jan M
Date: Fri, 4 May 2018 12:20:54 +0200
Subject: [PATCH 1/4] Slim down the README and config map, targeting easy
 first-time deployment to minikube.

---
 README.md                        | 32 ++++++++---------
 manifests/configmap.yaml         | 59 ++++++++++++++++----------------
 manifests/postgres-operator.yaml | 10 ++----
 3 files changed, 47 insertions(+), 54 deletions(-)

diff --git a/README.md b/README.md
index a76905303..3fe70fa82 100644
--- a/README.md
+++ b/README.md
@@ -6,25 +6,25 @@
 The Postgres operator manages PostgreSQL clusters on Kubernetes using the
 [operator pattern](https://coreos.com/blog/introducing-operators.html).
 During the initial run it registers the [Custom Resource Definition (CRD)](https://kubernetes.io/docs/concepts/api-extension/custom-resources/#customresourcedefinitions) for Postgres.
-The PostgreSQL CRD is essentially the schema that describes the contents of the manifests for deploying individual
-PostgreSQL clusters using StatefulSets and [Patroni](https://github.com/zalando/patroni).
+The `postgresql` CRD is essentially the schema that describes the contents of the manifests for deploying individual
+Postgres clusters using StatefulSets and [Patroni](https://github.com/zalando/patroni).

 Once the operator is running, it performs the following actions:

-* watches for new PostgreSQL cluster manifests and deploys corresponding clusters
+* watches for new `postgresql` manifests and deploys new clusters
 * watches for updates to existing manifests and changes corresponding properties of the running clusters
 * watches for deletes of the existing manifests and deletes corresponding clusters
-* acts on an update to the operator definition itself and changes the running clusters when necessary
-  (i.e. when the docker image inside the operator definition has been updated)
-* periodically checks running clusters against the manifests and acts on the differences found
+* acts on an update to the operator configuration itself and changes the running clusters when necessary
+  (i.e. the Docker image changes for a minor release update)
+* periodically checks running clusters against the manifests and syncs changes

-For instance, when the user creates a new custom object of type ``postgresql`` by submitting a new manifest with
-``kubectl``, the operator fetches that object and creates the corresponding Kubernetes structures
-(StatefulSets, Services, Secrets) according to its definition.
+Example: When a user creates a new custom object of type ``postgresql`` by submitting a new manifest with
+``kubectl``, the operator fetches that object and creates the required Kubernetes entities to spawn a new Postgres cluster
+(StatefulSets, Services, Secrets).

-Another example is changing the docker image inside the operator. In this case, the operator first goes to all StatefulSets
-it manages and updates them with the new docker images; afterwards, all pods from each StatefulSet are killed one by one
-(rolling upgrade) and the replacements are spawned automatically by each StatefulSet with the new docker image.
+Update example: After changing the Docker image inside the operator's configuration, the operator first goes to all StatefulSets
+it manages and updates them with the new Docker image; afterwards, all pods from each StatefulSet are killed one by one
+and the replacements are spawned automatically by each StatefulSet with the new Docker image. This is called the rolling upgrade.
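For illustration, a minimal `postgresql` manifest might look roughly like the sketch below. The exact schema is defined by the CRD, and `manifests/minimal-postgres-manifest.yaml` in this repository is the authoritative example, so treat the field values here as placeholders.

```yaml
# Sketch of a minimal postgresql manifest; values are illustrative placeholders.
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  teamId: "ACID"          # team owning the cluster
  numberOfInstances: 2    # one master plus one replica pod in the StatefulSet
  volume:
    size: 1Gi             # persistent volume per pod
  postgresql:
    version: "10"         # Postgres major version to run
```

Submitting such a manifest with `kubectl create -f` is what triggers the create flow described above.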
## Scope

@@ -147,9 +147,9 @@ We can use the generated secret of the `postgres` robot user to connect to our `
 The `manifests/operator-rbac.yaml` defines cluster roles and bindings needed for the operator to function under access control restrictions. To deploy the operator with this RBAC policy use:

 ```bash
-kubectl create -f manifests/configmap.yaml 
+kubectl create -f manifests/configmap.yaml
 kubectl create -f manifests/operator-rbac.yaml
-kubectl create -f manifests/postgres-operator.yaml 
+kubectl create -f manifests/postgres-operator.yaml
 kubectl create -f manifests/minimal-postgres-manifest.yaml
 ```

@@ -158,7 +158,7 @@ the `operator` default that is created in the `serviceaccount.yaml`. So you will
 This is done intentionally, so as to avoid breaking those setups that already work with the default `operator` account.
 In the future the operator should ideally be run under the
-`zalando-postgres-operator` service account. 
+`zalando-postgres-operator` service account.

 The service account defined in `operator-rbac.yaml` acquires some privileges not really used by the operator (i.e. we only need list and watch on configmaps),

@@ -274,7 +274,7 @@ As a preventive measure, one can restrict the minimum and the maximum number of
 If either `min_instances` or `max_instances` is set to a non-zero value, the operator may adjust the number of instances specified in the cluster manifest to match either the min or the max boundary. For instance, if a cluster manifest has 1 instance and `min_instances` is set to 3, the cluster will be created with 3 instances. By default, both parameters are set to -1.

-### Load balancers 
+### Load balancers

 For any Postgresql/Spilo cluster an operator creates two separate k8s services: one for the master pod and one for replica pods. To expose these services to an outer network, one can attach load balancers to them by setting `enableMasterLoadBalancer` and/or `enableReplicaLoadBalancer` to `true` in the cluster manifest. In the case any of these variables is omitted from the manifest, the operator configmap's settings `enable_master_load_balancer` and `enable_replica_load_balancer` apply. Note that the operator settings affect all Postgresql services running in a namespace watched by the operator.
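To make that precedence concrete, a cluster that should expose only its master through a load balancer would carry the two per-cluster flags as in the sketch below; only the two flags matter here, the rest of the spec is elided.

```yaml
# Cluster manifest sketch: per-cluster flags override the operator configmap defaults.
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  enableMasterLoadBalancer: true     # overrides enable_master_load_balancer from the configmap
  enableReplicaLoadBalancer: false   # overrides enable_replica_load_balancer from the configmap
```

If either flag is omitted from the manifest, the corresponding configmap setting decides.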
diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml
index f3b79e67b..a6ebcf4f3 100644
--- a/manifests/configmap.yaml
+++ b/manifests/configmap.yaml
@@ -2,26 +2,43 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: postgres-operator
-data:
-  # the env var with the same name in the operator pod may overwrite this value
-  # if neither is set or evaluates to the empty string, listen to the operator's own namespace
+data:
   # if set to the "*", listen to all namespaces
   # watched_namespace: development
   cluster_labels: application:spilo
   cluster_name_label: version
   pod_role_label: spilo-role
-  db_hosted_zone: db.example.com
+  debug_logging: "true"
-  master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}'
-  replica_dns_name_format: '{cluster}-repl.{team}.staging.{hostedzone}'
+  workers: "4"
   docker_image: registry.opensource.zalan.do/acid/demospilo-10:1.3-p3
   secret_name_template: '{username}.{cluster}.credentials'
-  etcd_host: ""
-  infrastructure_roles_secret_name: postgresql-infrastructure-roles
-  oauth_token_secret_name: postgresql-operator
-  pam_configuration: |
-    https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees
-  pam_role_name: zalandos
+  # etcd_host: ""
+  super_username: postgres
+  enable_teams_api: "false"
+  # enable_team_superuser: "false"
+  # team_admin_role: "admin"
+  # teams_api_url: http://fake-teams-api.default.svc.cluster.local
+  # team_api_role_configuration: "log_statement:all"
+  # infrastructure_roles_secret_name: postgresql-infrastructure-roles
+  # oauth_token_secret_name: postgresql-operator
+  # pam_role_name: zalandos
+  # pam_configuration: |
+  #   https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees
+  db_hosted_zone: db.example.com
+  master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}'
+  replica_dns_name_format: '{cluster}-repl.{team}.staging.{hostedzone}'
+  enable_master_load_balancer: "false"
+  enable_replica_load_balancer: "false"
+
+  pdb_name_format: "postgres-{cluster}-pdb"
+  node_eol_label: "lifecycle-status:pending-decommission"
+  node_readiness_label: ""
+
+  api_port: "8080"
+  ring_log_lines: "100"
+  cluster_history_entries: "1000"
+  pod_terminate_grace_period: 5m
   pod_deletion_wait_timeout: 10m
   pod_label_wait_timeout: 10m
   ready_wait_interval: 3s
@@ -30,21 +47,3 @@ data:
   resource_check_interval: 3s
   resource_check_timeout: 10m
   resync_period: 5m
-  super_username: postgres
-  enable_teams_api: "false"
-  enable_team_superuser: "false"
-  team_admin_role: "admin"
-  teams_api_url: http://fake-teams-api.default.svc.cluster.local
-  workers: "4"
-  # turn on/off load balancers for all Postgres clusters managed by the operator
-  # LB settings in cluster manifests take priority over these settings
-  enable_master_load_balancer: "true"
-  enable_replica_load_balancer: "false"
-  api_port: "8080"
-  ring_log_lines: "100"
-  cluster_history_entries: "1000"
-  pod_terminate_grace_period: 5m
-  pdb_name_format: "postgres-{cluster}-pdb"
-  node_eol_label: "lifecycle-status:pending-decommission"
-  node_readiness_label: ""
-  team_api_role_configuration: "log_statement:all"
diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml
index 760451936..a7a8dde49 100644
--- a/manifests/postgres-operator.yaml
+++ b/manifests/postgres-operator.yaml
@@ -12,15 +12,9 @@ spec:
       serviceAccountName: operator
       containers:
       - name: postgres-operator
-        image: registry.opensource.zalan.do/acid/postgres-operator:0f392c2
+        image: registry.opensource.zalan.do/acid/postgres-operator:4c8dfd7
         imagePullPolicy: IfNotPresent
         env:
-        # uncomment to overwrite a similar setting from operator configmap
-        # if set to the empty string, watch the operator's own namespace
-        # if set to the "*", listen to all namespaces
-        # - name: WATCHED_NAMESPACE
-        #   valueFrom:
-        #     fieldRef:
-        #       fieldPath: metadata.namespace
+        # provided additional ENV vars can overwrite individual config map entries
         - name: CONFIG_MAP_NAME
           value: "postgres-operator"

From 288242d021283d189b92823eee47d0cc2eaea8ff Mon Sep 17 00:00:00 2001
From: Oleksii Kliukin
Date: Tue, 8 May 2018 15:45:01 +0200
Subject: [PATCH 2/4] Change rolling upgrades to rolling updates.

Terminology change.
---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 3fe70fa82..88100e9bc 100644
--- a/README.md
+++ b/README.md
@@ -24,14 +24,14 @@ Example: When a user creates a new custom object of type ``postgresql`` by submi
 Update example: After changing the Docker image inside the operator's configuration, the operator first goes to all StatefulSets
 it manages and updates them with the new Docker image; afterwards, all pods from each StatefulSet are killed one by one
-and the replacements are spawned automatically by each StatefulSet with the new Docker image. This is called the rolling upgrade.
+and the replacements are spawned automatically by each StatefulSet with the new Docker image. This is called a rolling update.

 ## Scope

 The scope of the postgres operator is on provisioning, modifying configuration and cleaning up Postgres clusters that use Patroni, basically to make it easy and convenient to run Patroni-based clusters on Kubernetes. The provisioning and modifying includes Kubernetes resources on one side, but also e.g. database and role provisioning once the cluster is up and running. We try to leave as much work as possible to Kubernetes and to Patroni where it fits, especially the cluster bootstrap and high availability. The operator is, however, involved in some overarching orchestration, like rolling updates, to improve the user experience.

 Monitoring of clusters is not in scope; good tools for this already exist, from ZMON to Prometheus and more Postgres-specific options.

From 40163677c754681c6fc9fb43e38849086ffd9106 Mon Sep 17 00:00:00 2001
From: Oleksii Kliukin
Date: Tue, 8 May 2018 15:50:10 +0200
Subject: [PATCH 3/4] Remove Kubernetes upgrade-related labels

The node_eol_label is obsolete and not used. The node_readiness_label,
if set, will prevent scheduling pods on nodes without that label; by
default minikube doesn't set any label on the node.
---
 manifests/configmap.yaml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml
index a6ebcf4f3..dc39936fb 100644
--- a/manifests/configmap.yaml
+++ b/manifests/configmap.yaml
@@ -32,8 +32,6 @@ data:
   enable_replica_load_balancer: "false"

   pdb_name_format: "postgres-{cluster}-pdb"
-  node_eol_label: "lifecycle-status:pending-decommission"
-  node_readiness_label: ""

   api_port: "8080"
   ring_log_lines: "100"
   cluster_history_entries: "1000"

From 987b43456b080518c23a1242caa5c199ae7c1407 Mon Sep 17 00:00:00 2001
From: Oleksii Kliukin
Date: Tue, 15 May 2018 15:19:18 +0200
Subject: [PATCH 4/4] Deprecate old LB options, fix endpoint sync. (#287)

* Deprecate old LB options, fix endpoint sync.

- deprecate useLoadBalancer, replicaLoadBalancer from the manifest and
  enable_load_balancer from the operator configuration.
  The old operator configuration options become no-op with this commit.
  For the old manifest options, `useLoadBalancer` and `replicaLoadBalancer`
  are still consulted, but only in the absence of the new ones
  (enableMasterLoadBalancer and enableReplicaLoadBalancer).

- Make sure the endpoint being created during the sync receives the proper
  addresses subset. This is more critical for the replicas, as for the
  masters Patroni will normally re-create the endpoint before the operator.

- Avoid creating the replica endpoint, since it will be created
  automatically by the corresponding service.

- Update the README and unit tests.

Code review by @mgomezch and @zerg-junior
---
 README.md                         | 16 ++++++++--
 pkg/cluster/cluster.go            | 16 +++++++---
 pkg/cluster/k8sres.go             | 18 ++---------
 pkg/cluster/k8sres_test.go        | 17 -----------
 pkg/cluster/resources.go          | 40 ++++++++++++++++++++++--
 pkg/controller/controller.go      |  9 ++++++
 pkg/controller/postgresql.go      | 51 +++++++++++++++++++++++++++++++
 pkg/controller/postgresql_test.go | 43 ++++++++++++++++++++++++++
 pkg/util/config/config.go         |  2 +-
 9 files changed, 168 insertions(+), 44 deletions(-)
 create mode 100644 pkg/controller/postgresql_test.go

diff --git a/README.md b/README.md
index 88100e9bc..aec444cad 100644
--- a/README.md
+++ b/README.md
@@ -276,11 +276,21 @@ For instance, of a cluster manifest has 1 instance and the min_instances is set

 ### Load balancers

-For any Postgresql/Spilo cluster an operator creates two separate k8s services: one for the master pod and one for replica pods. To expose these services to an outer network, one can attach load balancers to them by setting `enableMasterLoadBalancer` and/or `enableReplicaLoadBalancer` to `true` in the cluster manifest. In the case any of these variables is omitted from the manifest, the operator configmap's settings `enable_master_load_balancer` and `enable_replica_load_balancer` apply. Note that the operator settings affect all Postgresql services running in a namespace watched by the operator.
+For any Postgresql/Spilo cluster, the operator creates two separate k8s services: one for the master pod and one for
+replica pods. To expose these services to an outer network, one can attach load balancers to them by setting
+`enableMasterLoadBalancer` and/or `enableReplicaLoadBalancer` to `true` in the cluster manifest. In the case any of
+these variables are omitted from the manifest, the operator configmap's settings `enable_master_load_balancer` and
+`enable_replica_load_balancer` apply. Note that the operator settings affect all Postgresql services running in a
+namespace watched by the operator.

-For backward compatibility with already configured clusters we maintain in a cluster manifest older parameter names, namely `useLoadBalancer` for enabling the master service's load balancer and `replicaLoadBalancer` for the replica service. If set, these params take precedence over the newer `enableMasterLoadBalancer` and `enableReplicaLoadBalancer`. Note that in older versions of the operator (before PR #258) `replicaLoadBalancer` was responsible for both creating the replica service and attaching an LB to it; now the service is always created (since k8s service typically is free in the cloud setting), and this param only attaches an LB (that typically costs money).
+###### Deprecated parameters

-For the same reason of compatibility, we maintain the `enable_load_balancer` setting in the operator config map that was previously used to attach a LB to the master service.
Its value is examined after the deprecated `useLoadBalancer` setting from the Postgresql manifest but before the recommended `enableMasterLoadBalancer`. There is no equivalent option for the replica service since the service used to be always created with a load balancer.
+Parameters `useLoadBalancer` and `replicaLoadBalancer` in the PostgreSQL manifest are deprecated. To retain
+compatibility with the old manifests, they take effect in the absence of the new `enableMasterLoadBalancer` and
+`enableReplicaLoadBalancer` parameters (that is, if either of the new ones is present, all deprecated parameters are
+ignored). The operator configuration parameter `enable_load_balancer` is ignored in all cases.
+
 # Setup development environment

diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index 2260f0e96..b01d205c6 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -170,6 +170,10 @@ func (c *Cluster) setStatus(status spec.PostgresStatus) {
     }
 }

+func (c *Cluster) isNewCluster() bool {
+    return c.Status == spec.ClusterStatusCreating
+}
+
 // initUsers populates c.systemUsers and c.pgUsers maps.
 func (c *Cluster) initUsers() error {
     c.setProcessName("initializing users")

@@ -255,11 +259,15 @@ func (c *Cluster) Create() error {
         if c.Endpoints[role] != nil {
             return fmt.Errorf("%s endpoint already exists in the cluster", role)
         }
-        ep, err = c.createEndpoint(role)
-        if err != nil {
-            return fmt.Errorf("could not create %s endpoint: %v", role, err)
+        if role == Master {
+            // replica endpoint will be created by the replica service. Master endpoint needs to be created by us,
+            // since the corresponding master service doesn't define any selectors.
+            ep, err = c.createEndpoint(role)
+            if err != nil {
+                return fmt.Errorf("could not create %s endpoint: %v", role, err)
+            }
+            c.logger.Infof("endpoint %q has been successfully created", util.NameFromMeta(ep.ObjectMeta))
         }
-        c.logger.Infof("endpoint %q has been successfully created", util.NameFromMeta(ep.ObjectMeta))

         if c.Services[role] != nil {
             return fmt.Errorf("service already exists in the cluster")

diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go
index 18350f526..ea2c6a9ba 100644
--- a/pkg/cluster/k8sres.go
+++ b/pkg/cluster/k8sres.go
@@ -683,12 +683,6 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *sp

     case Replica:

-        // deprecated option takes priority for backward compatibility
-        if spec.ReplicaLoadBalancer != nil {
-            c.logger.Debugf("The Postgres manifest for the cluster %v sets the deprecated `replicaLoadBalancer` param. Consider using the `enableReplicaLoadBalancer` instead.", c.Name)
-            return *spec.ReplicaLoadBalancer
-        }
-
         // if the value is explicitly set in a Postgresql manifest, follow this setting
         if spec.EnableReplicaLoadBalancer != nil {
             return *spec.EnableReplicaLoadBalancer

@@ -699,16 +693,8 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *sp

     case Master:

-        if spec.UseLoadBalancer != nil {
-            c.logger.Debugf("The Postgres manifest for the cluster %v sets the deprecated `useLoadBalancer` param. Consider using the `enableMasterLoadBalancer` instead.", c.Name)
-            return *spec.UseLoadBalancer
-        }
-
-        // `enable_load_balancer` governs LB for a master service
-        // there is no equivalent deprecated operator option for the replica LB
-        if c.OpConfig.EnableLoadBalancer != nil {
-            c.logger.Debugf("The operator configmap sets the deprecated `enable_load_balancer` param. Consider using the `enable_master_load_balancer` or `enable_replica_load_balancer` instead.")
-            return *c.OpConfig.EnableLoadBalancer
+        if spec.EnableMasterLoadBalancer != nil {
+            return *spec.EnableMasterLoadBalancer
         }

         return c.OpConfig.EnableMasterLoadBalancer

diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go
index 43405a2ec..54890660c 100644
--- a/pkg/cluster/k8sres_test.go
+++ b/pkg/cluster/k8sres_test.go
@@ -65,23 +65,6 @@ func TestCreateLoadBalancerLogic(t *testing.T) {
             opConfig: config.Config{EnableReplicaLoadBalancer: false},
             result:   false,
         },
-        {
-            subtest:  "old format, load balancer is enabled for replica",
-            role:     Replica,
-            spec:     &spec.PostgresSpec{ReplicaLoadBalancer: True()},
-            opConfig: config.Config{},
-            result:   true,
-        },
-        {
-            subtest: "old format has priority",
-            role:    Replica,
-            spec: &spec.PostgresSpec{
-                ReplicaLoadBalancer:       True(),
-                EnableReplicaLoadBalancer: False(),
-            },
-            opConfig: config.Config{},
-            result:   true,
-        },
     }
     for _, tt := range tests {
         cluster.OpConfig = tt.opConfig

diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go
index 7a602c73d..1fa72d54b 100644
--- a/pkg/cluster/resources.go
+++ b/pkg/cluster/resources.go
@@ -344,10 +344,16 @@ func (c *Cluster) deleteService(role PostgresRole) error {
 }

 func (c *Cluster) createEndpoint(role PostgresRole) (*v1.Endpoints, error) {
+    var (
+        subsets []v1.EndpointSubset
+    )
     c.setProcessName("creating endpoint")
-    subsets := make([]v1.EndpointSubset, 0)
-    if role == Master {
-        //TODO: set subsets to the master
+    if !c.isNewCluster() {
+        subsets = c.generateEndpointSubsets(role)
+    } else {
+        // Patroni will populate the master endpoint for the new cluster
+        // The replica endpoint will be filled-in by the service selector.
+        subsets = make([]v1.EndpointSubset, 0)
     }
     endpointsSpec := c.generateEndpoint(role, subsets)

@@ -361,6 +367,34 @@ func (c *Cluster) createEndpoint(role PostgresRole) (*v1.Endpoints, error) {
     return endpoints, nil
 }

+func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset {
+    result := make([]v1.EndpointSubset, 0)
+    pods, err := c.getRolePods(role)
+    if err != nil {
+        if role == Master {
+            c.logger.Warningf("could not obtain the address for %s pod: %v", role, err)
+        } else {
+            c.logger.Warningf("could not obtain the addresses for %s pods: %v", role, err)
+        }
+        return result
+    }
+
+    endPointAddresses := make([]v1.EndpointAddress, 0)
+    for _, pod := range pods {
+        endPointAddresses = append(endPointAddresses, v1.EndpointAddress{IP: pod.Status.PodIP})
+    }
+    if len(endPointAddresses) > 0 {
+        result = append(result, v1.EndpointSubset{
+            Addresses: endPointAddresses,
+            Ports:     []v1.EndpointPort{{"postgresql", 5432, "TCP"}},
+        })
+    } else if role == Master {
+        c.logger.Warningf("master is not running, generated master endpoint does not contain any addresses")
+    }
+
+    return result
+}
+
 func (c *Cluster) createPodDisruptionBudget() (*policybeta1.PodDisruptionBudget, error) {
     podDisruptionBudgetSpec := c.generatePodDisruptionBudget()
     podDisruptionBudget, err := c.KubeClient.
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
index 7b309a547..84a07811b 100644
--- a/pkg/controller/controller.go
+++ b/pkg/controller/controller.go
@@ -111,6 +111,7 @@ func (c *Controller) initOperatorConfig() {
     }
     c.opConfig = config.NewFromMap(configMapData)
+    c.warnOnDeprecatedOperatorParameters()

     scalyrAPIKey := os.Getenv("SCALYR_API_KEY")
     if scalyrAPIKey != "" {
@@ -119,6 +120,14 @@
 }

+// warnOnDeprecatedOperatorParameters emits warnings upon finding deprecated parameters
+func (c *Controller) warnOnDeprecatedOperatorParameters() {
+    if c.opConfig.EnableLoadBalancer != nil {
+        c.logger.Warningf("Operator configuration parameter 'enable_load_balancer' is deprecated and takes no effect. " +
+            "Consider using the 'enable_master_load_balancer' or 'enable_replica_load_balancer' instead.")
+    }
+}
+
 func (c *Controller) initPodServiceAccount() {

     if c.opConfig.PodServiceAccountDefinition == "" {

diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go
index 1203bff1f..0968eb758 100644
--- a/pkg/controller/postgresql.go
+++ b/pkg/controller/postgresql.go
@@ -166,6 +166,17 @@ func (c *Controller) processEvent(event spec.ClusterEvent) {

     defer c.curWorkerCluster.Store(event.WorkerID, nil)

+    if event.EventType == spec.EventAdd || event.EventType == spec.EventUpdate || event.EventType == spec.EventSync {
+        // handle deprecated parameters by possibly assigning their values to the new ones.
+        if event.OldSpec != nil {
+            c.mergeDeprecatedPostgreSQLSpecParameters(&event.OldSpec.Spec)
+        }
+        if event.NewSpec != nil {
+            c.mergeDeprecatedPostgreSQLSpecParameters(&event.NewSpec.Spec)
+        }
+        c.warnOnDeprecatedPostgreSQLSpecParameters(&event.NewSpec.Spec)
+    }
+
     switch event.EventType {
     case spec.EventAdd:
         if clusterFound {

@@ -287,6 +298,46 @@ func (c *Controller) processClusterEventsQueue(idx int, stopCh <-chan struct{},
     }
 }

+func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *spec.PostgresSpec) {
+
+    deprecate := func(deprecated, replacement string) {
+        c.logger.Warningf("Parameter %q is deprecated. Consider setting %q instead", deprecated, replacement)
+    }
+
+    noeffect := func(param string, explanation string) {
+        c.logger.Warningf("Parameter %q takes no effect. %s", param, explanation)
+    }
+
+    if spec.UseLoadBalancer != nil {
+        deprecate("useLoadBalancer", "enableMasterLoadBalancer")
+    }
+    if spec.ReplicaLoadBalancer != nil {
+        deprecate("replicaLoadBalancer", "enableReplicaLoadBalancer")
+    }
+
+    if len(spec.MaintenanceWindows) > 0 {
+        noeffect("maintenanceWindows", "Not implemented.")
+    }
+}
+
+func (c *Controller) mergeDeprecatedPostgreSQLSpecParameters(spec *spec.PostgresSpec) *spec.PostgresSpec {
+    if spec.UseLoadBalancer != nil || spec.ReplicaLoadBalancer != nil {
+        if spec.EnableReplicaLoadBalancer != nil || spec.EnableMasterLoadBalancer != nil {
+            c.logger.Warnf("Both old and new load balancer options are present, ignoring old ones")
+        } else {
+            if spec.UseLoadBalancer != nil {
+                spec.EnableMasterLoadBalancer = new(bool)
+                *spec.EnableMasterLoadBalancer = *spec.UseLoadBalancer
+            }
+            if spec.ReplicaLoadBalancer != nil {
+                spec.EnableReplicaLoadBalancer = new(bool)
+                *spec.EnableReplicaLoadBalancer = *spec.ReplicaLoadBalancer
+            }
+        }
+    }
+    return spec
+}
+
 func (c *Controller) queueClusterEvent(old, new *spec.Postgresql, eventType spec.EventType) {
     var (
         uid types.UID

diff --git a/pkg/controller/postgresql_test.go b/pkg/controller/postgresql_test.go
new file mode 100644
index 000000000..967166ecb
--- /dev/null
+++ b/pkg/controller/postgresql_test.go
@@ -0,0 +1,43 @@
+package controller
+
+import (
+    "github.com/zalando-incubator/postgres-operator/pkg/spec"
+    "reflect"
+    "testing"
+)
+
+var (
+    True  bool = true
+    False bool = false
+)
+
+func TestMergeDeprecatedPostgreSQLSpecParameters(t *testing.T) {
+    c := NewController(&spec.ControllerConfig{})
+
+    tests := []struct {
+        name  string
+        in    *spec.PostgresSpec
+        out   *spec.PostgresSpec
+        error string
+    }{
+        {
+            "Check that old parameters propagate values to the new ones",
+            &spec.PostgresSpec{UseLoadBalancer: &True, ReplicaLoadBalancer: &True},
+            &spec.PostgresSpec{UseLoadBalancer: &True, ReplicaLoadBalancer: &True,
+                EnableMasterLoadBalancer: &True, EnableReplicaLoadBalancer: &True},
+            "New parameters should be set from the values of old ones",
+        },
+        {
+            "Check that new parameters are not set when both old and new ones are present",
+            &spec.PostgresSpec{UseLoadBalancer: &True, EnableReplicaLoadBalancer: &True},
+            &spec.PostgresSpec{UseLoadBalancer: &True, EnableReplicaLoadBalancer: &True},
+            "New parameters should remain unchanged when both old and new are present",
+        },
+    }
+    for _, tt := range tests {
+        result := c.mergeDeprecatedPostgreSQLSpecParameters(tt.in)
+        if !reflect.DeepEqual(result, tt.out) {
+            t.Errorf("%s: %v", tt.name, tt.error)
+        }
+    }
+}

diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go
index aed9accd3..3ffdfb932 100644
--- a/pkg/util/config/config.go
+++ b/pkg/util/config/config.go
@@ -88,7 +88,7 @@ type Config struct {
     EnableMasterLoadBalancer  bool `name:"enable_master_load_balancer" default:"true"`
     EnableReplicaLoadBalancer bool `name:"enable_replica_load_balancer" default:"false"`
     // deprecated and kept for backward compatibility
-    EnableLoadBalancer *bool `name:"enable_load_balancer" default:"true"`
+    EnableLoadBalancer *bool `name:"enable_load_balancer"`
     MasterDNSNameFormat stringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"`
     ReplicaDNSNameFormat stringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"`
     PDBNameFormat stringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"`
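As a configuration note on this last patch: with `enable_load_balancer` deprecated and ignored, an operator configmap would rely on the two split switches instead. A sketch of the relevant part of such a configmap, using only keys that appear in this patch series:

```yaml
# Operator ConfigMap sketch: the split switches replace the deprecated
# enable_load_balancer, which the operator now only warns about and ignores.
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  # enable_load_balancer: "true"      # deprecated: no effect, triggers a warning
  enable_master_load_balancer: "false"
  enable_replica_load_balancer: "false"
```

Per-cluster `enableMasterLoadBalancer` and `enableReplicaLoadBalancer` manifest flags still override these defaults, and the old `useLoadBalancer`/`replicaLoadBalancer` manifest flags are only consulted when neither new flag is set.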