Merge branch 'master' into fix-reporting-masters-under-migration

Sergey Dudoladov 2018-12-21 14:35:04 +01:00
commit dc381c29e0
34 changed files with 345 additions and 96 deletions

.golangci.yml (new file)

@@ -0,0 +1,5 @@
+# https://github.com/golangci/golangci/wiki/Configuration
+service:
+  prepare:
+    - make deps


@@ -1,11 +1,4 @@
 # for github.com
-approvals:
-  groups:
-    zalando:
-      minimum: 2
-      from:
-        orgs:
-          - "zalando"
 X-Zalando-Team: "acid"
 # type should be one of [code, doc, config, tools, secrets]
 # code will be the default value, if X-Zalando-Type is not found in .zappr.yml


@@ -1,4 +1,4 @@
-.PHONY: clean local linux macos docker push scm-source.json
+.PHONY: clean local test linux macos docker push scm-source.json
 BINARY ?= postgres-operator
 BUILD_FLAGS ?= -v
@@ -30,6 +30,11 @@ else
 	DOCKERFILE = Dockerfile
 endif

+ifdef CDP_PULL_REQUEST_NUMBER
+	CDP_TAG := -${CDP_BUILD_VERSION}
+endif
+
 PATH := $(GOPATH)/bin:$(PATH)
 SHELL := env PATH=$(PATH) $(SHELL)
@@ -52,13 +57,18 @@ docker-context: scm-source.json linux
 	cp build/linux/${BINARY} scm-source.json docker/build/

 docker: ${DOCKERDIR}/${DOCKERFILE} docker-context
-	cd "${DOCKERDIR}" && docker build --rm -t "$(IMAGE):$(TAG)$(DEBUG_POSTFIX)" -f "${DOCKERFILE}" .
+	echo `(env)`
+	echo "Tag ${TAG}"
+	echo "Version ${VERSION}"
+	echo "CDP tag ${CDP_TAG}"
+	echo "git describe $(shell git describe --tags --always --dirty)"
+	cd "${DOCKERDIR}" && docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_POSTFIX)" -f "${DOCKERFILE}" .

 indocker-race:
 	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.8.1 bash -c "make linux"

 push:
-	docker push "$(IMAGE):$(TAG)"
+	docker push "$(IMAGE):$(TAG)$(CDP_TAG)"

 scm-source.json: .git
 	echo '{\n "url": "git:$(GITURL)",\n "revision": "$(GITHEAD)",\n "author": "$(USER)",\n "status": "$(GITSTATUS)"\n}' > scm-source.json
@@ -76,3 +86,6 @@ vet:

 deps:
 	@glide install --strip-vendor
+
+test:
+	@go test ./...
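
For illustration, how the conditional tag plays out (the values below are made up; outside a CDP build `CDP_PULL_REQUEST_NUMBER` is unset, so `CDP_TAG` stays empty):

```bash
# simulate a CDP pull-request build; both variable values are illustrative
CDP_PULL_REQUEST_NUMBER=123 CDP_BUILD_VERSION=42 make docker
# the image is then tagged "$(IMAGE):$(TAG)-42" and pushed with the same tag by `make push`
```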


@@ -4,6 +4,7 @@
 [![Coverage Status](https://coveralls.io/repos/github/zalando-incubator/postgres-operator/badge.svg)](https://coveralls.io/github/zalando-incubator/postgres-operator)
 [![Go Report Card](https://goreportcard.com/badge/github.com/zalando-incubator/postgres-operator)](https://goreportcard.com/report/github.com/zalando-incubator/postgres-operator)
 [![GoDoc](https://godoc.org/github.com/zalando-incubator/postgres-operator?status.svg)](https://godoc.org/github.com/zalando-incubator/postgres-operator)
+[![golangci](https://golangci.com/badges/github.com/zalando-incubator/postgres-operator.svg)](https://golangci.com/r/github.com/zalando-incubator/postgres-operator)

 ## Introduction
@@ -67,12 +68,14 @@ kubectl create -f manifests/configmap.yaml # configuration
 kubectl create -f manifests/operator-service-account-rbac.yaml # identity and permissions
 kubectl create -f manifests/postgres-operator.yaml # deployment

-# create a Postgres cluster
+# create a Postgres cluster in a non-default namespace
+kubectl create namespace test
+kubectl config set-context minikube --namespace=test
 kubectl create -f manifests/minimal-postgres-manifest.yaml

 # connect to the Postgres master via psql
 # operator creates the relevant k8s secret
-export HOST_PORT=$(minikube service acid-minimal-cluster --url | sed 's,.*/,,')
+export HOST_PORT=$(minikube service --namespace test acid-minimal-cluster --url | sed 's,.*/,,')
 export PGHOST=$(echo $HOST_PORT | cut -d: -f 1)
 export PGPORT=$(echo $HOST_PORT | cut -d: -f 2)
 export PGPASSWORD=$(kubectl get secret postgres.acid-minimal-cluster.credentials -o 'jsonpath={.data.password}' | base64 -d)
@@ -88,13 +91,14 @@ cd postgres-operator
 ./run_operator_locally.sh
 ```

+Note that we provide the `/manifests` directory only as an example; you should consider adjusting the manifests to your particular setting.
+
 ## Running and testing the operator

-The best way to test the operator is to run it in [minikube](https://kubernetes.io/docs/getting-started-guides/minikube/).
-Minikube is a tool to run Kubernetes cluster locally.
+The best way to test the operator is to run it locally in [minikube](https://kubernetes.io/docs/getting-started-guides/minikube/). See the developer docs (`docs/developer.yaml`) for details.

 ### Configuration Options

-The operator can be configured with the provided ConfigMap (`manifests/configmap.yaml`).
+The operator can be configured with the provided ConfigMap (`manifests/configmap.yaml`) or the operator's own CRD.
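
A sketch of the CRD-based option, using the example configuration object shipped with these manifests:

```bash
# create the example OperatorConfiguration resource
kubectl create -f manifests/postgresql-operator-default-configuration.yaml
# then switch the deployment to POSTGRES_OPERATOR_CONFIGURATION_OBJECT,
# as shown in the commented-out lines of manifests/postgres-operator.yaml below
```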


@@ -22,7 +22,7 @@ pipeline:
           go version
       - desc: 'Install Docker'
         cmd: |
-          curl -sSL https://get.docker.com/ | sh
+          curl -fLOsS https://delivery.cloud.zalando.com/utils/ensure-docker && sh ensure-docker && rm ensure-docker
       - desc: 'Symlink sources into the GOPATH'
         cmd: |
           mkdir -p $OPERATOR_TOP_DIR


@@ -41,12 +41,12 @@ manifests:

 ```bash
 $ kubectl create namespace test
-$ kubectl config set-context --namespace=test
+$ kubectl config set-context $(kubectl config current-context) --namespace=test
 ```

 All subsequent `kubectl` commands will work with the `test` namespace. The
 operator will run in this namespace and look up needed resources - such as its
-config map - there.
+config map - there. Please note that the namespace for service accounts and cluster role bindings in [operator RBAC rules](manifests/operator-service-account-rbac.yaml) needs to be adjusted to the non-default value.
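
One way to make that adjustment (a sketch; the `namespace: default` pattern is an assumption about the shipped manifest, so inspect the file first):

```bash
# point the service account and role bindings at the namespace the operator runs in
sed -i 's/namespace: default/namespace: test/g' manifests/operator-service-account-rbac.yaml
```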
 ## Specify the namespace to watch
@@ -198,7 +198,9 @@ services to an outer network, one can attach load balancers to them by setting
 cluster manifest. In the case any of these variables are omitted from the
 manifest, the operator configmap's settings `enable_master_load_balancer` and
 `enable_replica_load_balancer` apply. Note that the operator settings affect
-all Postgresql services running in a namespace watched by the operator.
+all Postgresql services running in all namespaces watched by the operator.
+
+To limit the range of IP addresses that can reach a load balancer, specify the desired ranges in the `allowedSourceRanges` field (applies to both master and replica load balancers). To prevent exposing load balancers to the entire Internet, this field is set at cluster creation time to `127.0.0.1/32` unless overwritten explicitly. If you want to revoke all IP ranges from an existing cluster, please set the `allowedSourceRanges` field to `127.0.0.1/32` or to the empty sequence `[]`. Setting the field to `null` or omitting it entirely may lead to Kubernetes removing this field from the manifest due to [its handling of null fields](https://kubernetes.io/docs/concepts/overview/object-management-kubectl/declarative-config/#how-apply-calculates-differences-and-merges-changes). The resulting manifest will then lack the necessary change, and the operator will consequently do nothing with the existing source ranges.
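
For illustration, a cluster manifest fragment restricting access (the CIDR is a placeholder):

```yaml
spec:
  allowedSourceRanges:
    - 127.0.0.1/32  # replace with e.g. your office network's CIDR, or [] to revoke all ranges
```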
 ## Running periodic 'autorepair' scans of Kubernetes objects
@@ -220,3 +222,7 @@ The operator is capable of maintaining roles of multiple kinds within a Postgres
 3. **Per-cluster robot users** are also roles for processes originating from external systems but defined for an individual Postgres cluster in its manifest. A typical example is a role for connections from an application that uses the database.
 4. **Human users** originate from the Teams API that returns list of the team members given a team id. Operator differentiates between (a) product teams that own a particular Postgres cluster and are granted admin rights to maintain it, and (b) Postgres superuser teams that get the superuser access to all PG databases running in a k8s cluster for the purposes of maintaining and troubleshooting.
+
+## Understanding rolling update of Spilo pods
+
+The operator logs the reasons for a rolling update at the `info` level, and a diff between the old and new StatefulSet specs at the `debug` level. The latter log entry contains many escape characters, so view it in a CLI with `echo -e`. Note that the resulting message will contain some noise because the `PodTemplate` used by the operator is yet to be updated with the default values used internally in Kubernetes.
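
A sketch of rendering that debug entry (the deployment name and grep filter are assumptions about your setup):

```bash
# pull the most recent spec diff from the operator log and expand its escape sequences
kubectl logs deployment/postgres-operator | grep "spec diff" | tail -n 1 > /tmp/spec-diff.txt
echo -e "$(cat /tmp/spec-diff.txt)"
```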


@@ -275,3 +275,12 @@ Type 'help' for list of commands.
 (dlv) c
 PASS
 ```
+
+To test the multinamespace setup, you can use
+```
+./run_operator_locally.sh --rebuild-operator
+```
+It will automatically create an `acid-minimal-cluster` in the namespace `test`. Then you can, for example, check the Patroni logs:
+```
+kubectl logs acid-minimal-cluster-0
+```


@@ -51,6 +51,8 @@ Please, report any issues discovered to https://github.com/zalando-incubator/pos
 ## Talks

-1. "Blue elephant on-demand: Postgres + Kubernetes" talk by Oleksii Kliukin and Jan Mussler, FOSDEM 2018: [video](https://fosdem.org/2018/schedule/event/blue_elephant_on_demand_postgres_kubernetes/) | [slides (pdf)](https://www.postgresql.eu/events/fosdem2018/sessions/session/1735/slides/59/FOSDEM%202018_%20Blue_Elephant_On_Demand.pdf)
-2. "Kube-Native Postgres" talk by Josh Berkus, KubeCon 2017: [video](https://www.youtube.com/watch?v=Zn1vd7sQ_bc)
+1. "PostgreSQL High Availability on Kubernetes with Patroni" talk by Oleksii Kliukin, Atmosphere 2018: [video](https://www.youtube.com/watch?v=cFlwQOPPkeg) | [slides](https://speakerdeck.com/alexeyklyukin/postgresql-high-availability-on-kubernetes-with-patroni)
+2. "Blue elephant on-demand: Postgres + Kubernetes" talk by Oleksii Kliukin and Jan Mussler, FOSDEM 2018: [video](https://fosdem.org/2018/schedule/event/blue_elephant_on_demand_postgres_kubernetes/) | [slides (pdf)](https://www.postgresql.eu/events/fosdem2018/sessions/session/1735/slides/59/FOSDEM%202018_%20Blue_Elephant_On_Demand.pdf)
+3. "Kube-Native Postgres" talk by Josh Berkus, KubeCon 2017: [video](https://www.youtube.com/watch?v=Zn1vd7sQ_bc)


@@ -151,6 +151,9 @@ explanation of `ttl` and `loop_wait` parameters.
   patroni `maximum_lag_on_failover` parameter value, optional. The default is
   set by the Spilo docker image. Optional.

+* **slots**
+  permanent replication slots that Patroni preserves after failover by re-creating them on the new primary immediately after a promote. Slots can be reconfigured with the help of `patronictl edit-config`. It is the responsibility of the user to avoid name clashes between the replication slots automatically created by Patroni for cluster members and the permanent replication slots. Optional.
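
For example, the `slots` section added to `manifests/complete-postgres-manifest.yaml` in this commit:

```yaml
patroni:
  slots:
    permanent_physical_1:
      type: physical
    permanent_logical_1:
      type: logical
      database: foo
      plugin: pgoutput
```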
 ## Postgres container resources

 Those parameters define [CPU and memory requests and


@@ -221,6 +221,9 @@ CRD-based configuration.
   memory limits for the postgres containers, unless overridden by cluster-specific
   settings. The default is `1Gi`.

+* **set_memory_request_to_limit**
+  Set `memory_request` to `memory_limit` for all Postgres clusters (the default value is also increased). This prevents certain cases of memory overcommitment, at the cost of overprovisioning memory and potential scheduling problems for containers with high memory limits due to the lack of memory on Kubernetes cluster nodes. This affects all containers (Postgres, Scalyr sidecar, and other sidecars). The default is `false`.
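
This corresponds to the (commented-out) entry added to `manifests/configmap.yaml` in this commit:

```yaml
data:
  set_memory_request_to_limit: "true"
```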
 ## Operator timeouts

 This set of parameters define various timeouts related to some operator
@@ -304,8 +307,7 @@ either. In the CRD-based configuration those options are grouped under the

 * **log_s3_bucket**
   S3 bucket to use for shipping postgres daily logs. Works only with S3 on AWS.
-  The bucket has to be present and accessible by Postgres pods. At the moment
-  Spilo does not yet support this. The default is empty.
+  The bucket has to be present and accessible by Postgres pods. The default is empty.

 * **kube_iam_role**
   AWS IAM role to supply in the `iam.amazonaws.com/role` annotation of Postgres
@@ -380,7 +382,7 @@ key.
   infrastructure role. The default is `admin`.

 * **postgres_superuser_teams**
-  List of teams which members need the superuser role in each PG database cluster to administer Postgres and maintain infrastructure built around it. The default is `postgres_superuser`.
+  List of teams whose members need the superuser role in each PG database cluster to administer Postgres and maintain the infrastructure built around it. The default is empty.

 ## Logging and REST API


@@ -20,7 +20,7 @@ spec:
     - createdb

   # role for application foo
-  foo_user:
+  foo_user: # or 'foo_user: []'

 #databases: name->owner
 databases:
@@ -74,8 +74,8 @@ for an example of `zalando` role, defined with `superuser` and `createdb`
 flags.

 Manifest roles are defined as a dictionary, with a role name as a key and a
-list of role options as a value. For a role without any options supply an empty
-list.
+list of role options as a value. For a role without any options it is best to supply the empty
+list `[]`. It is also possible to leave this field empty, as in our example manifests, but in certain cases such an empty field may be removed by Kubernetes [due to the `null` value it gets](https://kubernetes.io/docs/concepts/overview/object-management-kubectl/declarative-config/#how-apply-calculates-differences-and-merges-changes) (`foobar_user:` is equivalent to `foobar_user: null`).
 The operator accepts the following options: `superuser`, `inherit`, `login`,
 `nologin`, `createrole`, `createdb`, `replication`, `bypassrls`.
@@ -238,9 +238,8 @@ metadata:
   uid: efd12e58-5786-11e8-b5a7-06148230260c
 ```

-Note that timezone required for `timestamp` (offset relative to UTC, see RFC
-3339 section 5.6)
+Note that a timezone is required for `timestamp`. Otherwise, the offset is relative
+to UTC, see [RFC 3339 section 5.6](https://www.ietf.org/rfc/rfc3339.txt).

 ## Sidecar Support


@@ -6,7 +6,7 @@ metadata:
 spec:
   teamId: "ACID"
   volume:
-    size: 5Gi
+    size: 1Gi
   numberOfInstances: 2
   users: #Application/Robot users
     zalando:
@@ -31,7 +31,7 @@ spec:
       memory: 100Mi
     limits:
       cpu: 300m
-      memory: 3000Mi
+      memory: 300Mi
   patroni:
     initdb:
       encoding: "UTF8"
@@ -40,6 +40,13 @@ spec:
     pg_hba:
     - hostssl all all 0.0.0.0/0 md5
     - host    all all 0.0.0.0/0   md5
+    slots:
+      permanent_physical_1:
+        type: physical
+      permanent_logical_1:
+        type: logical
+        database: foo
+        plugin: pgoutput
     ttl: 30
     loop_wait: &loop_wait 10
     retry_timeout: 10


@@ -3,19 +3,20 @@ kind: ConfigMap
 metadata:
   name: postgres-operator
 data:
-  # if set to the "*", listen to all namespaces
-  # watched_namespace: development
+  watched_namespace: "*"  # listen to all namespaces
   cluster_labels: application:spilo
   cluster_name_label: version
   pod_role_label: spilo-role
   debug_logging: "true"
   workers: "4"
-  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-10:1.4-p8
+  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-10:1.5-p35
   pod_service_account_name: "zalando-postgres-operator"
   secret_name_template: '{username}.{cluster}.credentials'
   super_username: postgres
   enable_teams_api: "false"
+  # set_memory_request_to_limit: "true"
+  # postgres_superuser_teams: "postgres_superusers"
   # enable_team_superuser: "false"
   # team_admin_role: "admin"
   # teams_api_url: http://fake-teams-api.default.svc.cluster.local


@@ -2,6 +2,7 @@ apiVersion: "acid.zalan.do/v1"
 kind: postgresql
 metadata:
   name: acid-minimal-cluster
+  namespace: test # assumes namespace exists beforehand
 spec:
   teamId: "ACID"
   volume:


@@ -14,6 +14,7 @@ rules:
   - acid.zalan.do
   resources:
   - postgresqls
+  - operatorconfigurations
   verbs:
   - "*"
 - apiGroups:


@@ -12,9 +12,13 @@ spec:
       serviceAccountName: zalando-postgres-operator
       containers:
       - name: postgres-operator
-        image: registry.opensource.zalan.do/acid/postgres-operator:v1.0.0
+        image: registry.opensource.zalan.do/acid/smoke-tested-postgres-operator:v1.0.0-21-ge39915c
        imagePullPolicy: IfNotPresent
         env:
         # provided additional ENV vars can overwrite individual config map entries
         - name: CONFIG_MAP_NAME
           value: "postgres-operator"
+        # In order to use the CRD OperatorConfiguration instead, uncomment these lines and comment out the two lines above
+        # - name: POSTGRES_OPERATOR_CONFIGURATION_OBJECT
+        #   value: postgresql-operator-default-configuration


@@ -4,7 +4,7 @@ metadata:
   name: postgresql-operator-default-configuration
 configuration:
   etcd_host: ""
-  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-10:1.4-p8
+  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-10:1.4-p29
   workers: 4
   min_instances: -1
   max_instances: -1
@@ -68,6 +68,7 @@ configuration:
   protected_role_names:
   - admin
   # teams_api_url: ""
+  # postgres_superuser_teams: "postgres_superusers"
   logging_rest_api:
     api_port: 8008
     ring_log_lines: 100


@@ -131,6 +131,7 @@ type OperatorConfigurationData struct {
 	PostgresUsersConfiguration PostgresUsersConfiguration   `json:"users"`
 	Kubernetes                 KubernetesMetaConfiguration  `json:"kubernetes"`
 	PostgresPodResources       PostgresPodResourcesDefaults `json:"postgres_pod_resources"`
+	SetMemoryRequestToLimit    bool                         `json:"set_memory_request_to_limit,omitempty"`
 	Timeouts                   OperatorTimeouts             `json:"timeouts"`
 	LoadBalancer               LoadBalancerConfiguration    `json:"load_balancer"`
 	AWSGCP                     AWSGCPConfiguration          `json:"aws_or_gcp"`


@@ -90,18 +90,19 @@ type ResourceDescription struct {

 // Resources describes requests and limits for the cluster resouces.
 type Resources struct {
-	ResourceRequest ResourceDescription `json:"requests,omitempty"`
+	ResourceRequests ResourceDescription `json:"requests,omitempty"`
 	ResourceLimits   ResourceDescription `json:"limits,omitempty"`
 }

 // Patroni contains Patroni-specific configuration
 type Patroni struct {
 	InitDB               map[string]string `json:"initdb"`
 	PgHba                []string          `json:"pg_hba"`
 	TTL                  uint32            `json:"ttl"`
 	LoopWait             uint32            `json:"loop_wait"`
 	RetryTimeout         uint32            `json:"retry_timeout"`
 	MaximumLagOnFailover float32           `json:"maximum_lag_on_failover"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213
+	Slots                map[string]map[string]string `json:"slots"`
 }

 // CloneDescription describes which cluster the new should clone and up to which point in time


@@ -132,7 +132,7 @@ var unmarshalCluster = []struct {
 		// This error message can vary between Go versions, so compute it for the current version.
 		Error: json.Unmarshal([]byte(`{"teamId": 0}`), &PostgresSpec{}).Error(),
 	},
-	[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil},
+	[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil},
 	{[]byte(`{
 	"kind": "Postgresql",
 	"apiVersion": "acid.zalan.do/v1",
@@ -189,7 +189,14 @@ var unmarshalCluster = []struct {
 		"ttl": 30,
 		"loop_wait": 10,
 		"retry_timeout": 10,
-		"maximum_lag_on_failover": 33554432
+		"maximum_lag_on_failover": 33554432,
+		"slots" : {
+			"permanent_logical_1" : {
+				"type" : "logical",
+				"database" : "foo",
+				"plugin" : "pgoutput"
+			}
+		}
 	},
 	"maintenanceWindows": [
 		"Mon:01:00-06:00",
@@ -230,10 +237,11 @@ var unmarshalCluster = []struct {
 				LoopWait:             10,
 				RetryTimeout:         10,
 				MaximumLagOnFailover: 33554432,
+				Slots:                map[string]map[string]string{"permanent_logical_1": {"type": "logical", "database": "foo", "plugin": "pgoutput"}},
 			},
 			Resources: Resources{
-				ResourceRequest: ResourceDescription{CPU: "10m", Memory: "50Mi"},
+				ResourceRequests: ResourceDescription{CPU: "10m", Memory: "50Mi"},
 				ResourceLimits:   ResourceDescription{CPU: "300m", Memory: "3000Mi"},
 			},
 			TeamID: "ACID",
@@ -265,7 +273,7 @@ var unmarshalCluster = []struct {
 		},
 		Error: "",
 	},
-	[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"volume":{"size":"5Gi","storageClass":"SSD"},"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}}}`), nil},
+	[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"volume":{"size":"5Gi","storageClass":"SSD"},"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}}}`), nil},
 	{
 		[]byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "teapot-testcluster1"}, "spec": {"teamId": "acid"}}`),
 		Postgresql{
@@ -280,7 +288,7 @@ var unmarshalCluster = []struct {
 			Status: ClusterStatusInvalid,
 			Error:  errors.New("name must match {TEAM}-{NAME} format").Error(),
 		},
-		[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil},
+		[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil},
 	{
 		in:  []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "clone": {"cluster": "team-batman"}}}`),
 		out: Postgresql{
@@ -300,12 +308,12 @@ var unmarshalCluster = []struct {
 		},
 		Error: "",
 	},
-		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{"cluster":"team-batman"}}}`), err: nil},
+		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{"cluster":"team-batman"}}}`), err: nil},
 	{[]byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1"`),
 		Postgresql{},
 		[]byte{},
 		errors.New("unexpected end of JSON input")},
-	{[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`),
+	{[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`),
 		Postgresql{},
 		[]byte{},
 		errors.New("invalid character 'q' looking for beginning of value")}}


@@ -320,6 +320,23 @@ func (in *Patroni) DeepCopyInto(out *Patroni) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
+	if in.Slots != nil {
+		in, out := &in.Slots, &out.Slots
+		*out = make(map[string]map[string]string, len(*in))
+		for key, val := range *in {
+			var outVal map[string]string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make(map[string]string, len(*in))
+				for key, val := range *in {
+					(*out)[key] = val
+				}
+			}
+			(*out)[key] = outVal
+		}
+	}
 	return
 }
@@ -556,7 +573,7 @@ func (in *ResourceDescription) DeepCopy() *ResourceDescription {

 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Resources) DeepCopyInto(out *Resources) {
 	*out = *in
-	out.ResourceRequest = in.ResourceRequest
+	out.ResourceRequests = in.ResourceRequests
 	out.ResourceLimits = in.ResourceLimits
 	return
 }


@@ -48,11 +48,22 @@ type Server struct {
 	controller controllerInformer
 }

+const (
+	teamRe      = `(?P<team>[a-zA-Z][a-zA-Z0-9\-_]*)`
+	namespaceRe = `(?P<namespace>[a-z0-9]([-a-z0-9\-_]*[a-z0-9])?)`
+	clusterRe   = `(?P<cluster>[a-zA-Z][a-zA-Z0-9\-_]*)`
+)
+
 var (
-	clusterStatusURL     = regexp.MustCompile(`^/clusters/(?P<team>[a-zA-Z][a-zA-Z0-9]*)/(?P<namespace>[a-z0-9]([-a-z0-9]*[a-z0-9])?)/(?P<cluster>[a-zA-Z][a-zA-Z0-9-]*)/?$`)
-	clusterLogsURL       = regexp.MustCompile(`^/clusters/(?P<team>[a-zA-Z][a-zA-Z0-9]*)/(?P<namespace>[a-z0-9]([-a-z0-9]*[a-z0-9])?)/(?P<cluster>[a-zA-Z][a-zA-Z0-9-]*)/logs/?$`)
-	clusterHistoryURL    = regexp.MustCompile(`^/clusters/(?P<team>[a-zA-Z][a-zA-Z0-9]*)/(?P<namespace>[a-z0-9]([-a-z0-9]*[a-z0-9])?)/(?P<cluster>[a-zA-Z][a-zA-Z0-9-]*)/history/?$`)
-	teamURL              = regexp.MustCompile(`^/clusters/(?P<team>[a-zA-Z][a-zA-Z0-9]*)/?$`)
+	clusterStatusRe  = fmt.Sprintf(`^/clusters/%s/%s/%s/?$`, teamRe, namespaceRe, clusterRe)
+	clusterLogsRe    = fmt.Sprintf(`^/clusters/%s/%s/%s/logs/?$`, teamRe, namespaceRe, clusterRe)
+	clusterHistoryRe = fmt.Sprintf(`^/clusters/%s/%s/%s/history/?$`, teamRe, namespaceRe, clusterRe)
+	teamURLRe        = fmt.Sprintf(`^/clusters/%s/?$`, teamRe)
+
+	clusterStatusURL     = regexp.MustCompile(clusterStatusRe)
+	clusterLogsURL       = regexp.MustCompile(clusterLogsRe)
+	clusterHistoryURL    = regexp.MustCompile(clusterHistoryRe)
+	teamURL              = regexp.MustCompile(teamURLRe)
 	workerLogsURL        = regexp.MustCompile(`^/workers/(?P<id>\d+)/logs/?$`)
 	workerEventsQueueURL = regexp.MustCompile(`^/workers/(?P<id>\d+)/queue/?$`)
 	workerStatusURL      = regexp.MustCompile(`^/workers/(?P<id>\d+)/status/?$`)


@@ -0,0 +1,30 @@
+package apiserver
+
+import (
+	"testing"
+)
+
+const (
+	clusterStatusTest        = "/clusters/test-id/test_namespace/testcluster/"
+	clusterStatusNumericTest = "/clusters/test-id-1/test_namespace/testcluster/"
+	clusterLogsTest          = "/clusters/test-id/test_namespace/testcluster/logs/"
+	teamTest                 = "/clusters/test-id/"
+)
+
+func TestUrlRegexps(t *testing.T) {
+	if clusterStatusURL.FindStringSubmatch(clusterStatusTest) == nil {
+		t.Errorf("clusterStatusURL can't match %s", clusterStatusTest)
+	}
+	if clusterStatusURL.FindStringSubmatch(clusterStatusNumericTest) == nil {
+		t.Errorf("clusterStatusURL can't match %s", clusterStatusNumericTest)
+	}
+	if clusterLogsURL.FindStringSubmatch(clusterLogsTest) == nil {
+		t.Errorf("clusterLogsURL can't match %s", clusterLogsTest)
+	}
+	if teamURL.FindStringSubmatch(teamTest) == nil {
+		t.Errorf("teamURL can't match %s", teamTest)
+	}
+}


@@ -321,7 +321,9 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp
 		needsRollUpdate = true
 		reasons = append(reasons, "new statefulset's container specification doesn't match the current one")
 	} else {
-		needsRollUpdate, reasons = c.compareContainers(c.Statefulset, statefulSet)
+		var containerReasons []string
+		needsRollUpdate, containerReasons = c.compareContainers(c.Statefulset, statefulSet)
+		reasons = append(reasons, containerReasons...)
 	}
 	if len(c.Statefulset.Spec.Template.Spec.Containers) == 0 {
 		c.logger.Warningf("statefulset %q has no container", util.NameFromMeta(c.Statefulset.ObjectMeta))
@@ -329,7 +331,6 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp
 	}
 	// In the comparisons below, the needsReplace and needsRollUpdate flags are never reset, since checks fall through
 	// and the combined effect of all the changes should be applied.
-	// TODO: log all reasons for changing the statefulset, not just the last one.
 	// TODO: make sure this is in sync with generatePodTemplate, ideally by using the same list of fields to generate
 	// the template and the diff
 	if c.Statefulset.Spec.Template.Spec.ServiceAccountName != statefulSet.Spec.Template.Spec.ServiceAccountName {
@@ -340,7 +341,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp
 	if *c.Statefulset.Spec.Template.Spec.TerminationGracePeriodSeconds != *statefulSet.Spec.Template.Spec.TerminationGracePeriodSeconds {
 		needsReplace = true
 		needsRollUpdate = true
 		reasons = append(reasons, "new statefulset's terminationGracePeriodSeconds doesn't match the current one")
 	}
 	if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Affinity, statefulSet.Spec.Template.Spec.Affinity) {
 		needsReplace = true
@@ -416,23 +417,23 @@ func newCheck(msg string, cond containerCondition) containerCheck {

 // compareContainers: compare containers from two stateful sets
 // and return:
-// * whether or not roll update is needed
+// * whether or not a rolling update is needed
 // * a list of reasons in a human readable format
 func (c *Cluster) compareContainers(setA, setB *v1beta1.StatefulSet) (bool, []string) {
 	reasons := make([]string, 0)
 	needsRollUpdate := false
 	checks := []containerCheck{
-		newCheck("new statefulset's container %d name doesn't match the current one",
+		newCheck("new statefulset's container %s (index %d) name doesn't match the current one",
 			func(a, b v1.Container) bool { return a.Name != b.Name }),
-		newCheck("new statefulset's container %d image doesn't match the current one",
+		newCheck("new statefulset's container %s (index %d) image doesn't match the current one",
 			func(a, b v1.Container) bool { return a.Image != b.Image }),
-		newCheck("new statefulset's container %d ports don't match the current one",
+		newCheck("new statefulset's container %s (index %d) ports don't match the current one",
 			func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Ports, b.Ports) }),
-		newCheck("new statefulset's container %d resources don't match the current ones",
+		newCheck("new statefulset's container %s (index %d) resources don't match the current ones",
 			func(a, b v1.Container) bool { return !compareResources(&a.Resources, &b.Resources) }),
-		newCheck("new statefulset's container %d environment doesn't match the current one",
+		newCheck("new statefulset's container %s (index %d) environment doesn't match the current one",
 			func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Env, b.Env) }),
-		newCheck("new statefulset's container %d environment sources don't match the current one",
+		newCheck("new statefulset's container %s (index %d) environment sources don't match the current one",
 			func(a, b v1.Container) bool { return !reflect.DeepEqual(a.EnvFrom, b.EnvFrom) }),
 	}

@@ -441,7 +442,7 @@ func (c *Cluster) compareContainers(setA, setB *v1beta1.StatefulSet) (bool, []st
 	for _, check := range checks {
 		if check.condition(containerA, containerB) {
 			needsRollUpdate = true
-			reasons = append(reasons, fmt.Sprintf(check.reason, index))
+			reasons = append(reasons, fmt.Sprintf(check.reason, containerA.Name, index))
 			}
 		}
 	}


@@ -36,11 +36,12 @@ type pgUser struct {
 }

 type patroniDCS struct {
 	TTL                      uint32                 `json:"ttl,omitempty"`
 	LoopWait                 uint32                 `json:"loop_wait,omitempty"`
 	RetryTimeout             uint32                 `json:"retry_timeout,omitempty"`
 	MaximumLagOnFailover     float32                `json:"maximum_lag_on_failover,omitempty"`
 	PGBootstrapConfiguration map[string]interface{} `json:"postgresql,omitempty"`
+	Slots                    map[string]map[string]string `json:"slots,omitempty"`
 }

 type pgBootstrap struct {
@@ -91,18 +92,18 @@ func (c *Cluster) makeDefaultResources() acidv1.Resources {
 	defaultRequests := acidv1.ResourceDescription{CPU: config.DefaultCPURequest, Memory: config.DefaultMemoryRequest}
 	defaultLimits := acidv1.ResourceDescription{CPU: config.DefaultCPULimit, Memory: config.DefaultMemoryLimit}

-	return acidv1.Resources{ResourceRequest: defaultRequests, ResourceLimits: defaultLimits}
+	return acidv1.Resources{ResourceRequests: defaultRequests, ResourceLimits: defaultLimits}
 }

 func generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) {
 	var err error

-	specRequests := resources.ResourceRequest
+	specRequests := resources.ResourceRequests
 	specLimits := resources.ResourceLimits

 	result := v1.ResourceRequirements{}

-	result.Requests, err = fillResourceList(specRequests, defaultResources.ResourceRequest)
+	result.Requests, err = fillResourceList(specRequests, defaultResources.ResourceRequests)
 	if err != nil {
 		return nil, fmt.Errorf("could not fill resource requests: %v", err)
 	}
@@ -215,6 +216,9 @@ PatroniInitDBParams:
 	if patroni.TTL != 0 {
 		config.Bootstrap.DCS.TTL = patroni.TTL
 	}
+	if patroni.Slots != nil {
+		config.Bootstrap.DCS.Slots = patroni.Slots
+	}

 	config.PgLocalConfiguration = make(map[string]interface{})
 	config.PgLocalConfiguration[patroniPGBinariesParameterName] = fmt.Sprintf(pgBinariesLocationTemplate, pg.PgVersion)
@@ -373,8 +377,8 @@ func generateSidecarContainers(sidecars []acidv1.Sidecar,

 		resources, err := generateResourceRequirements(
 			makeResources(
-				sidecar.Resources.ResourceRequest.CPU,
-				sidecar.Resources.ResourceRequest.Memory,
+				sidecar.Resources.ResourceRequests.CPU,
+				sidecar.Resources.ResourceRequests.Memory,
 				sidecar.Resources.ResourceLimits.CPU,
 				sidecar.Resources.ResourceLimits.Memory,
 			),
@@ -621,7 +625,7 @@ func getBucketScopeSuffix(uid string) string {

 func makeResources(cpuRequest, memoryRequest, cpuLimit, memoryLimit string) acidv1.Resources {
 	return acidv1.Resources{
-		ResourceRequest: acidv1.ResourceDescription{
+		ResourceRequests: acidv1.ResourceDescription{
 			CPU:    cpuRequest,
 			Memory: memoryRequest,
 		},
@@ -640,6 +644,60 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
 		podTemplate         *v1.PodTemplateSpec
 		volumeClaimTemplate *v1.PersistentVolumeClaim
 	)

+	if c.OpConfig.SetMemoryRequestToLimit {
+
+		// controller adjusts the default memory request at operator startup
+		request := spec.Resources.ResourceRequests.Memory
+		if request == "" {
+			request = c.OpConfig.DefaultMemoryRequest
+		}
+
+		limit := spec.Resources.ResourceLimits.Memory
+		if limit == "" {
+			limit = c.OpConfig.DefaultMemoryLimit
+		}
+
+		isSmaller, err := util.RequestIsSmallerThanLimit(request, limit)
+		if err != nil {
+			return nil, err
+		}
+		if isSmaller {
+			c.logger.Warningf("The memory request of %v for the Postgres container is increased to match the memory limit of %v.", request, limit)
+			spec.Resources.ResourceRequests.Memory = limit
+		}
+
+		// controller adjusts the Scalyr sidecar request at operator startup
+		// as this sidecar is managed separately
+
+		// adjust sidecar containers defined for that particular cluster
+		for _, sidecar := range spec.Sidecars {
+
+			// TODO #413
+			sidecarRequest := sidecar.Resources.ResourceRequests.Memory
+			if sidecarRequest == "" {
+				sidecarRequest = c.OpConfig.DefaultMemoryRequest
+			}
+
+			sidecarLimit := sidecar.Resources.ResourceLimits.Memory
+			if sidecarLimit == "" {
+				sidecarLimit = c.OpConfig.DefaultMemoryLimit
+			}
+
+			isSmaller, err := util.RequestIsSmallerThanLimit(sidecarRequest, sidecarLimit)
+			if err != nil {
+				return nil, err
+			}
+			if isSmaller {
+				c.logger.Warningf("The memory request of %v for the %v sidecar container is increased to match the memory limit of %v.", sidecar.Resources.ResourceRequests.Memory, sidecar.Name, sidecar.Resources.ResourceLimits.Memory)
+				sidecar.Resources.ResourceRequests.Memory = sidecar.Resources.ResourceLimits.Memory
+			}
+		}
+	}
+
 	defaultResources := c.makeDefaultResources()

 	resourceRequirements, err := generateResourceRequirements(spec.Resources, defaultResources)
@@ -958,16 +1016,17 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)

 	if c.shouldCreateLoadBalancerForService(role, spec) {

-		// safe default value: lock load balancer to only local address unless overridden explicitly.
-		sourceRanges := []string{localHost}
-
-		allowedSourceRanges := spec.AllowedSourceRanges
-		if len(allowedSourceRanges) >= 0 {
-			sourceRanges = allowedSourceRanges
+		// spec.AllowedSourceRanges evaluates to the empty slice of zero length
+		// when omitted or set to 'null'/empty sequence in the PG manifest
+		if len(spec.AllowedSourceRanges) > 0 {
+			serviceSpec.LoadBalancerSourceRanges = spec.AllowedSourceRanges
+		} else {
+			// safe default value: lock a load balancer only to the local address unless overridden explicitly
+			serviceSpec.LoadBalancerSourceRanges = []string{localHost}
 		}
+		c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges)

 		serviceSpec.Type = v1.ServiceTypeLoadBalancer
-		serviceSpec.LoadBalancerSourceRanges = sourceRanges

 		annotations = map[string]string{
 			constants.ZalandoDNSNameAnnotation: dnsName,


@@ -132,16 +132,17 @@ func (c *Cluster) preScaleDown(newStatefulSet *v1beta1.StatefulSet) error {
 	return nil
 }

-// setRollingUpdateFlagForStatefulSet sets the indicator or the rolling upgrade requirement
+// setRollingUpdateFlagForStatefulSet sets the indicator of the rolling update requirement
 // in the StatefulSet annotation.
 func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *v1beta1.StatefulSet, val bool) {
 	anno := sset.GetAnnotations()
-	c.logger.Debugf("rolling upgrade flag has been set to %t", val)
 	if anno == nil {
 		anno = make(map[string]string)
 	}

 	anno[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(val)
 	sset.SetAnnotations(anno)
+	c.logger.Debugf("statefulset's rolling update annotation has been set to %t", val)
 }

 // applyRollingUpdateFlagforStatefulSet sets the rolling update flag for the cluster's StatefulSet
@@ -176,9 +177,9 @@ func (c *Cluster) getRollingUpdateFlagFromStatefulSet(sset *v1beta1.StatefulSet,
 	return flag
 }

-// mergeRollingUpdateFlagUsingCache return the value of the rollingUpdate flag from the passed
+// mergeRollingUpdateFlagUsingCache returns the value of the rollingUpdate flag from the passed
 // statefulset, however, the value can be cleared if there is a cached flag in the cluster that
-// is set to false (the disrepancy could be a result of a failed StatefulSet update).s
+// is set to false (the discrepancy could be a result of a failed StatefulSet update)
 func (c *Cluster) mergeRollingUpdateFlagUsingCache(runningStatefulSet *v1beta1.StatefulSet) bool {
 	var (
 		cachedStatefulsetExists, clearRollingUpdateFromCache, podsRollingUpdateRequired bool
@@ -198,7 +199,7 @@ func (c *Cluster) mergeRollingUpdateFlagUsingCache(runningStatefulSet *v1beta1.S
 		c.logger.Infof("clearing the rolling update flag based on the cached information")
 		podsRollingUpdateRequired = false
 	} else {
-		c.logger.Infof("found a statefulset with an unfinished pods rolling update")
+		c.logger.Infof("found a statefulset with an unfinished rolling update of the pods")
 	}
 }


@@ -2,6 +2,7 @@ package cluster

 import (
 	"fmt"
+
 	"k8s.io/api/core/v1"
 	policybeta1 "k8s.io/api/policy/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -280,6 +281,7 @@ func (c *Cluster) syncStatefulSet() error {
 		podsRollingUpdateRequired = true
 		c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired)
 	}
+	c.logStatefulSetChanges(c.Statefulset, desiredSS, false, cmp.reasons)

 	if !cmp.replace {


@@ -179,7 +179,7 @@ func (c *Cluster) logStatefulSetChanges(old, new *v1beta1.StatefulSet, isUpdate
 	if !reflect.DeepEqual(old.Annotations, new.Annotations) {
 		c.logger.Debugf("metadata.annotation diff\n%s\n", util.PrettyDiff(old.Annotations, new.Annotations))
 	}
-	c.logger.Debugf("spec diff\n%s\n", util.PrettyDiff(old.Spec, new.Spec))
+	c.logger.Debugf("spec diff between old and new statefulsets: \n%s\n", util.PrettyDiff(old.Spec, new.Spec))

 	if len(reasons) > 0 {
 		for _, reason := range reasons {


@@ -110,6 +110,29 @@ func (c *Controller) initOperatorConfig() {
 	c.opConfig = config.NewFromMap(configMapData)
 	c.warnOnDeprecatedOperatorParameters()

+	if c.opConfig.SetMemoryRequestToLimit {
+
+		isSmaller, err := util.RequestIsSmallerThanLimit(c.opConfig.DefaultMemoryRequest, c.opConfig.DefaultMemoryLimit)
+		if err != nil {
+			panic(err)
+		}
+		if isSmaller {
+			c.logger.Warningf("The default memory request of %v for Postgres containers is increased to match the default memory limit of %v.", c.opConfig.DefaultMemoryRequest, c.opConfig.DefaultMemoryLimit)
+			c.opConfig.DefaultMemoryRequest = c.opConfig.DefaultMemoryLimit
+		}
+
+		isSmaller, err = util.RequestIsSmallerThanLimit(c.opConfig.ScalyrMemoryRequest, c.opConfig.ScalyrMemoryLimit)
+		if err != nil {
+			panic(err)
+		}
+		if isSmaller {
+			c.logger.Warningf("The memory request of %v for the Scalyr sidecar container is increased to match the memory limit of %v.", c.opConfig.ScalyrMemoryRequest, c.opConfig.ScalyrMemoryLimit)
+			c.opConfig.ScalyrMemoryRequest = c.opConfig.ScalyrMemoryLimit
+		}
+
+		// generateStatefulSet adjusts values for individual Postgres clusters
+	}
 }

 func (c *Controller) modifyConfigFromEnvironment() {

@@ -55,6 +55,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest
 	result.DefaultCPULimit = fromCRD.PostgresPodResources.DefaultCPULimit
 	result.DefaultMemoryLimit = fromCRD.PostgresPodResources.DefaultMemoryLimit
+	result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit

 	result.ResourceCheckInterval = time.Duration(fromCRD.Timeouts.ResourceCheckInterval)
 	result.ResourceCheckTimeout = time.Duration(fromCRD.Timeouts.ResourceCheckTimeout)


@@ -104,6 +104,7 @@ type Config struct {
 	PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
 	ProtectedRoles          []string      `name:"protected_role_names" default:"admin"`
 	PostgresSuperuserTeams  []string      `name:"postgres_superuser_teams" default:""`
+	SetMemoryRequestToLimit bool          `name:"set_memory_request_to_limit" default:"false"`
 }

 // MustMarshal marshals the config or panics


@@ -3,12 +3,14 @@ package util

 import (
 	"crypto/md5" // #nosec we need it to for PostgreSQL md5 passwords
 	"encoding/hex"
+	"fmt"
 	"math/rand"
 	"regexp"
 	"strings"
 	"time"

 	"github.com/motomux/pretty"
+	resource "k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"github.com/zalando-incubator/postgres-operator/pkg/spec"
@@ -127,3 +129,19 @@ func Coalesce(val, defaultVal string) string {
 	}
 	return val
 }
+
+// RequestIsSmallerThanLimit parses the given resource quantities and reports
+// whether the request is strictly smaller than the limit.
+func RequestIsSmallerThanLimit(requestStr, limitStr string) (bool, error) {
+
+	request, err := resource.ParseQuantity(requestStr)
+	if err != nil {
+		return false, fmt.Errorf("could not parse memory request %v : %v", requestStr, err)
+	}
+
+	limit, err2 := resource.ParseQuantity(limitStr)
+	if err2 != nil {
+		return false, fmt.Errorf("could not parse memory limit %v : %v", limitStr, err2)
+	}
+
+	return request.Cmp(limit) == -1, nil
+}


@@ -69,6 +69,17 @@ var substringMatch = []struct {
 	{regexp.MustCompile(`aaaa (\d+) bbbb`), "aaaa 123 bbbb", nil},
 }

+var requestIsSmallerThanLimitTests = []struct {
+	request string
+	limit   string
+	out     bool
+}{
+	{"1G", "2G", true},
+	{"1G", "1Gi", true}, // G is 1000^3 bytes, Gi is 1024^3 bytes
+	{"1024Mi", "1G", false},
+	{"1e9", "1G", false}, // 1e9 bytes == 1G
+}
+
 func TestRandomPassword(t *testing.T) {
 	const pwdLength = 10
 	pwd := RandomPassword(pwdLength)
@@ -143,3 +154,15 @@ func TestMapContains(t *testing.T) {
 		}
 	}
 }
+
+func TestRequestIsSmallerThanLimit(t *testing.T) {
+	for _, tt := range requestIsSmallerThanLimitTests {
+		res, err := RequestIsSmallerThanLimit(tt.request, tt.limit)
+		if err != nil {
+			t.Errorf("RequestIsSmallerThanLimit returned unexpected error: %#v", err)
+		}
+		if res != tt.out {
+			t.Errorf("RequestIsSmallerThanLimit expected: %#v, got: %#v", tt.out, res)
+		}
+	}
+}


@@ -94,7 +94,7 @@ function build_operator_binary(){

 	# redirecting stderr greatly reduces non-informative output during normal builds
 	echo "Build operator binary (stderr redirected to /dev/null)..."
-	make tools deps local > /dev/null 2>&1
+	make clean tools deps local test > /dev/null 2>&1
 }
@@ -215,6 +215,7 @@ function main(){
 	clean_up
 	start_minikube
+	kubectl create namespace test
 	start_operator
 	forward_ports
 	check_health