Merge branch 'master' into feature/actions-for-secret-syncing

This commit is contained in:
erthalion 2019-01-23 17:30:15 +01:00
commit b44bf1dd3f
53 changed files with 729 additions and 233 deletions

.golangci.yml Normal file
View File

@ -0,0 +1,5 @@
# https://github.com/golangci/golangci/wiki/Configuration
service:
prepare:
- make deps

View File

@ -1,11 +1,4 @@
# for github.com
approvals:
groups:
zalando:
minimum: 2
from:
orgs:
- "zalando"
X-Zalando-Team: "acid"
# type should be one of [code, doc, config, tools, secrets]
# code will be the default value, if X-Zalando-Type is not found in .zappr.yml

View File

@ -1,2 +1,2 @@
# global owners
* @alexeyklyukin @erthalion @zerg-junior @Jan-M @CyberDem0n @avaczi
* @alexeyklyukin @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu

View File

@ -1,4 +1,4 @@
.PHONY: clean local linux macos docker push scm-source.json
.PHONY: clean local test linux macos docker push scm-source.json
BINARY ?= postgres-operator
BUILD_FLAGS ?= -v
@ -30,6 +30,11 @@ else
DOCKERFILE = Dockerfile
endif
ifdef CDP_PULL_REQUEST_NUMBER
CDP_TAG := -${CDP_BUILD_VERSION}
endif
PATH := $(GOPATH)/bin:$(PATH)
SHELL := env PATH=$(PATH) $(SHELL)
@ -52,13 +57,18 @@ docker-context: scm-source.json linux
cp build/linux/${BINARY} scm-source.json docker/build/
docker: ${DOCKERDIR}/${DOCKERFILE} docker-context
cd "${DOCKERDIR}" && docker build --rm -t "$(IMAGE):$(TAG)$(DEBUG_POSTFIX)" -f "${DOCKERFILE}" .
echo `(env)`
echo "Tag ${TAG}"
echo "Version ${VERSION}"
echo "CDP tag ${CDP_TAG}"
echo "git describe $(shell git describe --tags --always --dirty)"
cd "${DOCKERDIR}" && docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_POSTFIX)" -f "${DOCKERFILE}" .
indocker-race:
docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.8.1 bash -c "make linux"
push:
docker push "$(IMAGE):$(TAG)"
docker push "$(IMAGE):$(TAG)$(CDP_TAG)"
scm-source.json: .git
echo '{\n "url": "git:$(GITURL)",\n "revision": "$(GITHEAD)",\n "author": "$(USER)",\n "status": "$(GITSTATUS)"\n}' > scm-source.json
@ -76,3 +86,6 @@ vet:
deps:
@glide install --strip-vendor
test:
@go test ./...

View File

@ -4,6 +4,7 @@
[![Coverage Status](https://coveralls.io/repos/github/zalando-incubator/postgres-operator/badge.svg)](https://coveralls.io/github/zalando-incubator/postgres-operator)
[![Go Report Card](https://goreportcard.com/badge/github.com/zalando-incubator/postgres-operator)](https://goreportcard.com/report/github.com/zalando-incubator/postgres-operator)
[![GoDoc](https://godoc.org/github.com/zalando-incubator/postgres-operator?status.svg)](https://godoc.org/github.com/zalando-incubator/postgres-operator)
[![golangci](https://golangci.com/badges/github.com/zalando-incubator/postgres-operator.svg)](https://golangci.com/r/github.com/zalando-incubator/postgres-operator)
## Introduction
@ -67,12 +68,14 @@ kubectl create -f manifests/configmap.yaml # configuration
kubectl create -f manifests/operator-service-account-rbac.yaml # identity and permissions
kubectl create -f manifests/postgres-operator.yaml # deployment
# create a Postgres cluster
# create a Postgres cluster in a non-default namespace
kubectl create namespace test
kubectl config set-context minikube --namespace=test
kubectl create -f manifests/minimal-postgres-manifest.yaml
# connect to the Postgres master via psql
# operator creates the relevant k8s secret
export HOST_PORT=$(minikube service acid-minimal-cluster --url | sed 's,.*/,,')
export HOST_PORT=$(minikube service --namespace test acid-minimal-cluster --url | sed 's,.*/,,')
export PGHOST=$(echo $HOST_PORT | cut -d: -f 1)
export PGPORT=$(echo $HOST_PORT | cut -d: -f 2)
export PGPASSWORD=$(kubectl get secret postgres.acid-minimal-cluster.credentials -o 'jsonpath={.data.password}' | base64 -d)
@ -88,13 +91,14 @@ cd postgres-operator
./run_operator_locally.sh
```
Note we provide the `/manifests` directory as an example only; you should consider adjusting the manifests to your particular setting.
## Running and testing the operator
The best way to test the operator is to run it in [minikube](https://kubernetes.io/docs/getting-started-guides/minikube/).
Minikube is a tool to run a Kubernetes cluster locally.
The best way to test the operator is to run it locally in [minikube](https://kubernetes.io/docs/getting-started-guides/minikube/). See the developer docs (`docs/developer.yaml`) for details.
### Configuration Options
The operator can be configured with the provided ConfigMap (`manifests/configmap.yaml`).
The operator can be configured with the provided ConfigMap (`manifests/configmap.yaml`) or the operator's own CRD.

View File

@ -47,14 +47,14 @@ func init() {
log.Printf("Fully qualified configmap name: %v", config.ConfigMapName)
}
if crd_interval := os.Getenv("CRD_READY_WAIT_INTERVAL"); crd_interval != "" {
config.CRDReadyWaitInterval = mustParseDuration(crd_interval)
if crdInterval := os.Getenv("CRD_READY_WAIT_INTERVAL"); crdInterval != "" {
config.CRDReadyWaitInterval = mustParseDuration(crdInterval)
} else {
config.CRDReadyWaitInterval = 4 * time.Second
}
if crd_timeout := os.Getenv("CRD_READY_WAIT_TIMEOUT"); crd_timeout != "" {
config.CRDReadyWaitTimeout = mustParseDuration(crd_timeout)
if crdTimeout := os.Getenv("CRD_READY_WAIT_TIMEOUT"); crdTimeout != "" {
config.CRDReadyWaitTimeout = mustParseDuration(crdTimeout)
} else {
config.CRDReadyWaitTimeout = 30 * time.Second
}

View File

@ -20,9 +20,6 @@ pipeline:
mv go /usr/local
ln -s /usr/local/go/bin/go /usr/bin/go
go version
- desc: 'Install Docker'
cmd: |
curl -sSL https://get.docker.com/ | sh
- desc: 'Symlink sources into the GOPATH'
cmd: |
mkdir -p $OPERATOR_TOP_DIR

View File

@ -1,6 +1,6 @@
## Create ConfigMap
ConfigMap is used to store the configuration of the operator
A ConfigMap is used to store the configuration of the operator.
```bash
$ kubectl create -f manifests/configmap.yaml
@ -41,12 +41,14 @@ manifests:
```bash
$ kubectl create namespace test
$ kubectl config set-context --namespace=test
$ kubectl config set-context $(kubectl config current-context) --namespace=test
```
All subsequent `kubectl` commands will work with the `test` namespace. The
operator will run in this namespace and look up needed resources - such as its
config map - there.
operator will run in this namespace and look up needed resources - such as its
ConfigMap - there. Please note that the namespace for service accounts and
cluster role bindings in [operator RBAC rules](../manifests/operator-service-account-rbac.yaml)
needs to be adjusted to the non-default value.
## Specify the namespace to watch
@ -56,8 +58,10 @@ replicas to 5" and reacting to the requests, in this example by actually
scaling up.
By default, the operator watches the namespace it is deployed to. You can
change this by altering the `WATCHED_NAMESPACE` env var in the operator
deployment manifest or the `watched_namespace` field in the operator configmap.
change this by setting the `WATCHED_NAMESPACE` var in the `env` section of the
[operator deployment](../manifests/postgres-operator.yaml) manifest or by
altering the `watched_namespace` field in the operator
[ConfigMap](../manifests/configmap.yaml#L6).
If both are set, the env var takes precedence. To make the
operator listen to all namespaces, explicitly set the field/env var to "`*`".
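As a sketch, assuming the layouts of the example manifests in this repo, the two places to set this look as follows (the `test` value is illustrative):

```yaml
# fragment of manifests/postgres-operator.yaml (the env var takes precedence)
env:
  - name: WATCHED_NAMESPACE
    value: "test"            # set to "*" to watch all namespaces
---
# fragment of manifests/configmap.yaml
data:
  watched_namespace: "test"
```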
@ -75,7 +79,7 @@ in the case database pods need to talk to the Kubernetes API (e.g. when using
Kubernetes-native configuration of Patroni). The operator checks that the
`pod_service_account_name` exists in the target namespace, and, if not, deploys
there the `pod_service_account_definition` from the operator
[`Config`](pkg/util/config/config.go) with the default value of:
[`Config`](../pkg/util/config/config.go) with the default value of:
```yaml
apiVersion: v1
@ -86,13 +90,13 @@ metadata:
In this definition, the operator overwrites the account's name to match
`pod_service_account_name` and the `default` namespace to match the target
namespace. The operator performs **no** further syncing of this account.
namespace. The operator performs **no** further syncing of this account.
## Role-based access control for the operator
The `manifests/operator-service-account-rbac.yaml` defines cluster roles and bindings needed
for the operator to function under access control restrictions. To deploy the
operator with this RBAC policy use:
The `manifests/operator-service-account-rbac.yaml` defines cluster roles and
bindings needed for the operator to function under access control restrictions.
To deploy the operator with this RBAC policy use:
```bash
$ kubectl create -f manifests/configmap.yaml
@ -103,18 +107,18 @@ operator with this RBAC policy use:
Note that the service account in `operator-rbac.yaml` is named
`zalando-postgres-operator`. You may have to change the `service_account_name`
in the operator configmap and `serviceAccountName` in the postgres-operator
in the operator ConfigMap and `serviceAccountName` in the postgres-operator
deployment appropriately.
This is done intentionally, as to avoid breaking those setups that already work
This is done intentionally to avoid breaking those setups that already work
with the default `operator` account. In the future the operator should ideally
be run under the `zalando-postgres-operator` service account.
The service account defined in `operator-rbac.yaml` acquires some privileges
not really used by the operator (i.e. we only need list and watch on
configmaps), this is also done intentionally to avoid breaking things if
someone decides to configure the same service account in the operator's
configmap to run postgres clusters.
The service account defined in `operator-rbac.yaml` acquires some privileges
not really used by the operator (i.e. we only need `list` and `watch` on
`configmaps` resources). This is also done intentionally to avoid breaking
things if someone decides to configure the same service account in the
operator's ConfigMap to run postgres clusters.
### Use taints and tolerations for dedicated PostgreSQL nodes
@ -144,12 +148,12 @@ data:
## Custom Pod Environment Variables
It is possible to configure a config map which is used by the Postgres pods as
It is possible to configure a ConfigMap which is used by the Postgres pods as
an additional provider for environment variables.
One use case is to customize the Spilo image and configure it with environment
variables. The config map with the additional settings is configured in the
operator's main config map:
variables. The ConfigMap with the additional settings is configured in the
operator's main ConfigMap:
**postgres-operator ConfigMap**
@ -186,37 +190,61 @@ instances permitted by each Postgres cluster managed by the operator. If either
`min_instances` or `max_instances` is set to a non-zero value, the operator may
adjust the number of instances specified in the cluster manifest to match
either the min or the max boundary. For instance, if a cluster manifest has 1
instance and the min_instances is set to 3, the cluster will be created with 3
instances. By default, both parameters are set to -1.
instance and the `min_instances` is set to 3, the cluster will be created with 3
instances. By default, both parameters are set to `-1`.
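For illustration, a ConfigMap fragment pinning these boundaries might look like this (the values are assumptions, not shipped defaults):

```yaml
# fragment of the operator ConfigMap; values are strings in a ConfigMap
data:
  min_instances: "3"   # manifests requesting fewer instances are bumped up to 3
  max_instances: "5"   # manifests requesting more instances are capped at 5
```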
## Load balancers
For any Postgresql/Spilo cluster, the operator creates two separate k8s
For any Postgresql/Spilo cluster, the operator creates two separate Kubernetes
services: one for the master pod and one for replica pods. To expose these
services to an outer network, one can attach load balancers to them by setting
`enableMasterLoadBalancer` and/or `enableReplicaLoadBalancer` to `true` in the
cluster manifest. In the case any of these variables are omitted from the
manifest, the operator configmap's settings `enable_master_load_balancer` and
`enable_replica_load_balancer` apply. Note that the operator settings affect
all Postgresql services running in a namespace watched by the operator.
all Postgresql services running in all namespaces watched by the operator.
To limit the range of IP addresses that can reach a load balancer, specify the
desired ranges in the `allowedSourceRanges` field (applies to both master and
replica load balancers). To prevent exposing load balancers to the entire
Internet, this field is set at cluster creation time to `127.0.0.1/32` unless
overwritten explicitly. If you want to revoke all IP ranges from an existing
cluster, please set the `allowedSourceRanges` field to `127.0.0.1/32` or to an
empty sequence `[]`. Setting the field to `null` or omitting it entirely may
lead to Kubernetes removing this field from the manifest due to its
[handling of null fields](https://kubernetes.io/docs/concepts/overview/object-management-kubectl/declarative-config/#how-apply-calculates-differences-and-merges-changes).
Then the resulting manifest will not contain the necessary change, and the
operator will do nothing with the existing source ranges.
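For example, a cluster manifest fragment that exposes both services but restricts access to two illustrative ranges could look like this:

```yaml
# fragment of a postgresql cluster manifest
spec:
  enableMasterLoadBalancer: true
  enableReplicaLoadBalancer: true
  allowedSourceRanges:       # applies to both master and replica load balancers
    - 203.0.113.0/24         # example range (TEST-NET-3)
    - 10.0.0.0/8
  # to revoke access later, set this field to ["127.0.0.1/32"] or []
```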
## Running periodic 'autorepair' scans of Kubernetes objects
The Postgres operator periodically scans all Kubernetes objects belonging to
each cluster and repairs all discrepancies between them and the definitions
generated from the current cluster manifest. There are two types of scans: a
`sync scan`, running every `resync_period` seconds for every cluster, and the
`repair scan`, coming every `repair_period` only for those clusters that didn't
report success as a result of the last operation applied to them.
generated from the current cluster manifest. There are two types of scans (see
the ConfigMap sketch below):
* `sync scan`, running every `resync_period` seconds for every cluster
* `repair scan`, running every `repair_period` seconds, but only for those
clusters that didn't report success as a result of the last operation applied
to them
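A sketch of the corresponding ConfigMap entries; the duration values below are illustrative assumptions, not the shipped defaults:

```yaml
# fragment of the operator ConfigMap
data:
  resync_period: "30m"   # interval of the sync scan, run for every cluster
  repair_period: "5m"    # interval of the repair scan, run only for failing clusters
```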
## Postgres roles supported by the operator
The operator is capable of maintaining roles of multiple kinds within a Postgres database cluster:
The operator is capable of maintaining roles of multiple kinds within a
Postgres database cluster:
1. **System roles** are roles necessary for the proper work of Postgres itself such as a replication role or the initial superuser role. The operator delegates creating such roles to Patroni and only establishes relevant secrets.
* **System roles** are roles necessary for the proper work of Postgres itself such as a replication role or the initial superuser role. The operator delegates creating such roles to Patroni and only establishes relevant secrets.
2. **Infrastructure roles** are roles for processes originating from external systems, e.g. monitoring robots. The operator creates such roles in all PG clusters it manages assuming k8s secrets with the relevant credentials exist beforehand.
* **Infrastructure roles** are roles for processes originating from external systems, e.g. monitoring robots. The operator creates such roles in all Postgres clusters it manages assuming that Kubernetes secrets with the relevant credentials exist beforehand.
3. **Per-cluster robot users** are also roles for processes originating from external systems but defined for an individual Postgres cluster in its manifest. A typical example is a role for connections from an application that uses the database.
* **Per-cluster robot users** are also roles for processes originating from external systems but defined for an individual Postgres cluster in its manifest. A typical example is a role for connections from an application that uses the database.
4. **Human users** originate from the Teams API that returns list of the team members given a team id. Operator differentiates between (a) product teams that own a particular Postgres cluster and are granted admin rights to maintain it, and (b) Postgres superuser teams that get the superuser access to all PG databases running in a k8s cluster for the purposes of maintaining and troubleshooting.
* **Human users** originate from the Teams API that returns a list of the team members given a team id. The operator differentiates between (a) product teams that own a particular Postgres cluster and are granted admin rights to maintain it, and (b) Postgres superuser teams that get the superuser access to all Postgres databases running in a Kubernetes cluster for the purposes of maintaining and troubleshooting.
## Understanding rolling update of Spilo pods
The operator logs reasons for a rolling update with the `info` level and
a diff between the old and new StatefulSet specs with the `debug` level.
To read the latter log entry with the escaped characters rendered, view it
in the CLI with `echo -e`. Note that the resulting message will contain some
noise because the `PodTemplate` used by the operator is yet to be updated
with the default values used internally in Kubernetes.

View File

@ -188,13 +188,13 @@ defaults to 4)
* /workers/$id/logs - log of the operations performed by a given worker
* /clusters/ - list of teams and clusters known to the operator
* /clusters/$team - list of clusters for the given team
* /cluster/$team/$clustername - detailed status of the cluster, including the
* /clusters/$team/$namespace/$clustername - detailed status of the cluster, including the
specifications for CRD, master and replica services, endpoints and
statefulsets, as well as any errors and the worker that cluster is assigned
to.
* /cluster/$team/$clustername/logs/ - logs of all operations performed to the
* /clusters/$team/$namespace/$clustername/logs/ - logs of all operations performed to the
cluster so far.
* /cluster/$team/$clustername/history/ - history of cluster changes triggered
* /clusters/$team/$namespace/$clustername/history/ - history of cluster changes triggered
by the changes of the manifest (shows the somewhat obscure diff and what
exactly has triggered the change)
@ -275,3 +275,12 @@ Type 'help' for list of commands.
(dlv) c
PASS
```
To test the multinamespace setup, you can use:
```
./run_operator_locally.sh --rebuild-operator
```
It will automatically create an `acid-minimal-cluster` in the namespace `test`. Then you can, for example, check the Patroni logs:
```
kubectl logs acid-minimal-cluster-0
```

View File

@ -51,6 +51,10 @@ Please, report any issues discovered to https://github.com/zalando-incubator/pos
## Talks
1. "Blue elephant on-demand: Postgres + Kubernetes" talk by Oleksii Kliukin and Jan Mussler, FOSDEM 2018: [video](https://fosdem.org/2018/schedule/event/blue_elephant_on_demand_postgres_kubernetes/) | [slides (pdf)](https://www.postgresql.eu/events/fosdem2018/sessions/session/1735/slides/59/FOSDEM%202018_%20Blue_Elephant_On_Demand.pdf)
1. "PostgreSQL and Kubernetes: DBaaS without a vendor-lock" talk by Oleksii Kliukin, PostgreSQL Sessions 2018: [slides](https://speakerdeck.com/alexeyklyukin/postgresql-and-kubernetes-dbaas-without-a-vendor-lock)
2. "Kube-Native Postgres" talk by Josh Berkus, KubeCon 2017: [video](https://www.youtube.com/watch?v=Zn1vd7sQ_bc)
2. "PostgreSQL High Availability on Kubernetes with Patroni" talk by Oleksii Kliukin, Atmosphere 2018: [video](https://www.youtube.com/watch?v=cFlwQOPPkeg) | [slides](https://speakerdeck.com/alexeyklyukin/postgresql-high-availability-on-kubernetes-with-patroni)
2. "Blue elephant on-demand: Postgres + Kubernetes" talk by Oleksii Kliukin and Jan Mussler, FOSDEM 2018: [video](https://fosdem.org/2018/schedule/event/blue_elephant_on_demand_postgres_kubernetes/) | [slides (pdf)](https://www.postgresql.eu/events/fosdem2018/sessions/session/1735/slides/59/FOSDEM%202018_%20Blue_Elephant_On_Demand.pdf)
3. "Kube-Native Postgres" talk by Josh Berkus, KubeCon 2017: [video](https://www.youtube.com/watch?v=Zn1vd7sQ_bc)

View File

@ -96,7 +96,19 @@ Those are parameters grouped directly under the `spec` key in the manifest.
that should be assigned to the cluster pods. When not specified, the value
is taken from the `pod_priority_class_name` operator parameter, if not set
then the default priority class is taken. The priority class itself must be defined in advance.
* **enableShmVolume**
Start a database pod without limitations on shm memory. By default Docker
limits `/dev/shm` to `64M` (see e.g. the [docker
issue](https://github.com/docker-library/postgres/issues/416)), which may not
be enough if PostgreSQL uses parallel workers heavily. If this option is
present and set to `true`, a new tmpfs volume is mounted into the target
database pod to remove this limitation. If it is not present, the decision
about mounting a volume is made based on the operator configuration
(`enable_shm_volume`, which is `true` by default). If it is present and set to
`false`, no volume will be mounted no matter how the operator is configured
(so you can override the operator configuration).
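A minimal manifest sketch (the rest of the spec is omitted):

```yaml
# fragment of a postgresql cluster manifest
spec:
  enableShmVolume: true   # mount a tmpfs on /dev/shm regardless of enable_shm_volume
```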
## Postgres parameters
Those parameters are grouped under the `postgresql` top-level key.
@ -112,6 +124,7 @@ Those parameters are grouped under the `postgresql` top-level key.
cluster. Optional (Spilo automatically sets reasonable defaults for
parameters like work_mem or max_connections).
## Patroni parameters
Those parameters are grouped under the `patroni` top-level key. See the [patroni
@ -151,6 +164,9 @@ explanation of `ttl` and `loop_wait` parameters.
Patroni `maximum_lag_on_failover` parameter value. The default is
set by the Spilo docker image. Optional.
* **slots**
permanent replication slots that Patroni preserves after failover by
re-creating them on the new primary immediately after promotion. Slots can be
reconfigured with the help of `patronictl edit-config`. It is the user's
responsibility to avoid name clashes between the replication slots Patroni
creates automatically for cluster members and permanent replication slots.
Optional.
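For example, mirroring the `complete-postgres-manifest.yaml` example changed in this commit:

```yaml
# `slots` inside the `patroni` section of a cluster manifest
patroni:
  slots:
    permanent_physical_1:
      type: physical
    permanent_logical_1:
      type: logical
      database: foo
      plugin: pgoutput
```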
## Postgres container resources
Those parameters define [CPU and memory requests and

View File

@ -10,29 +10,37 @@ configuration.
configuration structure. There is an
[example](https://github.com/zalando-incubator/postgres-operator/blob/master/manifests/configmap.yaml)
* CRD-based configuration. The configuration is stored in the custom YAML
manifest, an instance of the custom resource definition (CRD) called
`OperatorConfiguration`. This CRD is registered by the operator
during the start when `POSTGRES_OPERATOR_CONFIGURATION_OBJECT` variable is
set to a non-empty value. The CRD-based configuration is a regular YAML
document; non-scalar keys are simply represented in the usual YAML way. The
usage of the CRD-based configuration is triggered by setting the
`POSTGRES_OPERATOR_CONFIGURATION_OBJECT` variable, which should point to the
`postgresql-operator-configuration` object name in the operators namespace.
* CRD-based configuration. The configuration is stored in a custom YAML
manifest, an instance of the custom resource definition (CRD) called
`OperatorConfiguration`. The operator registers this CRD on start and uses it
for configuration if the [operator deployment manifest](https://github.com/zalando-incubator/postgres-operator/blob/master/manifests/postgres-operator.yaml#L21)
sets the `POSTGRES_OPERATOR_CONFIGURATION_OBJECT` env variable to a non-empty
value. The variable should point to the `postgresql-operator-configuration`
object in the operator's namespace. The CRD-based configuration is a regular
YAML document; non-scalar keys are simply represented in the usual YAML way.
There are no default values built into the operator; each parameter that is
not supplied in the configuration receives an empty value. To create your own
configuration, just copy the [default
one](https://github.com/zalando-incubator/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml)
and change it.
CRD-based configuration is more natural and powerful then the one based on
To test the CRD-based configuration locally, use the following:
```bash
kubectl create -f manifests/operator-service-account-rbac.yaml
kubectl create -f manifests/postgres-operator.yaml # set the env var as mentioned above
kubectl create -f manifests/postgresql-operator-default-configuration.yaml
kubectl get operatorconfigurations postgresql-operator-default-configuration -o yaml
```
Note that the operator first registers the definition of the CRD `OperatorConfiguration` and then waits for an instance of the CRD to be created. Between these two events the operator pod may fail since it cannot fetch the not-yet-existing `OperatorConfiguration` instance.
The CRD-based configuration is more powerful than the one based on
ConfigMaps and should be used unless there is a compatibility requirement to
use an already existing configuration. Even in that case, it should be rather
straightforward to convert the configmap based configuration into the CRD-based
one and restart the operator. The ConfigMaps-based configuration will be
deprecated and subsequently removed in future releases.
Note that for the CRD-based configuration configuration groups below correspond
Note that for the CRD-based configuration, the groups of configuration options below correspond
to the non-leaf keys in the target YAML (i.e. for the Kubernetes resources the
key is `kubernetes`). The key is mentioned alongside the group description. The
ConfigMap-based configuration is flat and does not allow non-leaf keys.
@ -46,7 +54,6 @@ They will be deprecated and removed in the future.
Variable names are underscore-separated words.
## General
Those are top-level keys, containing both leaf keys and groups.
@ -221,6 +228,17 @@ CRD-based configuration.
memory limits for the postgres containers, unless overridden by cluster-specific
settings. The default is `1Gi`.
* **set_memory_request_to_limit**
Set `memory_request` to `memory_limit` for all Postgres clusters (the default value is also increased). This prevents certain cases of memory overcommitment at the cost of overprovisioning memory and potential scheduling problems for containers with high memory limits due to the lack of memory on Kubernetes cluster nodes. This affects all containers created by the operator (Postgres, Scalyr sidecar, and other sidecars); to set resources for the operator's own container, change the [operator deployment manually](https://github.com/zalando-incubator/postgres-operator/blob/master/manifests/postgres-operator.yaml#L13). The default is `false`.
* **enable_shm_volume**
Instructs the operator to start any new database pod without limitations on
shm memory. If this option is enabled, a new tmpfs volume is mounted into the
target database pod to remove the shm memory limitation (see e.g. the [docker
issue](https://github.com/docker-library/postgres/issues/416)). This option
is global for an operator object and can be overridden by the
`enableShmVolume` parameter from the Postgres manifest. The default is `true`.
## Operator timeouts
This set of parameters define various timeouts related to some operator
@ -290,12 +308,12 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.
replaced with the hosted zone (the value of the `db_hosted_zone` parameter).
No other placeholders are allowed.
## AWS or GSC interaction
## AWS or GCP interaction
The options in this group configure operator interactions with non-Kubernetes
objects from AWS or Google cloud. They have no effect unless you are using
objects from Amazon Web Services (AWS) or Google Cloud Platform (GCP). They have no effect unless you are using
either. In the CRD-based configuration those options are grouped under the
`aws_or_gcp` key.
`aws_or_gcp` key. Note that the GCP integration is not yet officially supported.
* **wal_s3_bucket**
S3 bucket to use for shipping WAL segments with WAL-E. A bucket has to be
@ -304,8 +322,7 @@ either. In the CRD-based configuration those options are grouped under the
* **log_s3_bucket**
S3 bucket to use for shipping postgres daily logs. Works only with S3 on AWS.
The bucket has to be present and accessible by Postgres pods. At the moment
Spilo does not yet support this. The default is empty.
The bucket has to be present and accessible by Postgres pods. The default is empty.
* **kube_iam_role**
AWS IAM role to supply in the `iam.amazonaws.com/role` annotation of Postgres
@ -324,7 +341,7 @@ Options to aid debugging of the operator itself. Grouped under the `debug` key.
boolean parameter that toggles verbose debug logs from the operator. The
default is `true`.
* **enable_db_access**
* **enable_database_access**
boolean parameter that toggles the functionality of the operator that requires
access to the Postgres database, i.e. creating databases and users. The default
is `true`.
@ -363,6 +380,9 @@ key.
role name to grant to team members created from the Teams API. The default is
`admin`, that role is created by Spilo as a `NOLOGIN` role.
* **enable_admin_role_for_users**
if `true`, the `team_admin_role` will have the rights to grant roles coming from Postgres manifests. Such roles will be created as in `CREATE ROLE 'role_from_manifest' ... ADMIN 'team_admin_role'`. The default is `true`.
* **pam_role_name**
when set, the operator will add all team member roles to this group and add a
`pg_hba` line to authenticate members of that role via `pam`. The default is
@ -380,7 +400,7 @@ key.
infrastructure role. The default is `admin`.
* **postgres_superuser_teams**
List of teams which members need the superuser role in each PG database cluster to administer Postgres and maintain infrastructure built around it. The default is `postgres_superuser`.
List of teams which members need the superuser role in each PG database cluster to administer Postgres and maintain infrastructure built around it. The default is empty.
## Logging and REST API

View File

@ -20,7 +20,7 @@ spec:
- createdb
# role for application foo
foo_user:
foo_user: # or 'foo_user: []'
#databases: name->owner
databases:
@ -57,12 +57,11 @@ $ psql -U postgres
Postgres operator allows defining roles to be created in the resulting database
cluster. It covers three use-cases:
* create application roles specific to the cluster described in the manifest:
`manifest roles`.
* create application roles that should be automatically created on every
cluster managed by the operator: `infrastructure roles`.
* automatically create users for every member of the team owning the database
cluster: `teams API roles`.
* `manifest roles`: create application roles specific to the cluster described in the manifest.
* `infrastructure roles`: create application roles that should be automatically created on every
cluster managed by the operator.
* `teams API roles`: automatically create users for every member of the team owning the database
cluster.
In the next sections, we will cover those use cases in more details.
@ -74,8 +73,8 @@ for an example of `zalando` role, defined with `superuser` and `createdb`
flags.
Manifest roles are defined as a dictionary, with a role name as a key and a
list of role options as a value. For a role without any options supply an empty
list.
list of role options as a value. For a role without any options it is best to
supply the empty list `[]`. It is also possible to leave this field empty as in
our example manifests, but in certain cases such an empty field may be removed
by Kubernetes [due to the `null` value it gets](https://kubernetes.io/docs/concepts/overview/object-management-kubectl/declarative-config/#how-apply-calculates-differences-and-merges-changes)
(`foobar_user:` is equivalent to `foobar_user: null`).
The operator accepts the following options: `superuser`, `inherit`, `login`,
`nologin`, `createrole`, `createdb`, `replication`, `bypassrls`.
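A short sketch of the corresponding `users` section (role names are illustrative):

```yaml
# `users` section of a cluster manifest
users:
  zalando:        # role with options
    - superuser
    - createdb
  foo_user: []    # role without any options
```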
@ -99,10 +98,13 @@ An infrastructure role is a role that should be present on every PostgreSQL
cluster managed by the operator. An example of such a role is a monitoring
user. There are two ways to define them:
* Exclusively via the infrastructure roles secret (specified by the
`infrastructure_roles_secret_name` parameter).
* With the infrastructure roles secret only
* With both the secret and the infrastructure role ConfigMap.
The role definition looks like this (values are base64 encoded):
### Infrastructure roles secret
The infrastructure roles secret is specified by the `infrastructure_roles_secret_name`
parameter. The role definition looks like this (values are base64 encoded):
```yaml
user1: ZGJ1c2Vy
@ -110,25 +112,29 @@ The role definition looks like this (values are base64 encoded):
inrole1: b3BlcmF0b3I=
```
A block above describes the infrastructure role 'dbuser' with the password
'secret' that is the member of the 'operator' role. For the following
The block above describes the infrastructure role 'dbuser' with password
'secret' that is a member of the 'operator' role. For the following
definitions one must increase the index, i.e. the next role will be defined as
'user2' and so on. Note that there is no way to specify role options (like
superuser or nologin) this way, and the resulting role will automatically be a
login role.
'user2' and so on. The resulting role will automatically be a login role.
* Via both the infrastructure roles secret and the infrastructure role
configmap (with the same name as the infrastructure roles secret).
Note that with definitions that solely use the infrastructure roles secret
there is no way to specify role options (like superuser or nologin) or role
memberships. This is where the ConfigMap comes into play.
The infrastructure roles secret should contain an entry with 'rolename:
rolepassword' for each role, and the role description should be specified in
the configmap. Below is the example:
### Secret plus ConfigMap
A [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/)
allows for defining more details regarding the infrastructure roles.
Therefore, one should use the new style that specifies infrastructure roles
using both the secret and a ConfigMap. The ConfigMap must have the same name as
the secret. The secret should contain an entry with 'rolename:rolepassword' for
each role.
```yaml
dbuser: c2VjcmV0
```
and the configmap definition for that user:
And the role description for that user should be specified in the ConfigMap.
```yaml
data:
@ -140,18 +146,13 @@ and the configmap definition for that user:
log_statement: all
```
Note that the definition above allows for more details than the one that relies
solely on the infrastructure role secret. In particular, one can allow
membership in multiple roles via the `inrole` array parameter, define role
flags via the `user_flags` list and supply per-role options through the
`db_parameters` dictionary. All those parameters are optional.
One can allow membership in multiple roles via the `inrole` array parameter,
define role flags via the `user_flags` list and supply per-role options through
the `db_parameters` dictionary. All those parameters are optional.
The definitions that solely use the infrastructure roles secret are more
limited and considered legacy ones; one should use the new style that specifies
infrastructure roles using both the secret and the configmap. You can mix both
in the infrastructure role secret, as long as your new-style definition can be
clearly distinguished from the old-style one (for instance, do not name
new-style roles`userN`).
Both definitions can be mixed in the infrastructure role secret, as long as
your new-style definition can be clearly distinguished from the old-style one
(for instance, do not name new-style roles `userN`).
Since an infrastructure role is created uniformly on all clusters managed by
the operator, it makes no sense to define it without the password. Such
@ -238,9 +239,8 @@ metadata:
uid: efd12e58-5786-11e8-b5a7-06148230260c
```
Note that timezone required for `timestamp` (offset relative to UTC, see RFC
3339 section 5.6)
Note that a timezone is required for `timestamp`. Otherwise, the offset is
relative to UTC; see [RFC 3339 section 5.6](https://www.ietf.org/rfc/rfc3339.txt).
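A hedged sketch of a `clone` section with a timezone-aware timestamp, assuming the `cluster` and `timestamp` keys from `CloneDescription`:

```yaml
# fragment of a cluster manifest cloning another cluster up to a point in time;
# the timestamp carries an explicit UTC offset as required above
clone:
  cluster: "acid-batman"
  timestamp: "2017-12-19T12:40:33+01:00"
```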
## Sidecar Support

View File

@ -6,19 +6,20 @@ metadata:
spec:
teamId: "ACID"
volume:
size: 5Gi
size: 1Gi
numberOfInstances: 2
users: #Application/Robot users
zalando:
- superuser
- createdb
enableMasterLoadBalancer: true
enableReplicaLoadBalancer: true
enableReplicaLoadBalancer: true
allowedSourceRanges: # load balancers' source ranges for both master and replica services
- 127.0.0.1/32
databases:
foo: zalando
#Expert section
enableShmVolume: true
postgresql:
version: "10"
parameters:
@ -31,7 +32,7 @@ spec:
memory: 100Mi
limits:
cpu: 300m
memory: 3000Mi
memory: 300Mi
patroni:
initdb:
encoding: "UTF8"
@ -40,6 +41,13 @@ spec:
pg_hba:
- hostssl all all 0.0.0.0/0 md5
- host all all 0.0.0.0/0 md5
slots:
permanent_physical_1:
type: physical
permanent_logical_1:
type: logical
database: foo
plugin: pgoutput
ttl: 30
loop_wait: &loop_wait 10
retry_timeout: 10

View File

@ -3,21 +3,23 @@ kind: ConfigMap
metadata:
name: postgres-operator
data:
# if set to the "*", listen to all namespaces
# watched_namespace: development
watched_namespace: "*" # listen to all namespaces
cluster_labels: application:spilo
cluster_name_label: version
pod_role_label: spilo-role
debug_logging: "true"
workers: "4"
docker_image: registry.opensource.zalan.do/acid/spilo-cdp-10:1.4-p8
docker_image: registry.opensource.zalan.do/acid/spilo-cdp-11:1.5-p42
pod_service_account_name: "zalando-postgres-operator"
secret_name_template: '{username}.{cluster}.credentials'
super_username: postgres
enable_teams_api: "false"
# set_memory_request_to_limit: "true"
# postgres_superuser_teams: "postgres_superusers"
# enable_team_superuser: "false"
# team_admin_role: "admin"
# enable_admin_role_for_users: "true"
# teams_api_url: http://fake-teams-api.default.svc.cluster.local
# team_api_role_configuration: "log_statement:all"
# infrastructure_roles_secret_name: postgresql-infrastructure-roles

View File

@ -2,6 +2,7 @@ apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
name: acid-minimal-cluster
namespace: test # assumes namespace exists beforehand
spec:
teamId: "ACID"
volume:
@ -14,7 +15,8 @@ spec:
- createdb
# role for application foo
foo_user:
foo_user: []
#databases: name->owner
databases:

View File

@ -14,6 +14,7 @@ rules:
- acid.zalan.do
resources:
- postgresqls
- operatorconfigurations
verbs:
- "*"
- apiGroups:

View File

@ -12,9 +12,20 @@ spec:
serviceAccountName: zalando-postgres-operator
containers:
- name: postgres-operator
image: registry.opensource.zalan.do/acid/postgres-operator:v1.0.0
image: registry.opensource.zalan.do/acid/smoke-tested-postgres-operator:v1.0.0-21-ge39915c
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 500m
memory: 250Mi
limits:
cpu: 2000m
memory: 500Mi
env:
# provided additional ENV vars can overwrite individual config map entries
- name: CONFIG_MAP_NAME
value: "postgres-operator"
# In order to use the CRD OperatorConfiguration instead, uncomment these lines and comment out the two lines above
# - name: POSTGRES_OPERATOR_CONFIGURATION_OBJECT
# value: postgresql-operator-default-configuration

View File

@ -4,7 +4,7 @@ metadata:
name: postgresql-operator-default-configuration
configuration:
etcd_host: ""
docker_image: registry.opensource.zalan.do/acid/spilo-cdp-10:1.4-p8
docker_image: registry.opensource.zalan.do/acid/spilo-cdp-11:1.5-p42
workers: 4
min_instances: -1
max_instances: -1
@ -68,6 +68,7 @@ configuration:
protected_role_names:
- admin
# teams_api_url: ""
# postgres_superuser_teams: "postgres_superusers"
logging_rest_api:
api_port: 8008
ring_log_lines: 100

View File

@ -1,10 +1,7 @@
package v1
// ClusterStatusUnknown etc.: status of a Postgres cluster known to the operator
const (
serviceNameMaxLength = 63
clusterNameMaxLength = serviceNameMaxLength - len("-repl")
serviceNameRegexString = `^[a-z]([-a-z0-9]*[a-z0-9])?$`
ClusterStatusUnknown PostgresStatus = ""
ClusterStatusCreating PostgresStatus = "Creating"
ClusterStatusUpdating PostgresStatus = "Updating"
@ -14,3 +11,9 @@ const (
ClusterStatusRunning PostgresStatus = "Running"
ClusterStatusInvalid PostgresStatus = "Invalid"
)
const (
serviceNameMaxLength = 63
clusterNameMaxLength = serviceNameMaxLength - len("-repl")
serviceNameRegexString = `^[a-z]([-a-z0-9]*[a-z0-9])?$`
)

View File

@ -6,6 +6,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// CRDResource* define names necessary for the k8s CRD API
const (
PostgresCRDResourceKind = "postgresql"
PostgresCRDResourcePlural = "postgresqls"
@ -39,6 +40,7 @@ func buildCRD(name, kind, plural, short string) *apiextv1beta1.CustomResourceDef
}
}
// PostgresCRD returns CustomResourceDefinition built from PostgresCRDResource
func PostgresCRD() *apiextv1beta1.CustomResourceDefinition {
return buildCRD(PostgresCRDResouceName,
PostgresCRDResourceKind,
@ -46,6 +48,7 @@ func PostgresCRD() *apiextv1beta1.CustomResourceDefinition {
PostgresCRDResourceShort)
}
// ConfigurationCRD returns CustomResourceDefinition built from OperatorConfigCRDResource
func ConfigurationCRD() *apiextv1beta1.CustomResourceDefinition {
return buildCRD(OperatorConfigCRDResourceName,
OperatorConfigCRDResouceKind,

View File

@ -1,6 +1,6 @@
// Package v1 is the v1 version of the API.
// +k8s:deepcopy-gen=package,register
// Package v1 is the v1 version of the API.
// +groupName=acid.zalan.do
package v1

View File

@ -104,6 +104,7 @@ func (p *Postgresql) UnmarshalJSON(data []byte) error {
return nil
}
// UnmarshalJSON converts a JSON byte slice to a Duration
func (d *Duration) UnmarshalJSON(b []byte) error {
var (
v interface{}

View File

@ -13,6 +13,8 @@ import (
// +genclient:onlyVerbs=get
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// OperatorConfiguration defines the specification for the OperatorConfiguration.
type OperatorConfiguration struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
@ -21,6 +23,8 @@ type OperatorConfiguration struct {
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// OperatorConfigurationList is used in the k8s API calls
type OperatorConfigurationList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
@ -28,11 +32,13 @@ type OperatorConfigurationList struct {
Items []OperatorConfiguration `json:"items"`
}
// PostgresUsersConfiguration defines the system users of Postgres.
type PostgresUsersConfiguration struct {
SuperUsername string `json:"super_username,omitempty"`
ReplicationUsername string `json:"replication_username,omitempty"`
}
// KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself
type KubernetesMetaConfiguration struct {
PodServiceAccountName string `json:"pod_service_account_name,omitempty"`
// TODO: change it to the proper json
@ -55,6 +61,7 @@ type KubernetesMetaConfiguration struct {
PodPriorityClassName string `json:"pod_priority_class_name,omitempty"`
}
// PostgresPodResourcesDefaults defines the spec of default resources
type PostgresPodResourcesDefaults struct {
DefaultCPURequest string `json:"default_cpu_request,omitempty"`
DefaultMemoryRequest string `json:"default_memory_request,omitempty"`
@ -62,6 +69,7 @@ type PostgresPodResourcesDefaults struct {
DefaultMemoryLimit string `json:"default_memory_limit,omitempty"`
}
// OperatorTimeouts defines the timeout of ResourceCheck, PodWait, ReadyWait
type OperatorTimeouts struct {
ResourceCheckInterval Duration `json:"resource_check_interval,omitempty"`
ResourceCheckTimeout Duration `json:"resource_check_timeout,omitempty"`
@ -71,6 +79,7 @@ type OperatorTimeouts struct {
ReadyWaitTimeout Duration `json:"ready_wait_timeout,omitempty"`
}
// LoadBalancerConfiguration defines the LB configuration
type LoadBalancerConfiguration struct {
DbHostedZone string `json:"db_hosted_zone,omitempty"`
EnableMasterLoadBalancer bool `json:"enable_master_load_balancer,omitempty"`
@ -79,6 +88,8 @@ type LoadBalancerConfiguration struct {
ReplicaDNSNameFormat config.StringTemplate `json:"replica_dns_name_format,omitempty"`
}
// AWSGCPConfiguration defines the configuration for AWS
// TODO complete Google Cloud Platform (GCP) configuration
type AWSGCPConfiguration struct {
WALES3Bucket string `json:"wal_s3_bucket,omitempty"`
AWSRegion string `json:"aws_region,omitempty"`
@ -86,11 +97,13 @@ type AWSGCPConfiguration struct {
KubeIAMRole string `json:"kube_iam_role,omitempty"`
}
// OperatorDebugConfiguration defines options for the debug mode
type OperatorDebugConfiguration struct {
DebugLogging bool `json:"debug_logging,omitempty"`
EnableDBAccess bool `json:"enable_database_access,omitempty"`
}
// TeamsAPIConfiguration defines the configuration of TeamsAPI
type TeamsAPIConfiguration struct {
EnableTeamsAPI bool `json:"enable_teams_api,omitempty"`
TeamsAPIUrl string `json:"teams_api_url,omitempty"`
@ -103,12 +116,14 @@ type TeamsAPIConfiguration struct {
PostgresSuperuserTeams []string `json:"postgres_superuser_teams,omitempty"`
}
// LoggingRESTAPIConfiguration defines Logging API conf
type LoggingRESTAPIConfiguration struct {
APIPort int `json:"api_port,omitempty"`
RingLogLines int `json:"ring_log_lines,omitempty"`
ClusterHistoryEntries int `json:"cluster_history_entries,omitempty"`
}
// ScalyrConfiguration defines the configuration for ScalyrAPI
type ScalyrConfiguration struct {
ScalyrAPIKey string `json:"scalyr_api_key,omitempty"`
ScalyrImage string `json:"scalyr_image,omitempty"`
@ -119,6 +134,7 @@ type ScalyrConfiguration struct {
ScalyrMemoryLimit string `json:"scalyr_memory_limit,omitempty"`
}
// OperatorConfigurationData defines the operator configuration
type OperatorConfigurationData struct {
EtcdHost string `json:"etcd_host,omitempty"`
DockerImage string `json:"docker_image,omitempty"`
@ -131,6 +147,7 @@ type OperatorConfigurationData struct {
PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"`
Kubernetes KubernetesMetaConfiguration `json:"kubernetes"`
PostgresPodResources PostgresPodResourcesDefaults `json:"postgres_pod_resources"`
SetMemoryRequestToLimit bool `json:"set_memory_request_to_limit,omitempty"`
Timeouts OperatorTimeouts `json:"timeouts"`
LoadBalancer LoadBalancerConfiguration `json:"load_balancer"`
AWSGCP AWSGCPConfiguration `json:"aws_or_gcp"`
@ -140,6 +157,7 @@ type OperatorConfigurationData struct {
Scalyr ScalyrConfiguration `json:"scalyr"`
}
// OperatorConfigurationUsers defines the configuration for the super user
type OperatorConfigurationUsers struct {
SuperUserName string `json:"superuser_name,omitempty"`
Replication string `json:"replication_user_name,omitempty"`
@ -147,4 +165,5 @@ type OperatorConfigurationUsers struct {
TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"`
}
// Duration shortens this frequently used name
type Duration time.Duration

View File

@ -9,7 +9,8 @@ import (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
//Postgresql defines PostgreSQL Custom Resource Definition Object.
// Postgresql defines PostgreSQL Custom Resource Definition Object.
type Postgresql struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@ -51,9 +52,11 @@ type PostgresSpec struct {
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
Sidecars []Sidecar `json:"sidecars,omitempty"`
PodPriorityClassName string `json:"pod_priority_class_name,omitempty"`
ShmVolume *bool `json:"enableShmVolume,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PostgresqlList defines a list of PostgreSQL clusters.
type PostgresqlList struct {
metav1.TypeMeta `json:",inline"`
@ -90,18 +93,19 @@ type ResourceDescription struct {
// Resources describes requests and limits for the cluster resouces.
type Resources struct {
ResourceRequest ResourceDescription `json:"requests,omitempty"`
ResourceLimits ResourceDescription `json:"limits,omitempty"`
ResourceRequests ResourceDescription `json:"requests,omitempty"`
ResourceLimits ResourceDescription `json:"limits,omitempty"`
}
// Patroni contains Patroni-specific configuration
type Patroni struct {
InitDB map[string]string `json:"initdb"`
PgHba []string `json:"pg_hba"`
TTL uint32 `json:"ttl"`
LoopWait uint32 `json:"loop_wait"`
RetryTimeout uint32 `json:"retry_timeout"`
MaximumLagOnFailover float32 `json:"maximum_lag_on_failover"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213
InitDB map[string]string `json:"initdb"`
PgHba []string `json:"pg_hba"`
TTL uint32 `json:"ttl"`
LoopWait uint32 `json:"loop_wait"`
RetryTimeout uint32 `json:"retry_timeout"`
MaximumLagOnFailover float32 `json:"maximum_lag_on_failover"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213
Slots map[string]map[string]string `json:"slots"`
}
// CloneDescription describes which cluster the new should clone and up to which point in time

View File

@ -8,15 +8,20 @@ import (
"github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do"
)
// APIVersion of the `postgresql` and `operator` CRDs
const (
APIVersion = "v1"
)
var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
// An instance of runtime.SchemeBuilder, global for this package
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
// AddToScheme is localSchemeBuilder.AddToScheme
AddToScheme = localSchemeBuilder.AddToScheme
// SchemeGroupVersion has GroupName and APIVersion
SchemeGroupVersion = schema.GroupVersion{Group: acidzalando.GroupName, Version: APIVersion}
)

View File

@ -14,6 +14,7 @@ var (
serviceNameRegex = regexp.MustCompile(serviceNameRegexString)
)
// Clone is a convenience wrapper around DeepCopy
func (p *Postgresql) Clone() *Postgresql {
if p == nil {
return nil
@ -83,6 +84,7 @@ func validateCloneClusterDescription(clone *CloneDescription) error {
return nil
}
// Success of the current Status
func (status PostgresStatus) Success() bool {
return status != ClusterStatusAddFailed &&
status != ClusterStatusUpdateFailed &&

View File

@ -132,7 +132,7 @@ var unmarshalCluster = []struct {
// This error message can vary between Go versions, so compute it for the current version.
Error: json.Unmarshal([]byte(`{"teamId": 0}`), &PostgresSpec{}).Error(),
},
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil},
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil},
{[]byte(`{
"kind": "Postgresql",
"apiVersion": "acid.zalan.do/v1",
@ -189,7 +189,14 @@ var unmarshalCluster = []struct {
"ttl": 30,
"loop_wait": 10,
"retry_timeout": 10,
"maximum_lag_on_failover": 33554432
"maximum_lag_on_failover": 33554432,
"slots" : {
"permanent_logical_1" : {
"type" : "logical",
"database" : "foo",
"plugin" : "pgoutput"
}
}
},
"maintenanceWindows": [
"Mon:01:00-06:00",
@ -230,10 +237,11 @@ var unmarshalCluster = []struct {
LoopWait: 10,
RetryTimeout: 10,
MaximumLagOnFailover: 33554432,
Slots: map[string]map[string]string{"permanent_logical_1": {"type": "logical", "database": "foo", "plugin": "pgoutput"}},
},
Resources: Resources{
ResourceRequest: ResourceDescription{CPU: "10m", Memory: "50Mi"},
ResourceLimits: ResourceDescription{CPU: "300m", Memory: "3000Mi"},
ResourceRequests: ResourceDescription{CPU: "10m", Memory: "50Mi"},
ResourceLimits: ResourceDescription{CPU: "300m", Memory: "3000Mi"},
},
TeamID: "ACID",
@ -265,7 +273,7 @@ var unmarshalCluster = []struct {
},
Error: "",
},
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"volume":{"size":"5Gi","storageClass":"SSD"},"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}}}`), nil},
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"volume":{"size":"5Gi","storageClass":"SSD"},"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}}}`), nil},
{
[]byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "teapot-testcluster1"}, "spec": {"teamId": "acid"}}`),
Postgresql{
@ -280,7 +288,7 @@ var unmarshalCluster = []struct {
Status: ClusterStatusInvalid,
Error: errors.New("name must match {TEAM}-{NAME} format").Error(),
},
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil},
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil},
{
in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "clone": {"cluster": "team-batman"}}}`),
out: Postgresql{
@ -300,12 +308,12 @@ var unmarshalCluster = []struct {
},
Error: "",
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{"cluster":"team-batman"}}}`), err: nil},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{"cluster":"team-batman"}}}`), err: nil},
{[]byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1"`),
Postgresql{},
[]byte{},
errors.New("unexpected end of JSON input")},
{[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`),
{[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`),
Postgresql{},
[]byte{},
errors.New("invalid character 'q' looking for beginning of value")}}
@ -491,7 +499,7 @@ func TestMarshal(t *testing.T) {
t.Errorf("Marshal error: %v", err)
}
if !bytes.Equal(m, tt.marshal) {
t.Errorf("Marshal Postgresql expected: %q, got: %q", string(tt.marshal), string(m))
t.Errorf("Marshal Postgresql \nexpected: %q, \ngot: %q", string(tt.marshal), string(m))
}
}
}
@ -499,11 +507,11 @@ func TestMarshal(t *testing.T) {
func TestPostgresMeta(t *testing.T) {
for _, tt := range unmarshalCluster {
if a := tt.out.GetObjectKind(); a != &tt.out.TypeMeta {
t.Errorf("GetObjectKindMeta expected: %v, got: %v", tt.out.TypeMeta, a)
t.Errorf("GetObjectKindMeta \nexpected: %v, \ngot: %v", tt.out.TypeMeta, a)
}
if a := tt.out.GetObjectMeta(); reflect.DeepEqual(a, tt.out.ObjectMeta) {
t.Errorf("GetObjectMeta expected: %v, got: %v", tt.out.ObjectMeta, a)
t.Errorf("GetObjectMeta \nexpected: %v, \ngot: %v", tt.out.ObjectMeta, a)
}
}
}


@ -320,6 +320,23 @@ func (in *Patroni) DeepCopyInto(out *Patroni) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Slots != nil {
in, out := &in.Slots, &out.Slots
*out = make(map[string]map[string]string, len(*in))
for key, val := range *in {
var outVal map[string]string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
(*out)[key] = outVal
}
}
return
}
@ -556,7 +573,7 @@ func (in *ResourceDescription) DeepCopy() *ResourceDescription {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Resources) DeepCopyInto(out *Resources) {
*out = *in
out.ResourceRequest = in.ResourceRequest
out.ResourceRequests = in.ResourceRequests
out.ResourceLimits = in.ResourceLimits
return
}


@ -48,11 +48,22 @@ type Server struct {
controller controllerInformer
}
const (
teamRe = `(?P<team>[a-zA-Z][a-zA-Z0-9\-_]*)`
namespaceRe = `(?P<namespace>[a-z0-9]([-a-z0-9\-_]*[a-z0-9])?)`
clusterRe = `(?P<cluster>[a-zA-Z][a-zA-Z0-9\-_]*)`
)
var (
clusterStatusURL = regexp.MustCompile(`^/clusters/(?P<team>[a-zA-Z][a-zA-Z0-9]*)/(?P<namespace>[a-z0-9]([-a-z0-9]*[a-z0-9])?)/(?P<cluster>[a-zA-Z][a-zA-Z0-9-]*)/?$`)
clusterLogsURL = regexp.MustCompile(`^/clusters/(?P<team>[a-zA-Z][a-zA-Z0-9]*)/(?P<namespace>[a-z0-9]([-a-z0-9]*[a-z0-9])?)/(?P<cluster>[a-zA-Z][a-zA-Z0-9-]*)/logs/?$`)
clusterHistoryURL = regexp.MustCompile(`^/clusters/(?P<team>[a-zA-Z][a-zA-Z0-9]*)/(?P<namespace>[a-z0-9]([-a-z0-9]*[a-z0-9])?)/(?P<cluster>[a-zA-Z][a-zA-Z0-9-]*)/history/?$`)
teamURL = regexp.MustCompile(`^/clusters/(?P<team>[a-zA-Z][a-zA-Z0-9]*)/?$`)
clusterStatusRe = fmt.Sprintf(`^/clusters/%s/%s/%s/?$`, teamRe, namespaceRe, clusterRe)
clusterLogsRe = fmt.Sprintf(`^/clusters/%s/%s/%s/logs/?$`, teamRe, namespaceRe, clusterRe)
clusterHistoryRe = fmt.Sprintf(`^/clusters/%s/%s/%s/history/?$`, teamRe, namespaceRe, clusterRe)
teamURLRe = fmt.Sprintf(`^/clusters/%s/?$`, teamRe)
clusterStatusURL = regexp.MustCompile(clusterStatusRe)
clusterLogsURL = regexp.MustCompile(clusterLogsRe)
clusterHistoryURL = regexp.MustCompile(clusterHistoryRe)
teamURL = regexp.MustCompile(teamURLRe)
workerLogsURL = regexp.MustCompile(`^/workers/(?P<id>\d+)/logs/?$`)
workerEventsQueueURL = regexp.MustCompile(`^/workers/(?P<id>\d+)/queue/?$`)
workerStatusURL = regexp.MustCompile(`^/workers/(?P<id>\d+)/status/?$`)


@ -0,0 +1,30 @@
package apiserver
import (
"testing"
)
const (
clusterStatusTest = "/clusters/test-id/test_namespace/testcluster/"
clusterStatusNumericTest = "/clusters/test-id-1/test_namespace/testcluster/"
clusterLogsTest = "/clusters/test-id/test_namespace/testcluster/logs/"
teamTest = "/clusters/test-id/"
)
func TestUrlRegexps(t *testing.T) {
if clusterStatusURL.FindStringSubmatch(clusterStatusTest) == nil {
t.Errorf("clusterStatusURL can't match %s", clusterStatusTest)
}
if clusterStatusURL.FindStringSubmatch(clusterStatusNumericTest) == nil {
t.Errorf("clusterStatusURL can't match %s", clusterStatusNumericTest)
}
if clusterLogsURL.FindStringSubmatch(clusterLogsTest) == nil {
t.Errorf("clusterLogsURL can't match %s", clusterLogsTest)
}
if teamURL.FindStringSubmatch(teamTest) == nil {
t.Errorf("teamURL can't match %s", teamTest)
}
}
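
As an illustration (not part of the commit, and assuming fmt is imported): the named groups introduced by teamRe, namespaceRe and clusterRe can be read back after a match via SubexpNames; the URL here is a made-up example.

m := clusterStatusURL.FindStringSubmatch("/clusters/acid/test_namespace/acid-testcluster/")
if m != nil {
	for i, name := range clusterStatusURL.SubexpNames() {
		if name != "" {
			// prints team=acid, namespace=test_namespace, cluster=acid-testcluster
			fmt.Printf("%s=%s\n", name, m[i])
		}
	}
}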


@ -12,7 +12,7 @@ import (
"github.com/sirupsen/logrus"
"k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
policybeta1 "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@ -367,7 +367,9 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp
needsRollUpdate = true
reasons = append(reasons, "new statefulset's container specification doesn't match the current one")
} else {
needsRollUpdate, reasons = c.compareContainers(c.Statefulset, statefulSet)
var containerReasons []string
needsRollUpdate, containerReasons = c.compareContainers(c.Statefulset, statefulSet)
reasons = append(reasons, containerReasons...)
}
if len(c.Statefulset.Spec.Template.Spec.Containers) == 0 {
c.logger.Warningf("statefulset %q has no container", util.NameFromMeta(c.Statefulset.ObjectMeta))
@ -375,7 +377,6 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp
}
// In the comparisons below, the needsReplace and needsRollUpdate flags are never reset, since checks fall through
// and the combined effect of all the changes should be applied.
// TODO: log all reasons for changing the statefulset, not just the last one.
// TODO: make sure this is in sync with generatePodTemplate, ideally by using the same list of fields to generate
// the template and the diff
if c.Statefulset.Spec.Template.Spec.ServiceAccountName != statefulSet.Spec.Template.Spec.ServiceAccountName {
@ -386,7 +387,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp
if *c.Statefulset.Spec.Template.Spec.TerminationGracePeriodSeconds != *statefulSet.Spec.Template.Spec.TerminationGracePeriodSeconds {
needsReplace = true
needsRollUpdate = true
reasons = append(reasons, "new statefulset's terminationGracePeriodSeconds doesn't match the current one")
reasons = append(reasons, "new statefulset's terminationGracePeriodSeconds doesn't match the current one")
}
if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Affinity, statefulSet.Spec.Template.Spec.Affinity) {
needsReplace = true
@ -462,23 +463,23 @@ func newCheck(msg string, cond containerCondition) containerCheck {
// compareContainers: compare containers from two stateful sets
// and return:
// * whether or not roll update is needed
// * whether or not a rolling update is needed
// * a list of reasons in a human readable format
func (c *Cluster) compareContainers(setA, setB *v1beta1.StatefulSet) (bool, []string) {
reasons := make([]string, 0)
needsRollUpdate := false
checks := []containerCheck{
newCheck("new statefulset's container %d name doesn't match the current one",
newCheck("new statefulset's container %s (index %d) name doesn't match the current one",
func(a, b v1.Container) bool { return a.Name != b.Name }),
newCheck("new statefulset's container %d image doesn't match the current one",
newCheck("new statefulset's container %s (index %d) image doesn't match the current one",
func(a, b v1.Container) bool { return a.Image != b.Image }),
newCheck("new statefulset's container %d ports don't match the current one",
newCheck("new statefulset's container %s (index %d) ports don't match the current one",
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Ports, b.Ports) }),
newCheck("new statefulset's container %d resources don't match the current ones",
newCheck("new statefulset's container %s (index %d) resources don't match the current ones",
func(a, b v1.Container) bool { return !compareResources(&a.Resources, &b.Resources) }),
newCheck("new statefulset's container %d environment doesn't match the current one",
newCheck("new statefulset's container %s (index %d) environment doesn't match the current one",
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Env, b.Env) }),
newCheck("new statefulset's container %d environment sources don't match the current one",
newCheck("new statefulset's container %s (index %d) environment sources don't match the current one",
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.EnvFrom, b.EnvFrom) }),
}
@ -487,7 +488,7 @@ func (c *Cluster) compareContainers(setA, setB *v1beta1.StatefulSet) (bool, []st
for _, check := range checks {
if check.condition(containerA, containerB) {
needsRollUpdate = true
reasons = append(reasons, fmt.Sprintf(check.reason, index))
reasons = append(reasons, fmt.Sprintf(check.reason, containerA.Name, index))
}
}
}
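// e.g. a changed image on the spilo container now yields
// "new statefulset's container postgres (index 0) image doesn't match the current one"
// rather than a bare index, which is much easier to map to a container in the logs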
@ -754,11 +755,16 @@ func (c *Cluster) initRobotUsers() error {
if err != nil {
return fmt.Errorf("invalid flags for user %q: %v", username, err)
}
adminRole := ""
if c.OpConfig.EnableAdminRoleForUsers {
adminRole = c.OpConfig.TeamAdminRole
}
newRole := spec.PgUser{
Origin: spec.RoleOriginManifest,
Name: username,
Password: util.RandomPassword(constants.PasswordLength),
Flags: flags,
Origin: spec.RoleOriginManifest,
Name: username,
Password: util.RandomPassword(constants.PasswordLength),
Flags: flags,
AdminRole: adminRole,
}
if currentRole, present := c.pgUsers[username]; present {
c.pgUsers[username] = c.resolveNameConflict(&currentRole, &newRole)
@ -917,7 +923,7 @@ func (c *Cluster) GetStatus() *ClusterStatus {
func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) error {
var err error
c.logger.Debugf("failing over from %q to %q", curMaster.Name, candidate)
c.logger.Debugf("switching over from %q to %q", curMaster.Name, candidate)
var wg sync.WaitGroup
@ -943,12 +949,12 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e
}()
if err = c.patroni.Switchover(curMaster, candidate.Name); err == nil {
c.logger.Debugf("successfully failed over from %q to %q", curMaster.Name, candidate)
c.logger.Debugf("successfully switched over from %q to %q", curMaster.Name, candidate)
if err = <-podLabelErr; err != nil {
err = fmt.Errorf("could not get master pod label: %v", err)
}
} else {
err = fmt.Errorf("could not failover: %v", err)
err = fmt.Errorf("could not switch over: %v", err)
}
// signal the role label waiting goroutine to close the shop and go home


@ -18,6 +18,7 @@ import (
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando-incubator/postgres-operator/pkg/spec"
"github.com/zalando-incubator/postgres-operator/pkg/util"
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
"k8s.io/apimachinery/pkg/labels"
)
@ -36,11 +37,12 @@ type pgUser struct {
}
type patroniDCS struct {
TTL uint32 `json:"ttl,omitempty"`
LoopWait uint32 `json:"loop_wait,omitempty"`
RetryTimeout uint32 `json:"retry_timeout,omitempty"`
MaximumLagOnFailover float32 `json:"maximum_lag_on_failover,omitempty"`
PGBootstrapConfiguration map[string]interface{} `json:"postgresql,omitempty"`
TTL uint32 `json:"ttl,omitempty"`
LoopWait uint32 `json:"loop_wait,omitempty"`
RetryTimeout uint32 `json:"retry_timeout,omitempty"`
MaximumLagOnFailover float32 `json:"maximum_lag_on_failover,omitempty"`
PGBootstrapConfiguration map[string]interface{} `json:"postgresql,omitempty"`
Slots map[string]map[string]string `json:"slots,omitempty"`
}
type pgBootstrap struct {
@ -91,18 +93,18 @@ func (c *Cluster) makeDefaultResources() acidv1.Resources {
defaultRequests := acidv1.ResourceDescription{CPU: config.DefaultCPURequest, Memory: config.DefaultMemoryRequest}
defaultLimits := acidv1.ResourceDescription{CPU: config.DefaultCPULimit, Memory: config.DefaultMemoryLimit}
return acidv1.Resources{ResourceRequest: defaultRequests, ResourceLimits: defaultLimits}
return acidv1.Resources{ResourceRequests: defaultRequests, ResourceLimits: defaultLimits}
}
func generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) {
var err error
specRequests := resources.ResourceRequest
specRequests := resources.ResourceRequests
specLimits := resources.ResourceLimits
result := v1.ResourceRequirements{}
result.Requests, err = fillResourceList(specRequests, defaultResources.ResourceRequest)
result.Requests, err = fillResourceList(specRequests, defaultResources.ResourceRequests)
if err != nil {
return nil, fmt.Errorf("could not fill resource requests: %v", err)
}
@ -215,6 +217,9 @@ PatroniInitDBParams:
if patroni.TTL != 0 {
config.Bootstrap.DCS.TTL = patroni.TTL
}
if patroni.Slots != nil {
config.Bootstrap.DCS.Slots = patroni.Slots
}
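// For illustration (not part of the commit): a manifest-side Patroni spec
// carrying the new slots field, mirroring the unmarshal test fixture above:
//
//	patroni := acidv1.Patroni{
//		Slots: map[string]map[string]string{
//			"permanent_logical_1": {
//				"type":     "logical",
//				"database": "foo",
//				"plugin":   "pgoutput",
//			},
//		},
//	}
//
// Patroni receives these verbatim under bootstrap.dcs.slots, where permanent
// slots are preserved across failovers.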
config.PgLocalConfiguration = make(map[string]interface{})
config.PgLocalConfiguration[patroniPGBinariesParameterName] = fmt.Sprintf(pgBinariesLocationTemplate, pg.PgVersion)
@ -334,7 +339,6 @@ func generateSpiloContainer(
envVars []v1.EnvVar,
volumeMounts []v1.VolumeMount,
) *v1.Container {
privilegedMode := true
return &v1.Container{
Name: name,
@ -373,8 +377,8 @@ func generateSidecarContainers(sidecars []acidv1.Sidecar,
resources, err := generateResourceRequirements(
makeResources(
sidecar.Resources.ResourceRequest.CPU,
sidecar.Resources.ResourceRequest.Memory,
sidecar.Resources.ResourceRequests.CPU,
sidecar.Resources.ResourceRequests.Memory,
sidecar.Resources.ResourceLimits.CPU,
sidecar.Resources.ResourceLimits.Memory,
),
@ -392,6 +396,16 @@ func generateSidecarContainers(sidecars []acidv1.Sidecar,
return nil, nil
}
// Check whether or not we're requested to mount a shm volume,
// taking into account that the PostgreSQL manifest has precedence.
func mountShmVolumeNeeded(opConfig config.Config, pgSpec *acidv1.PostgresSpec) bool {
if pgSpec.ShmVolume != nil {
return *pgSpec.ShmVolume
}
return opConfig.ShmVolume
}
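// Precedence sketch (illustrative, not part of the commit):
//	pgSpec.ShmVolume = &true  -> mount the volume regardless of the operator default
//	pgSpec.ShmVolume = &false -> skip the volume regardless of the operator default
//	pgSpec.ShmVolume = nil    -> fall back to the operator's enable_shm_volume (default "true")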
func generatePodTemplate(
namespace string,
labels labels.Set,
@ -403,6 +417,7 @@ func generatePodTemplate(
podServiceAccountName string,
kubeIAMRole string,
priorityClassName string,
shmVolume bool,
) (*v1.PodTemplateSpec, error) {
terminateGracePeriodSeconds := terminateGracePeriod
@ -416,6 +431,10 @@ func generatePodTemplate(
Tolerations: *tolerationsSpec,
}
if shmVolume {
addShmVolume(&podSpec)
}
if nodeAffinity != nil {
podSpec.Affinity = nodeAffinity
}
@ -471,6 +490,18 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
Name: "PGUSER_SUPERUSER",
Value: c.OpConfig.SuperUsername,
},
{
Name: "KUBERNETES_SCOPE_LABEL",
Value: c.OpConfig.ClusterNameLabel,
},
{
Name: "KUBERNETES_ROLE_LABEL",
Value: c.OpConfig.PodRoleLabel,
},
{
Name: "KUBERNETES_LABELS",
Value: labels.Set(c.OpConfig.ClusterLabels).String(),
},
{
Name: "PGPASSWORD_SUPERUSER",
ValueFrom: &v1.EnvVarSource{
@ -621,7 +652,7 @@ func getBucketScopeSuffix(uid string) string {
func makeResources(cpuRequest, memoryRequest, cpuLimit, memoryLimit string) acidv1.Resources {
return acidv1.Resources{
ResourceRequest: acidv1.ResourceDescription{
ResourceRequests: acidv1.ResourceDescription{
CPU: cpuRequest,
Memory: memoryRequest,
},
@ -640,6 +671,61 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
podTemplate *v1.PodTemplateSpec
volumeClaimTemplate *v1.PersistentVolumeClaim
)
// Improve me. Please.
if c.OpConfig.SetMemoryRequestToLimit {
// controller adjusts the default memory request at operator startup
request := spec.Resources.ResourceRequests.Memory
if request == "" {
request = c.OpConfig.DefaultMemoryRequest
}
limit := spec.Resources.ResourceLimits.Memory
if limit == "" {
limit = c.OpConfig.DefaultMemoryLimit
}
isSmaller, err := util.RequestIsSmallerThanLimit(request, limit)
if err != nil {
return nil, err
}
if isSmaller {
c.logger.Warningf("The memory request of %v for the Postgres container is increased to match the memory limit of %v.", request, limit)
spec.Resources.ResourceRequests.Memory = limit
}
// the controller adjusts the Scalyr sidecar request at operator startup,
// as that sidecar is managed separately (see initOperatorConfig);
// here we adjust the sidecar containers defined for this particular cluster
for _, sidecar := range spec.Sidecars {
// TODO #413
sidecarRequest := sidecar.Resources.ResourceRequests.Memory
if request == "" {
request = c.OpConfig.DefaultMemoryRequest
}
sidecarLimit := sidecar.Resources.ResourceLimits.Memory
if limit == "" {
limit = c.OpConfig.DefaultMemoryLimit
}
isSmaller, err := util.RequestIsSmallerThanLimit(sidecarRequest, sidecarLimit)
if err != nil {
return nil, err
}
if isSmaller {
c.logger.Warningf("The memory request of %v for the %v sidecar container is increased to match the memory limit of %v.", sidecar.Resources.ResourceRequests.Memory, sidecar.Name, sidecar.Resources.ResourceLimits.Memory)
sidecar.Resources.ResourceRequests.Memory = sidecar.Resources.ResourceLimits.Memory
}
}
}
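// Worked example (request/limit values borrowed from the test fixture, otherwise assumed):
// with a manifest memory request of 50Mi and a limit of 3000Mi,
// util.RequestIsSmallerThanLimit("50Mi", "3000Mi") returns true and the request
// is raised to 3000Mi, so the container ends up scheduled with request == limit.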
defaultResources := c.makeDefaultResources()
resourceRequirements, err := generateResourceRequirements(spec.Resources, defaultResources)
@ -666,8 +752,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
// generate environment variables for the spilo container
spiloEnvVars := deduplicateEnvVars(
c.generateSpiloPodEnvVars(c.Postgresql.GetUID(), spiloConfiguration, &spec.Clone, customPodEnvVarsList),
c.containerName(), c.logger)
c.generateSpiloPodEnvVars(c.Postgresql.GetUID(), spiloConfiguration, &spec.Clone,
customPodEnvVarsList), c.containerName(), c.logger)
// pickup the docker image for the spilo container
effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)
@ -675,9 +761,15 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
volumeMounts := generateVolumeMounts()
// generate the spilo container
spiloContainer := generateSpiloContainer(c.containerName(), &effectiveDockerImage, resourceRequirements, spiloEnvVars, volumeMounts)
c.logger.Debugf("Generating Spilo container, environment variables: %v", spiloEnvVars)
spiloContainer := generateSpiloContainer(c.containerName(),
&effectiveDockerImage,
resourceRequirements,
spiloEnvVars,
volumeMounts,
)
// resolve conflicts between operator-global and per-cluster sidecards
// resolve conflicts between operator-global and per-cluster sidecars
sideCars := c.mergeSidecars(spec.Sidecars)
resourceRequirementsScalyrSidecar := makeResources(
@ -706,7 +798,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)
// generate pod template for the statefulset, based on the spilo container and sidecards
// generate pod template for the statefulset, based on the spilo container and sidecars
if podTemplate, err = generatePodTemplate(
c.Namespace,
c.labelsSet(true),
@ -717,7 +809,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
c.OpConfig.PodServiceAccountName,
c.OpConfig.KubeIAMRole,
effectivePodPriorityClassName); err != nil {
effectivePodPriorityClassName,
mountShmVolumeNeeded(c.OpConfig, spec)); err != nil {
return nil, fmt.Errorf("could not generate pod template: %v", err)
}
@ -824,6 +917,32 @@ func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
return newcur
}
// To avoid issues with the limited size of /dev/shm inside a docker environment, where
// PostgreSQL can't allocate enough DSA segments from it, we can
// mount an extra memory-backed volume
//
// see https://docs.okd.io/latest/dev_guide/shared_memory.html
func addShmVolume(podSpec *v1.PodSpec) {
volumes := append(podSpec.Volumes, v1.Volume{
Name: constants.ShmVolumeName,
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{
Medium: "Memory",
},
},
})
pgIdx := constants.PostgresContainerIdx
mounts := append(podSpec.Containers[pgIdx].VolumeMounts,
v1.VolumeMount{
Name: constants.ShmVolumeName,
MountPath: constants.ShmVolumePath,
})
podSpec.Containers[pgIdx].VolumeMounts = mounts
podSpec.Volumes = volumes
}
func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string) (*v1.PersistentVolumeClaim, error) {
var storageClassName *string
@ -958,16 +1077,17 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
if c.shouldCreateLoadBalancerForService(role, spec) {
// safe default value: lock load balancer to only local address unless overridden explicitly.
sourceRanges := []string{localHost}
allowedSourceRanges := spec.AllowedSourceRanges
if len(allowedSourceRanges) >= 0 {
sourceRanges = allowedSourceRanges
// spec.AllowedSourceRanges evaluates to the empty slice of zero length
// when omitted or set to 'null'/empty sequence in the PG manifest
if len(spec.AllowedSourceRanges) > 0 {
serviceSpec.LoadBalancerSourceRanges = spec.AllowedSourceRanges
} else {
// safe default value: lock a load balancer only to the local address unless overridden explicitly
serviceSpec.LoadBalancerSourceRanges = []string{localHost}
}
c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges)
serviceSpec.Type = v1.ServiceTypeLoadBalancer
serviceSpec.LoadBalancerSourceRanges = sourceRanges
annotations = map[string]string{
constants.ZalandoDNSNameAnnotation: dnsName,


@ -1,8 +1,11 @@
package cluster
import (
"k8s.io/api/core/v1"
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
"github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil"
"testing"
)
@ -75,3 +78,54 @@ func TestCreateLoadBalancerLogic(t *testing.T) {
}
}
}
func TestShmVolume(t *testing.T) {
testName := "TestShmVolume"
tests := []struct {
subTest string
podSpec *v1.PodSpec
shmPos int
}{
{
subTest: "empty PodSpec",
podSpec: &v1.PodSpec{
Volumes: []v1.Volume{},
Containers: []v1.Container{
v1.Container{
VolumeMounts: []v1.VolumeMount{},
},
},
},
shmPos: 0,
},
{
subTest: "non empty PodSpec",
podSpec: &v1.PodSpec{
Volumes: []v1.Volume{v1.Volume{}},
Containers: []v1.Container{
v1.Container{
VolumeMounts: []v1.VolumeMount{
v1.VolumeMount{},
},
},
},
},
shmPos: 1,
},
}
for _, tt := range tests {
addShmVolume(tt.podSpec)
volumeName := tt.podSpec.Volumes[tt.shmPos].Name
volumeMountName := tt.podSpec.Containers[0].VolumeMounts[tt.shmPos].Name
if volumeName != constants.ShmVolumeName {
t.Errorf("%s %s: Expected volume %s was not created, have %s instead",
testName, tt.subTest, constants.ShmVolumeName, volumeName)
}
if volumeMountName != constants.ShmVolumeName {
t.Errorf("%s %s: Expected mount %s was not created, have %s instead",
testName, tt.subTest, constants.ShmVolumeName, volumeMountName)
}
}
}


@ -4,7 +4,7 @@ import (
"fmt"
"math/rand"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/zalando-incubator/postgres-operator/pkg/spec"
@ -77,11 +77,7 @@ func (c *Cluster) deletePod(podName spec.NamespacedName) error {
return err
}
if err := c.waitForPodDeletion(ch); err != nil {
return err
}
return nil
return c.waitForPodDeletion(ch)
}
func (c *Cluster) unregisterPodSubscriber(podName spec.NamespacedName) {
@ -122,7 +118,7 @@ func (c *Cluster) movePodFromEndOfLifeNode(pod *v1.Pod) (*v1.Pod, error) {
if eol, err = c.podIsEndOfLife(pod); err != nil {
return nil, fmt.Errorf("could not get node %q: %v", pod.Spec.NodeName, err)
} else if !eol {
c.logger.Infof("pod %q is already on a live node", podName)
c.logger.Infof("check failed: pod %q is already on a live node", podName)
return pod, nil
}
@ -162,7 +158,7 @@ func (c *Cluster) masterCandidate(oldNodeName string) (*v1.Pod, error) {
}
if len(replicas) == 0 {
c.logger.Warningf("no available master candidates, migration will cause longer downtime of the master instance")
c.logger.Warningf("no available master candidates, migration will cause longer downtime of Postgres cluster")
return nil, nil
}
@ -193,18 +189,18 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
return fmt.Errorf("could not get pod: %v", err)
}
c.logger.Infof("migrating master pod %q", podName)
c.logger.Infof("starting process to migrate master pod %q", podName)
if eol, err = c.podIsEndOfLife(oldMaster); err != nil {
return fmt.Errorf("could not get node %q: %v", oldMaster.Spec.NodeName, err)
}
if !eol {
c.logger.Debugf("pod is already on a live node")
c.logger.Debugf("no action needed: master pod is already on a live node")
return nil
}
if role := PostgresRole(oldMaster.Labels[c.OpConfig.PodRoleLabel]); role != Master {
c.logger.Warningf("pod %q is not a master", podName)
c.logger.Warningf("no action needed: pod %q is not the master (anymore)", podName)
return nil
}
// we must have a statefulset in the cluster for the migration to work
@ -219,10 +215,10 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
// We may not have a cached statefulset if the initial cluster sync has aborted, revert to the spec in that case.
if *c.Statefulset.Spec.Replicas > 1 {
if masterCandidatePod, err = c.masterCandidate(oldMaster.Spec.NodeName); err != nil {
return fmt.Errorf("could not get new master candidate: %v", err)
return fmt.Errorf("could not find suitable replica pod as candidate for failover: %v", err)
}
} else {
c.logger.Warningf("single master pod for cluster %q, migration will cause longer downtime of the master instance", c.clusterName())
c.logger.Warningf("migrating single pod cluster %q, this will cause downtime of the Postgres cluster until pod is back", c.clusterName())
}
// there are two cases for each postgres cluster that has its master pod on the node to migrate from:
@ -256,15 +252,15 @@ func (c *Cluster) MigrateReplicaPod(podName spec.NamespacedName, fromNodeName st
return fmt.Errorf("could not get pod: %v", err)
}
c.logger.Infof("migrating replica pod %q", podName)
c.logger.Infof("migrating replica pod %q to live node", podName)
if replicaPod.Spec.NodeName != fromNodeName {
c.logger.Infof("pod %q has already migrated to node %q", podName, replicaPod.Spec.NodeName)
c.logger.Infof("check failed: pod %q has already migrated to node %q", podName, replicaPod.Spec.NodeName)
return nil
}
if role := PostgresRole(replicaPod.Labels[c.OpConfig.PodRoleLabel]); role != Replica {
return fmt.Errorf("pod %q is not a replica", podName)
return fmt.Errorf("check failed: pod %q is not a replica", podName)
}
_, err = c.movePodFromEndOfLifeNode(replicaPod)
@ -296,7 +292,7 @@ func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) {
}
func (c *Cluster) recreatePods() error {
c.setProcessName("recreating pods")
c.setProcessName("starting to recreate pods")
ls := c.labelsSet(false)
namespace := c.Namespace
@ -337,10 +333,10 @@ func (c *Cluster) recreatePods() error {
// failover if we have not observed a master pod when re-creating former replicas.
if newMasterPod == nil && len(replicas) > 0 {
if err := c.Switchover(masterPod, masterCandidate(replicas)); err != nil {
c.logger.Warningf("could not perform failover: %v", err)
c.logger.Warningf("could not perform switch over: %v", err)
}
} else if newMasterPod == nil && len(replicas) == 0 {
c.logger.Warningf("cannot switch master role before re-creating the pod: no replicas")
c.logger.Warningf("cannot perform switch over before re-creating the pod: no replicas")
}
c.logger.Infof("recreating old master pod %q", util.NameFromMeta(masterPod.ObjectMeta))


@ -132,16 +132,17 @@ func (c *Cluster) preScaleDown(newStatefulSet *v1beta1.StatefulSet) error {
return nil
}
// setRollingUpdateFlagForStatefulSet sets the indicator or the rolling upgrade requirement
// setRollingUpdateFlagForStatefulSet sets the indicator of the rolling update requirement
// in the StatefulSet annotation.
func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *v1beta1.StatefulSet, val bool) {
anno := sset.GetAnnotations()
c.logger.Debugf("rolling upgrade flag has been set to %t", val)
if anno == nil {
anno = make(map[string]string)
}
anno[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(val)
sset.SetAnnotations(anno)
c.logger.Debugf("statefulset's rolling update annotation has been set to %t", val)
}
// applyRollingUpdateFlagforStatefulSet sets the rolling update flag for the cluster's StatefulSet
@ -176,9 +177,9 @@ func (c *Cluster) getRollingUpdateFlagFromStatefulSet(sset *v1beta1.StatefulSet,
return flag
}
// mergeRollingUpdateFlagUsingCache return the value of the rollingUpdate flag from the passed
// mergeRollingUpdateFlagUsingCache returns the value of the rollingUpdate flag from the passed
// statefulset, however, the value can be cleared if there is a cached flag in the cluster that
// is set to false (the disrepancy could be a result of a failed StatefulSet update).s
// is set to false (the discrepancy could be a result of a failed StatefulSet update)
func (c *Cluster) mergeRollingUpdateFlagUsingCache(runningStatefulSet *v1beta1.StatefulSet) bool {
var (
cachedStatefulsetExists, clearRollingUpdateFromCache, podsRollingUpdateRequired bool
@ -198,7 +199,7 @@ func (c *Cluster) mergeRollingUpdateFlagUsingCache(runningStatefulSet *v1beta1.S
c.logger.Infof("clearing the rolling update flag based on the cached information")
podsRollingUpdateRequired = false
} else {
c.logger.Infof("found a statefulset with an unfinished pods rolling update")
c.logger.Infof("found a statefulset with an unfinished rolling update of the pods")
}
}
@ -436,7 +437,11 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
func (c *Cluster) deleteService(role PostgresRole) error {
c.logger.Debugf("deleting service %s", role)
service := c.Services[role]
service, ok := c.Services[role]
if !ok {
c.logger.Debugf("No service for %s role was found, nothing to delete", role)
return nil
}
if err := c.KubeClient.Services(service.Namespace).Delete(service.Name, c.deleteOptions); err != nil {
return err


@ -2,6 +2,7 @@ package cluster
import (
"fmt"
"k8s.io/api/core/v1"
policybeta1 "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -280,6 +281,7 @@ func (c *Cluster) syncStatefulSet() error {
podsRollingUpdateRequired = true
c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired)
}
c.logStatefulSetChanges(c.Statefulset, desiredSS, false, cmp.reasons)
if !cmp.replace {


@ -1,12 +1,13 @@
package cluster
import (
"time"
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
"k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
policybeta1 "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/types"
"time"
)
// PostgresRole describes role of the node
@ -20,6 +21,7 @@ const (
Replica PostgresRole = "replica"
)
// PodEventType represents the type of a pod-related event
type PodEventType string
// Possible values for the EventType


@ -179,7 +179,7 @@ func (c *Cluster) logStatefulSetChanges(old, new *v1beta1.StatefulSet, isUpdate
if !reflect.DeepEqual(old.Annotations, new.Annotations) {
c.logger.Debugf("metadata.annotation diff\n%s\n", util.PrettyDiff(old.Annotations, new.Annotations))
}
c.logger.Debugf("spec diff\n%s\n", util.PrettyDiff(old.Spec, new.Spec))
c.logger.Debugf("spec diff between old and new statefulsets: \n%s\n", util.PrettyDiff(old.Spec, new.Spec))
if len(reasons) > 0 {
for _, reason := range reasons {
@ -460,6 +460,7 @@ func (c *Cluster) setSpec(newSpec *acidv1.Postgresql) {
c.specMu.Unlock()
}
// GetSpec returns a copy of the operator-side spec of a Postgres cluster in a thread-safe manner
func (c *Cluster) GetSpec() (*acidv1.Postgresql, error) {
c.specMu.RLock()
defer c.specMu.RUnlock()


@ -128,6 +128,29 @@ func (c *Controller) initOperatorConfig() {
c.opConfig = config.NewFromMap(configMapData)
c.warnOnDeprecatedOperatorParameters()
if c.opConfig.SetMemoryRequestToLimit {
isSmaller, err := util.RequestIsSmallerThanLimit(c.opConfig.DefaultMemoryRequest, c.opConfig.DefaultMemoryLimit)
if err != nil {
panic(err)
}
if isSmaller {
c.logger.Warningf("The default memory request of %v for Postgres containers is increased to match the default memory limit of %v.", c.opConfig.DefaultMemoryRequest, c.opConfig.DefaultMemoryLimit)
c.opConfig.DefaultMemoryRequest = c.opConfig.DefaultMemoryLimit
}
isSmaller, err = util.RequestIsSmallerThanLimit(c.opConfig.ScalyrMemoryRequest, c.opConfig.ScalyrMemoryLimit)
if err != nil {
panic(err)
}
if isSmaller {
c.logger.Warningf("The memory request of %v for the Scalyr sidecar container is increased to match the memory limit of %v.", c.opConfig.ScalyrMemoryRequest, c.opConfig.ScalyrMemoryLimit)
c.opConfig.ScalyrMemoryRequest = c.opConfig.ScalyrMemoryLimit
}
// generateStatefulSet adjusts values for individual Postgres clusters
}
}
func (c *Controller) modifyConfigFromEnvironment() {


@ -55,6 +55,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest
result.DefaultCPULimit = fromCRD.PostgresPodResources.DefaultCPULimit
result.DefaultMemoryLimit = fromCRD.PostgresPodResources.DefaultMemoryLimit
result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit
result.ResourceCheckInterval = time.Duration(fromCRD.Timeouts.ResourceCheckInterval)
result.ResourceCheckTimeout = time.Duration(fromCRD.Timeouts.ResourceCheckTimeout)


@ -452,8 +452,14 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1.
if informerOldSpec != nil { //update, delete
uid = informerOldSpec.GetUID()
clusterName = util.NameFromMeta(informerOldSpec.ObjectMeta)
// the user is fixing a previously incorrect spec
if eventType == EventUpdate && informerNewSpec.Error == "" && informerOldSpec.Error != "" {
eventType = EventSync
}
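// i.e. a spec that previously failed validation and has now been corrected is
// queued as a Sync rather than an Update, so the operator re-applies it in full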
// set the current error to the one from the new spec, if present
if informerNewSpec != nil {
clusterError = informerNewSpec.Error
} else {
clusterError = informerOldSpec.Error


@ -49,6 +49,7 @@ type PgUser struct {
Flags []string `yaml:"user_flags"`
MemberOf []string `yaml:"inrole"`
Parameters map[string]string `yaml:"db_parameters"`
AdminRole string `yaml:"admin_role"`
}
// PgUserMap maps user names to the definitions.
@ -125,6 +126,7 @@ func (n *NamespacedName) Decode(value string) error {
return n.DecodeWorker(value, GetOperatorNamespace())
}
// UnmarshalJSON converts a byte slice to NamespacedName
func (n *NamespacedName) UnmarshalJSON(data []byte) error {
result := NamespacedName{}
var tmp string


@ -38,6 +38,7 @@ type Resources struct {
NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""`
MaxInstances int32 `name:"max_instances" default:"-1"`
MinInstances int32 `name:"min_instances" default:"-1"`
ShmVolume bool `name:"enable_shm_volume" default:"true"`
}
// Auth describes authentication specific configuration parameters
@ -89,6 +90,7 @@ type Config struct {
EnableTeamsAPI bool `name:"enable_teams_api" default:"true"`
EnableTeamSuperuser bool `name:"enable_team_superuser" default:"false"`
TeamAdminRole string `name:"team_admin_role" default:"admin"`
EnableAdminRoleForUsers bool `name:"enable_admin_role_for_users" default:"true"`
EnableMasterLoadBalancer bool `name:"enable_master_load_balancer" default:"true"`
EnableReplicaLoadBalancer bool `name:"enable_replica_load_balancer" default:"false"`
// deprecated and kept for backward compatibility
@ -104,6 +106,7 @@ type Config struct {
PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
ProtectedRoles []string `name:"protected_role_names" default:"admin"`
PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""`
SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"`
}
// MustMarshal marshals the config or panics


@ -19,6 +19,7 @@ type fieldInfo struct {
Field reflect.Value
}
// StringTemplate is a convenience alias
type StringTemplate string
func decoderFrom(field reflect.Value) (d decoder) {
@ -221,12 +222,14 @@ func getMapPairsFromString(value string) (pairs []string, err error) {
return
}
// Decode casts a value to a StringTemplate
func (f *StringTemplate) Decode(value string) error {
*f = StringTemplate(value)
return nil
}
// Format produces a formatted string from a StringTemplate
func (f *StringTemplate) Format(a ...string) string {
res := string(*f)
@ -237,6 +240,7 @@ func (f *StringTemplate) Format(a ...string) string {
return res
}
// MarshalJSON converts a StringTemplate to byte slice
func (f StringTemplate) MarshalJSON() ([]byte, error) {
return json.Marshal(string(f))
}


@ -5,6 +5,7 @@ import "time"
// General kubernetes-related constants
const (
PostgresContainerName = "postgres"
PostgresContainerIdx = 0
K8sAPIPath = "/apis"
StatefulsetDeletionInterval = 1 * time.Second
StatefulsetDeletionTimeout = 30 * time.Second


@ -10,4 +10,7 @@ const (
PostgresConnectRetryTimeout = 2 * time.Minute
PostgresConnectTimeout = 15 * time.Second
ShmVolumeName = "dshm"
ShmVolumePath = "/dev/shm"
)


@ -17,8 +17,10 @@ type Ticker struct {
ticker *time.Ticker
}
// Stop is a convenience wrapper around ticker.Stop
func (t *Ticker) Stop() { t.ticker.Stop() }
// Tick is a convenience wrapper around ticker.C
func (t *Ticker) Tick() { <-t.ticker.C }
// Retry is a wrapper around RetryWorker that provides a real RetryTicker


@ -43,6 +43,7 @@ type httpClient interface {
Do(req *http.Request) (*http.Response, error)
}
// Interface defines the contract of the Teams API client
type Interface interface {
TeamInfo(teamID, token string) (tm *Team, err error)
}


@ -5,9 +5,10 @@ import (
"fmt"
"strings"
"reflect"
"github.com/zalando-incubator/postgres-operator/pkg/spec"
"github.com/zalando-incubator/postgres-operator/pkg/util"
"reflect"
)
const (
@ -19,6 +20,7 @@ const (
doBlockStmt = `SET LOCAL synchronous_commit = 'local'; DO $$ BEGIN %s; END;$$;`
passwordTemplate = "ENCRYPTED PASSWORD '%s'"
inRoleTemplate = `IN ROLE %s`
adminTemplate = `ADMIN %s`
)
// DefaultUserSyncStrategy implements a user sync strategy that merges already existing database users
@ -113,6 +115,9 @@ func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.D
if len(user.MemberOf) > 0 {
userFlags = append(userFlags, fmt.Sprintf(inRoleTemplate, quoteMemberList(user)))
}
if user.AdminRole != "" {
userFlags = append(userFlags, fmt.Sprintf(adminTemplate, user.AdminRole))
}
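// Sketch of the effect (not part of the commit; names taken from the test fixtures):
// for a manifest user zalando with team_admin_role set to admin, the flags now
// end with `ADMIN admin`, producing roughly
//	CREATE ROLE zalando SUPERUSER CREATEDB ADMIN admin ENCRYPTED PASSWORD '...';
// which adds admin as a member of zalando WITH ADMIN OPTION, so the team admin
// role can grant the new role onward.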
if user.Password == "" {
userPassword = "PASSWORD NULL"


@ -3,12 +3,14 @@ package util
import (
"crypto/md5" // #nosec we need it to for PostgreSQL md5 passwords
"encoding/hex"
"fmt"
"math/rand"
"regexp"
"strings"
"time"
"github.com/motomux/pretty"
resource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/zalando-incubator/postgres-operator/pkg/spec"
@ -127,3 +129,19 @@ func Coalesce(val, defaultVal string) string {
}
return val
}
// RequestIsSmallerThanLimit parses both quantities and returns true if the request is strictly smaller than the limit
func RequestIsSmallerThanLimit(requestStr, limitStr string) (bool, error) {
request, err := resource.ParseQuantity(requestStr)
if err != nil {
return false, fmt.Errorf("could not parse memory request %v : %v", requestStr, err)
}
limit, err2 := resource.ParseQuantity(limitStr)
if err2 != nil {
return false, fmt.Errorf("could not parse memory limit %v : %v", limitStr, err2)
}
return request.Cmp(limit) == -1, nil
}


@ -69,6 +69,17 @@ var substringMatch = []struct {
{regexp.MustCompile(`aaaa (\d+) bbbb`), "aaaa 123 bbbb", nil},
}
var requestIsSmallerThanLimitTests = []struct {
request string
limit string
out bool
}{
{"1G", "2G", true},
{"1G", "1Gi", true}, // G is 1000^3 bytes, Gi is 1024^3 bytes
{"1024Mi", "1G", false},
{"1e9", "1G", false}, // 1e9 bytes == 1G
}
func TestRandomPassword(t *testing.T) {
const pwdLength = 10
pwd := RandomPassword(pwdLength)
@ -143,3 +154,15 @@ func TestMapContains(t *testing.T) {
}
}
}
func TestRequestIsSmallerThanLimit(t *testing.T) {
for _, tt := range requestIsSmallerThanLimitTests {
res, err := RequestIsSmallerThanLimit(tt.request, tt.limit)
if err != nil {
t.Errorf("RequestIsSmallerThanLimit returned unexpected error: %#v", err)
}
if res != tt.out {
t.Errorf("RequestIsSmallerThanLimit expected: %#v, got: %#v", tt.out, res)
}
}
}


@ -94,7 +94,7 @@ function build_operator_binary(){
# redirecting stderr greatly reduces non-informative output during normal builds
echo "Build operator binary (stderr redirected to /dev/null)..."
make tools deps local > /dev/null 2>&1
make clean tools deps local test > /dev/null 2>&1
}
@ -121,7 +121,7 @@ function deploy_self_built_image() {
# update the tag in the postgres operator conf
# since the image with this tag already exists on the machine,
# docker should not attempt to fetch it from the registry due to imagePullPolicy
sed --expression "s/\(image\:.*\:\).*$/\1$TAG/" manifests/postgres-operator.yaml > "$PATH_TO_LOCAL_OPERATOR_MANIFEST"
sed --expression "s/\(image\:.*\:\).*$/\1$TAG/; s/smoke-tested-//" manifests/postgres-operator.yaml > "$PATH_TO_LOCAL_OPERATOR_MANIFEST"
retry "kubectl create -f \"$PATH_TO_LOCAL_OPERATOR_MANIFEST\"" "attempt to create $PATH_TO_LOCAL_OPERATOR_MANIFEST resource"
}
@ -215,6 +215,7 @@ function main(){
clean_up
start_minikube
kubectl create namespace test
start_operator
forward_ports
check_health