commit 36bb4e77df
merge with master
Makefile | 3
@@ -79,7 +79,8 @@ scm-source.json: .git

 tools:
 	GO111MODULE=on go get -u honnef.co/go/tools/cmd/staticcheck
-	GO111MODULE=on go get k8s.io/client-go@kubernetes-1.16.0
+	GO111MODULE=on go get k8s.io/client-go@kubernetes-1.16.3
+	GO111MODULE=on go mod tidy

 fmt:
 	@gofmt -l -w -s $(DIRS)

@@ -107,10 +107,14 @@ spec:
             type: object
             additionalProperties:
               type: string
+          enable_init_containers:
+            type: boolean
           enable_pod_antiaffinity:
             type: boolean
           enable_pod_disruption_budget:
             type: boolean
+          enable_sidecars:
+            type: boolean
           infrastructure_roles_secret_name:
             type: string
           inherited_labels:
@@ -298,3 +302,7 @@ spec:
             pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
           scalyr_server_url:
             type: string
+  status:
+    type: object
+    additionalProperties:
+      type: string

@@ -222,7 +222,7 @@ spec:
                 # only the format of the given number.
                 #
                 # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu
-                pattern: '^(\d+m|\d+\.\d{1,3})$'
+                pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                 # Note: the value specified here must not be zero or be lower
                 # than the corresponding request.
               memory:
@@ -253,7 +253,7 @@ spec:
                 # only the format of the given number.
                 #
                 # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu
-                pattern: '^(\d+m|\d+\.\d{1,3})$'
+                pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                 # Note: the value specified here must not be zero or be higher
                 # than the corresponding limit.
               memory:

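The relaxed CPU pattern above also accepts whole-number values such as `1`, which the old pattern rejected. A minimal Go sketch to verify the two regular expressions side by side (not part of the commit; the test values are illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Old pattern: requires either the "m" suffix or a mandatory decimal part.
	oldCPU := regexp.MustCompile(`^(\d+m|\d+\.\d{1,3})$`)
	// New pattern: the decimal part is optional, so plain integers like "1" match too.
	newCPU := regexp.MustCompile(`^(\d+m|\d+(\.\d{1,3})?)$`)

	for _, v := range []string{"250m", "0.5", "1", "2.25"} {
		fmt.Printf("%-6s old=%-5v new=%v\n", v, oldCPU.MatchString(v), newCPU.MatchString(v))
	}
}
```

Running it shows that only the value "1" changes verdict between the two patterns, which is the whole point of the CRD validation update repeated in the hunks below.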
@@ -59,10 +59,14 @@ configKubernetes:
   # annotations attached to each database pod
   # custom_pod_annotations: keya:valuea,keyb:valueb

+  # enables initContainers to run actions before Spilo is started
+  enable_init_containers: "true"
   # toggles pod anti affinity on the Postgres pods
   enable_pod_antiaffinity: "false"
   # toggles PDB to set to MinAvailabe 0 or 1
   enable_pod_disruption_budget: "true"
+  # enables sidecar containers to run alongside Spilo in the same pod
+  enable_sidecars: "true"
   # name of the secret containing infrastructure roles names and passwords
   # infrastructure_roles_secret_name: postgresql-infrastructure-roles

@@ -63,10 +63,14 @@ configKubernetes:
   #   keya: valuea
   #   keyb: valueb

+  # enables initContainers to run actions before Spilo is started
+  enable_init_containers: true
   # toggles pod anti affinity on the Postgres pods
   enable_pod_antiaffinity: false
   # toggles PDB to set to MinAvailabe 0 or 1
   enable_pod_disruption_budget: true
+  # enables sidecar containers to run alongside Spilo in the same pod
+  enable_sidecars: true
   # name of the secret containing infrastructure roles names and passwords
   # infrastructure_roles_secret_name: postgresql-infrastructure-roles

@@ -41,8 +41,8 @@ function aws_upload {
     args=()

     [[ ! -z "$EXPECTED_SIZE" ]] && args+=("--expected-size=$EXPECTED_SIZE")
-    [[ ! -z "$LOGICAL_BACKUP_S3_ENDPOINT" ]] && args+=("--endpoint-url=\"$LOGICAL_BACKUP_S3_ENDPOINT\"")
-    [[ ! "$LOGICAL_BACKUP_S3_SSE" == "" ]] && args+=("--sse=\"$LOGICAL_BACKUP_S3_SSE\"")
+    [[ ! -z "$LOGICAL_BACKUP_S3_ENDPOINT" ]] && args+=("--endpoint-url=$LOGICAL_BACKUP_S3_ENDPOINT")
+    [[ ! "$LOGICAL_BACKUP_S3_SSE" == "" ]] && args+=("--sse=$LOGICAL_BACKUP_S3_SSE")

     aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}" --debug
 }

@@ -3,6 +3,26 @@
 Learn how to configure and manage the Postgres Operator in your Kubernetes (K8s)
 environment.

+## Minor and major version upgrade
+
+Minor version upgrades for PostgreSQL are handled via updating the Spilo Docker
+image. The operator will carry out a rolling update of Pods which includes a
+switchover (planned failover) of the master to the Pod with the new minor version.
+The switch should usually take less than 5 seconds; still, clients have to
+reconnect.
+
+Major version upgrades are supported via [cloning](user.md#clone-directly). The
+new cluster manifest must have a higher `version` string than the source cluster
+and will be created from a basebackup. Depending on the cluster size, downtime
+in this case can be significant as writes to the database should be stopped and
+all WAL files should be archived first before cloning is started.
+
+Note that simply changing the version string in the `postgresql` manifest does
+not work at present and leads to errors. Neither Patroni nor Postgres Operator
+can do an in-place `pg_upgrade`. Still, it can be executed manually in the
+Postgres container, which is tricky (i.e. systems need to be stopped, replicas
+have to be synced) but of course faster than cloning.
+
 ## CRD Validation

 [CustomResourceDefinitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions)
@@ -95,8 +115,6 @@ is used by the operator to connect to the clusters after creation.

 ## Role-based access control for the operator

-### Service account and cluster roles
-
 The manifest [`operator-service-account-rbac.yaml`](../manifests/operator-service-account-rbac.yaml)
 defines the service account, cluster roles and bindings needed for the operator
 to function under access control restrictions. To deploy the operator with this
@@ -110,6 +128,8 @@ kubectl create -f manifests/postgres-operator.yaml
 kubectl create -f manifests/minimal-postgres-manifest.yaml
 ```

+### Service account and cluster roles
+
 Note that the service account is named `zalando-postgres-operator`. You may have
 to change the `service_account_name` in the operator ConfigMap and
 `serviceAccountName` in the `postgres-operator` deployment appropriately. This
@@ -117,12 +137,6 @@ is done intentionally to avoid breaking those setups that already work with the
 default `operator` account. In the future the operator should ideally be run
 under the `zalando-postgres-operator` service account.

-The service account defined in `operator-service-account-rbac.yaml` acquires
-some privileges not used by the operator (i.e. we only need `list` and `watch`
-on `configmaps` resources). This is also done intentionally to avoid breaking
-things if someone decides to configure the same service account in the
-operator's ConfigMap to run Postgres clusters.
-
 ### Give K8s users access to create/list `postgresqls`

 By default `postgresql` custom resources can only be listed and changed by
@@ -158,7 +172,6 @@ metadata:
   name: postgres-operator
 data:
   toleration: "key:postgres,operator:Exists,effect:NoSchedule"
-  ...
 ```

 For an OperatorConfiguration resource the toleration should be defined like
@@ -173,7 +186,6 @@ configuration:
   kubernetes:
     toleration:
       postgres: "key:postgres,operator:Exists,effect:NoSchedule"
-  ...
 ```

 Note that the K8s version 1.13 brings [taint-based eviction](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/#taint-based-evictions)
@@ -251,7 +263,6 @@ metadata:
   name: postgres-operator
 data:
   inherited_labels: application,environment
-  ...
 ```

 **OperatorConfiguration**
@@ -266,7 +277,6 @@ configuration:
   inherited_labels:
   - application
   - environment
-  ...
 ```

 **cluster manifest**
@@ -280,7 +290,7 @@ metadata:
     application: my-app
     environment: demo
 spec:
   ...
 ```

 **network policy**
@@ -295,7 +305,6 @@ spec:
     matchLabels:
       application: my-app
       environment: demo
-  ...
 ```


@@ -318,7 +327,6 @@ metadata:
 data:
   # referencing config map with custom settings
   pod_environment_configmap: postgres-pod-config
-  ...
 ```

 **OperatorConfiguration**
@@ -332,7 +340,6 @@ configuration:
   kubernetes:
     # referencing config map with custom settings
     pod_environment_configmap: postgres-pod-config
-  ...
 ```

 **referenced ConfigMap `postgres-pod-config`**
@@ -413,12 +420,12 @@ external systems but defined for an individual Postgres cluster in its manifest.
 A typical example is a role for connections from an application that uses the
 database.

-* **Human users** originate from the Teams API that returns a list of the team
-members given a team id. The operator differentiates between (a) product teams
-that own a particular Postgres cluster and are granted admin rights to maintain
-it, and (b) Postgres superuser teams that get the superuser access to all
-Postgres databases running in a K8s cluster for the purposes of maintaining and
-troubleshooting.
+* **Human users** originate from the [Teams API](user.md#teams-api-roles) that
+returns a list of the team members given a team id. The operator differentiates
+between (a) product teams that own a particular Postgres cluster and are granted
+admin rights to maintain it, and (b) Postgres superuser teams that get the
+superuser access to all Postgres databases running in a K8s cluster for the
+purposes of maintaining and troubleshooting.

 ## Understanding rolling update of Spilo pods

@@ -482,7 +489,7 @@ A secret can be pre-provisioned in different ways:

 With the v1.2 release the Postgres Operator is shipped with a browser-based
 configuration user interface (UI) that simplifies managing Postgres clusters
-with the operator. The UI runs with Node.js and comes with it's own docker
+with the operator. The UI runs with Node.js and comes with it's own Docker
 image.

 Run NPM to continuously compile `tags/js` code. Basically, it creates an
@@ -494,14 +501,14 @@ Run NPM to continuously compile `tags/js` code. Basically, it creates an

 To build the Docker image open a shell and change to the `ui` folder. Then run:

-```
+```bash
 docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.2.0 .
 ```

 Apply all manifests for the `ui/manifests` folder to deploy the Postgres
 Operator UI on K8s. For local tests you don't need the Ingress resource.

-```
+```bash
 kubectl apply -f ui/manifests
 ```

@@ -511,6 +518,6 @@ to the K8s and Postgres Operator REST API. You can use the provided
 `run_local.sh` script for this. Make sure it uses the correct URL to your K8s
 API server, e.g. for minikube it would be `https://192.168.99.100:8443`.

-```
+```bash
 ./run_local.sh
 ```

@@ -40,7 +40,7 @@ This would take a while to complete. You have to redo `make deps` every time
 your dependencies list changes, i.e. after adding a new library dependency.

 Build the operator with the `make docker` command. You may define the TAG
-variable to assign an explicit tag to your docker image and the IMAGE to set
+variable to assign an explicit tag to your Docker image and the IMAGE to set
 the image name. By default, the tag is computed with
 `git describe --tags --always --dirty` and the image is
 `registry.opensource.zalan.do/acid/postgres-operator`
@@ -60,10 +60,10 @@ The binary will be placed into the build directory.

 ## Deploying self build image

-The fastest way to run and test your docker image locally is to reuse the docker
-from [minikube](https://github.com/kubernetes/minikube/releases) or use the
-`load docker-image` from [kind](https://kind.sigs.k8s.io/). The following steps
-will get you the docker image built and deployed.
+The fastest way to run and test your Docker image locally is to reuse the Docker
+environment from [minikube](https://github.com/kubernetes/minikube/releases)
+or use the `load docker-image` from [kind](https://kind.sigs.k8s.io/). The
+following steps will get you the Docker image built and deployed.

 ```bash
 # minikube
@@ -163,7 +163,7 @@ The operator also supports pprof endpoints listed at the
 * /debug/pprof/trace

 It's possible to attach a debugger to troubleshoot postgres-operator inside a
-docker container. It's possible with [gdb](https://www.gnu.org/software/gdb/)
+Docker container. It's possible with [gdb](https://www.gnu.org/software/gdb/)
 and [delve](https://github.com/derekparker/delve). Since the latter one is a
 specialized debugger for Go, we will use it as an example. To use it you need:

@@ -13,7 +13,7 @@ manages PostgreSQL clusters on Kubernetes (K8s):

 2. The operator also watches updates to [its own configuration](../manifests/configmap.yaml)
    and alters running Postgres clusters if necessary. For instance, if the
-   docker image in a pod is changed, the operator carries out the rolling
+   Docker image in a pod is changed, the operator carries out the rolling
    update, which means it re-spawns pods of each managed StatefulSet one-by-one
    with the new Docker image.

@@ -157,9 +157,12 @@ export PGPORT=$(echo $HOST_PORT | cut -d: -f 2)
 ```

 Retrieve the password from the K8s Secret that is created in your cluster.
+Non-encrypted connections are rejected by default, so set the SSL mode to
+require:

 ```bash
 export PGPASSWORD=$(kubectl get secret postgres.acid-minimal-cluster.credentials -o 'jsonpath={.data.password}' | base64 -d)
+export PGSSLMODE=require
 psql -U postgres
 ```

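Since lib/pq is already a dependency in this repository's `go.mod` (see the go.mod hunk further below), the same `sslmode=require` connection can be expressed in Go. This is only a hedged sketch: the host, port, and password lookup are illustrative assumptions and not part of the commit.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"os"

	_ "github.com/lib/pq" // registers the "postgres" driver
)

func main() {
	// PGPASSWORD would come from the K8s secret, as in the shell example above.
	dsn := fmt.Sprintf(
		"host=localhost port=6432 user=postgres password=%s dbname=postgres sslmode=require",
		os.Getenv("PGPASSWORD"),
	)
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Ping forces an actual connection, so a setup that rejects non-SSL clients fails here.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected with sslmode=require")
}
```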
@@ -62,7 +62,7 @@ These parameters are grouped directly under the `spec` key in the manifest.
   field.

 * **dockerImage**
-  custom docker image that overrides the **docker_image** operator parameter.
+  custom Docker image that overrides the **docker_image** operator parameter.
   It should be a [Spilo](https://github.com/zalando/spilo) image. Optional.

 * **spiloFSGroup**
@@ -124,7 +124,7 @@ These parameters are grouped directly under the `spec` key in the manifest.


 * **enableShmVolume**
-  Start a database pod without limitations on shm memory. By default docker
+  Start a database pod without limitations on shm memory. By default Docker
   limit `/dev/shm` to `64M` (see e.g. the [docker
   issue](https://github.com/docker-library/postgres/issues/416), which could be
   not enough if PostgreSQL uses parallel workers heavily. If this option is
@@ -185,19 +185,19 @@ explanation of `ttl` and `loop_wait` parameters.

 * **ttl**
   Patroni `ttl` parameter value, optional. The default is set by the Spilo
-  docker image. Optional.
+  Docker image. Optional.

 * **loop_wait**
   Patroni `loop_wait` parameter value, optional. The default is set by the
-  Spilo docker image. Optional.
+  Spilo Docker image. Optional.

 * **retry_timeout**
   Patroni `retry_timeout` parameter value, optional. The default is set by the
-  Spilo docker image. Optional.
+  Spilo Docker image. Optional.

 * **maximum_lag_on_failover**
   Patroni `maximum_lag_on_failover` parameter value, optional. The default is
-  set by the Spilo docker image. Optional.
+  set by the Spilo Docker image. Optional.

 * **slots**
   permanent replication slots that Patroni preserves after failover by
@@ -320,7 +320,7 @@ defined in the sidecar dictionary:
   name of the sidecar. Required.

 * **image**
-  docker image of the sidecar. Required.
+  Docker image of the sidecar. Required.

 * **env**
   a dictionary of environment variables. Use usual Kubernetes definition

@@ -81,15 +81,15 @@ Those are top-level keys, containing both leaf keys and groups.
   Kubernetes-native DCS).

 * **docker_image**
-  Spilo docker image for Postgres instances. For production, don't rely on the
+  Spilo Docker image for Postgres instances. For production, don't rely on the
   default image, as it might be not the most up-to-date one. Instead, build
   your own Spilo image from the [github
   repository](https://github.com/zalando/spilo).

 * **sidecar_docker_images**
-  a map of sidecar names to docker images for the containers to run alongside
-  Spilo. In case of the name conflict with the definition in the cluster
-  manifest the cluster-specific one is preferred.
+  a map of sidecar names to Docker images to run with Spilo. In case of the name
+  conflict with the definition in the cluster manifest the cluster-specific one
+  is preferred.

 * **enable_shm_volume**
   Instruct operator to start any new database pod without limitations on shm
@@ -196,6 +196,14 @@ configuration they are grouped under the `kubernetes` key.
   [admin docs](../administrator.md#pod-disruption-budget) for more information.
   Default is true.

+* **enable_init_containers**
+  global option to allow for creating init containers to run actions before
+  Spilo is started. Default is true.
+
+* **enable_sidecars**
+  global option to allow for creating sidecar containers to run alongside Spilo
+  on the same pod. Default is true.
+
 * **secret_name_template**
   a template for the name of the database user secrets generated by the
   operator. `{username}` is replaced with name of the secret, `{cluster}` with

docs/user.md | 141
@@ -13,7 +13,7 @@ kind: postgresql
 metadata:
   name: acid-minimal-cluster
 spec:
-  teamId: "ACID"
+  teamId: "acid"
   volume:
     size: 1Gi
   numberOfInstances: 2
@@ -30,7 +30,7 @@ spec:
   databases:
     foo: zalando
   postgresql:
-    version: "10"
+    version: "11"
 ```

 Once you cloned the Postgres Operator [repository](https://github.com/zalando/postgres-operator)
@@ -40,6 +40,17 @@ you can find this example also in the manifests folder:
 kubectl create -f manifests/minimal-postgres-manifest.yaml
 ```

+Make sure the `spec` section of the manifest contains at least a `teamId`, the
+`numberOfInstances` and the `postgresql` object with the `version` specified.
+The minimum volume size to run the `postgresql` resource on Elastic Block
+Storage (EBS) is `1Gi`.
+
+Note that the name of the cluster must start with the `teamId` and `-`. At
+Zalando we use team IDs (nicknames) to lower the chance of duplicate cluster
+names and colliding entities. The team ID would also be used to query an API to
+get all members of a team and create [database roles](#teams-api-roles) for
+them.
+
 ## Watch pods being created

 ```bash
@@ -62,10 +73,12 @@ kubectl port-forward $PGMASTER 6432:5432

 Open another CLI and connect to the database. Use the generated secret of the
 `postgres` robot user to connect to our `acid-minimal-cluster` master running
-in Minikube:
+in Minikube. As non-encrypted connections are rejected by default set the SSL
+mode to require:

 ```bash
 export PGPASSWORD=$(kubectl get secret postgres.acid-minimal-cluster.credentials -o 'jsonpath={.data.password}' | base64 -d)
+export PGSSLMODE=require
 psql -U postgres -p 6432
 ```

@@ -77,8 +90,7 @@ cluster. It covers three use-cases:
 * `manifest roles`: create application roles specific to the cluster described
   in the manifest.
 * `infrastructure roles`: create application roles that should be automatically
-  created on every
-  cluster managed by the operator.
+  created on every cluster managed by the operator.
 * `teams API roles`: automatically create users for every member of the team
   owning the database cluster.

@@ -128,9 +140,9 @@ The infrastructure roles secret is specified by the `infrastructure_roles_secret`
 parameter. The role definition looks like this (values are base64 encoded):

 ```yaml
 user1: ZGJ1c2Vy
 password1: c2VjcmV0
 inrole1: b3BlcmF0b3I=
 ```

 The block above describes the infrastructure role 'dbuser' with password
@@ -151,19 +163,19 @@ secret and a ConfigMap. The ConfigMap must have the same name as the secret.
 The secret should contain an entry with 'rolename:rolepassword' for each role.

 ```yaml
 dbuser: c2VjcmV0
 ```

 And the role description for that user should be specified in the ConfigMap.

 ```yaml
 data:
   dbuser: |
     inrole: [operator, admin] # following roles will be assigned to the new user
     user_flags:
       - createdb
     db_parameters: # db parameters, applied for this particular user
       log_statement: all
 ```

 One can allow membership in multiple roles via the `inrole` array parameter,
@@ -182,6 +194,50 @@ See [infrastructure roles secret](../manifests/infrastructure-roles.yaml)
 and [infrastructure roles configmap](../manifests/infrastructure-roles-configmap.yaml)
 for the examples.

+### Teams API roles
+
+These roles are meant for database activity of human users. It's possible to
+configure the operator to automatically create database roles for, let's say,
+all employees of one team. They are not listed in the manifest and there are no
+K8s secrets created for them. Instead they would use an OAuth2 token to connect.
+To get all members of the team the operator queries a defined API endpoint that
+returns usernames. A minimal Teams API should work like this:
+
+```
+/.../<teamname> -> ["name","anothername"]
+```
+
+A ["fake" Teams API](../manifests/fake-teams-api.yaml) deployment is provided
+in the manifests folder to set up a basic API around whatever service is used
+for user management. The Teams API's URL is set in the operator's
+[configuration](reference/operator_parameters.md#automatic-creation-of-human-users-in-the-database)
+and `enable_teams_api` must be set to `true`. There are more settings available
+to choose superusers, group roles, [PAM configuration](https://github.com/CyberDem0n/pam-oauth2)
+etc. An OAuth2 token can be passed to the Teams API via a secret. The name for
+this secret is configurable with the `oauth_token_secret_name` parameter.
+
+## Resource definition
+
+The compute resources to be used for the Postgres containers in the pods can be
+specified in the postgresql cluster manifest.
+
+```yaml
+spec:
+  resources:
+    requests:
+      cpu: 10m
+      memory: 100Mi
+    limits:
+      cpu: 300m
+      memory: 300Mi
+```
+
+The minimum limit to properly run the `postgresql` resource is `256m` for `cpu`
+and `256Mi` for `memory`. If a lower value is set in the manifest the operator
+will cancel ADD or UPDATE events on this resource with an error. If no
+resources are defined in the manifest the operator will obtain the configured
+[default requests](reference/operator_parameters.md#kubernetes-resource-requests).
+
 ## Use taints and tolerations for dedicated PostgreSQL nodes

 To ensure Postgres pods are running on nodes without any other application pods,
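The minimal Teams API contract described in the added section above is easy to mock. The following Go sketch only illustrates that contract; the handler path, port, and hard-coded member list are assumptions for this example, unrelated to the `fake-teams-api` manifest, and the real operator may expect a richer response format.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"strings"
)

// membersByTeam is a stand-in for whatever user-management service backs the API.
var membersByTeam = map[string][]string{
	"acid": {"name", "anothername"},
}

func teamHandler(w http.ResponseWriter, r *http.Request) {
	// The operator calls <base URL>/<teamname> and expects a JSON array of usernames.
	team := strings.TrimPrefix(r.URL.Path, "/teams/")
	members, ok := membersByTeam[team]
	if !ok {
		http.NotFound(w, r)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(members)
}

func main() {
	http.HandleFunc("/teams/", teamHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

With this running, `curl localhost:8080/teams/acid` returns `["name","anothername"]`, matching the shape shown in the documentation.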
@@ -189,12 +245,7 @@ you can use [taints and tolerations](https://kubernetes.io/docs/concepts/configu
 and configure the required toleration in the manifest.

 ```yaml
-apiVersion: "acid.zalan.do/v1"
-kind: postgresql
-metadata:
-  name: acid-minimal-cluster
 spec:
-  teamId: "ACID"
   tolerations:
   - key: postgres
     operator: Exists
@@ -212,11 +263,6 @@ section in the spec. There are two options here:
 ### Clone directly

 ```yaml
-apiVersion: "acid.zalan.do/v1"
-kind: postgresql
-
-metadata:
-  name: acid-test-cluster
 spec:
   clone:
     cluster: "acid-batman"
@@ -232,11 +278,6 @@ means that you can clone only from clusters within the same namespace.
 ### Clone from S3

 ```yaml
-apiVersion: "acid.zalan.do/v1"
-kind: postgresql
-
-metadata:
-  name: acid-test-cluster
 spec:
   clone:
     uid: "efd12e58-5786-11e8-b5a7-06148230260c"
@@ -265,10 +306,6 @@ For non AWS S3 following settings can be set to support cloning from other S3
 implementations:

 ```yaml
-apiVersion: "acid.zalan.do/v1"
-kind: postgresql
-metadata:
-  name: acid-test-cluster
 spec:
   clone:
     uid: "efd12e58-5786-11e8-b5a7-06148230260c"
@@ -305,7 +342,7 @@ Things to note:
 - There is no way to transform a non-standby cluster to a standby cluster
   through the operator. Adding the standby section to the manifest of a running
   Postgres cluster will have no effect. However, it can be done through Patroni
-  by adding the [standby_cluster] (https://github.com/zalando/patroni/blob/bd2c54581abb42a7d3a3da551edf0b8732eefd27/docs/replica_bootstrap.rst#standby-cluster)
+  by adding the [standby_cluster](https://github.com/zalando/patroni/blob/bd2c54581abb42a7d3a3da551edf0b8732eefd27/docs/replica_bootstrap.rst#standby-cluster)
   section using `patronictl edit-config`. Note that the transformed standby
   cluster will not be doing any streaming. It will be in standby mode and allow
   read-only transactions only.
@@ -317,13 +354,7 @@ used for log aggregation, monitoring, backups or other tasks. A sidecar can be
 specified like this:

 ```yaml
-apiVersion: "acid.zalan.do/v1"
-kind: postgresql
-
-metadata:
-  name: acid-minimal-cluster
 spec:
-  ...
   sidecars:
     - name: "container-name"
       image: "company/image:tag"
@@ -350,6 +381,10 @@ variables are always passed to sidecars:
 The PostgreSQL volume is shared with sidecars and is mounted at
 `/home/postgres/pgdata`.

+**Note**: The operator will not create a cluster if sidecar containers are
+specified but globally disabled in the configuration. The `enable_sidecars`
+option must be set to `true`.
+
 ## InitContainers Support

 Each cluster can specify arbitrary init containers to run. These containers can
@@ -357,13 +392,7 @@ be used to run custom actions before any normal and sidecar containers start.
 An init container can be specified like this:

 ```yaml
-apiVersion: "acid.zalan.do/v1"
-kind: postgresql
-
-metadata:
-  name: acid-minimal-cluster
 spec:
-  ...
   initContainers:
     - name: "container-name"
       image: "company/image:tag"
@@ -374,18 +403,17 @@ spec:

 `initContainers` accepts full `v1.Container` definition.

+**Note**: The operator will not create a cluster if `initContainers` are
+specified but globally disabled in the configuration. The
+`enable_init_containers` option must be set to `true`.
+
 ## Increase volume size

-PostgreSQL operator supports statefulset volume resize if you're using the
+Postgres operator supports statefulset volume resize if you're using the
 operator on top of AWS. For that you need to change the size field of the
 volume description in the cluster manifest and apply the change:

-```
-apiVersion: "acid.zalan.do/v1"
-kind: postgresql
-
-metadata:
-  name: acid-test-cluster
+```yaml
 spec:
   volume:
     size: 5Gi # new volume size
@@ -414,7 +442,8 @@ size of volumes that correspond to the previously running pods is not changed.
 You can enable logical backups from the cluster manifest by adding the following
 parameter in the spec section:

-```
+```yaml
+spec:
   enableLogicalBackup: true
 ```

@@ -179,20 +179,14 @@ class EndToEndTestCase(unittest.TestCase):

         # update the cluster-wide image of the logical backup pod
         image = "test-image-name"
-        config_patch = {
+        patch_logical_backup_image = {
             "configuration": {
                 "logical_backup": {
                     "logical_backup_docker_image": image,
                 }
             }
         }
-        k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "operatorconfigurations", "postgresql-operator-default-configuration", config_patch)
-
-        operator_pod = k8s.api.core_v1.list_namespaced_pod(
-            'default', label_selector="name=postgres-operator").items[0].metadata.name
-        k8s.api.core_v1.delete_namespaced_pod(operator_pod, "default")  # restart reloads the conf
-        k8s.wait_for_operator_pod_start()
+        k8s.update_config(patch_logical_backup_image)

         jobs = k8s.get_logical_backup_job().items
         actual_image = jobs[0].spec.job_template.spec.template.spec.containers[0].image
@@ -319,12 +313,21 @@ class K8s:
     def wait_for_logical_backup_job_creation(self):
         self.wait_for_logical_backup_job(expected_num_of_jobs=1)

-    def apply_kustomization(self, path):
-        subprocess.run(["kubectl", "apply", "-k", path])
+    def update_config(self, patch):
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            "acid.zalan.do", "v1", "default", "operatorconfigurations", "postgresql-operator-default-configuration", patch)
+
+        operator_pod = self.api.core_v1.list_namespaced_pod(
+            'default', label_selector="name=postgres-operator").items[0].metadata.name
+        self.api.core_v1.delete_namespaced_pod(operator_pod, "default")  # restart reloads the conf
+        self.wait_for_operator_pod_start()

     def create_with_kubectl(self, path):
         subprocess.run(["kubectl", "create", "-f", path])

+    def apply_kustomization(self, path):
+        subprocess.run(["kubectl", "apply", "-k", path])
+

 if __name__ == '__main__':
     unittest.main()

go.mod | 20
@@ -3,23 +3,23 @@ module github.com/zalando/postgres-operator
 go 1.12

 require (
-	github.com/aws/aws-sdk-go v1.25.1
+	github.com/aws/aws-sdk-go v1.25.44
 	github.com/emicklei/go-restful v2.9.6+incompatible // indirect
 	github.com/evanphx/json-patch v4.5.0+incompatible // indirect
 	github.com/googleapis/gnostic v0.3.0 // indirect
-	github.com/imdario/mergo v0.3.7 // indirect
+	github.com/imdario/mergo v0.3.8 // indirect
 	github.com/lib/pq v1.2.0
 	github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
 	github.com/sirupsen/logrus v1.4.2
-	golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c // indirect
-	golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933 // indirect
-	golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 // indirect
-	golang.org/x/tools v0.0.0-20191127201027-ecd32218bd7f // indirect
+	golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect
+	golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect
+	golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 // indirect
+	golang.org/x/tools v0.0.0-20191209225234-22774f7dae43 // indirect
 	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
-	gopkg.in/yaml.v2 v2.2.5
+	gopkg.in/yaml.v2 v2.2.4
 	k8s.io/api v0.0.0-20191121015604-11707872ac1c
-	k8s.io/apiextensions-apiserver v0.0.0-20191121021419-88daf26ec3b8
-	k8s.io/apimachinery v0.0.0-20191121015412-41065c7a8c2a
-	k8s.io/client-go v11.0.0+incompatible
+	k8s.io/apiextensions-apiserver v0.0.0-20191204090421-cd61debedab5
+	k8s.io/apimachinery v0.0.0-20191203211716-adc6f4cd9e7d
+	k8s.io/client-go v0.0.0-20191204082520-bc9b51d240b2
 	k8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e
 )

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash

 set -o errexit
 set -o nounset

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash

 set -o errexit
 set -o nounset

@@ -10,7 +10,7 @@ spec:
     - name: date
       image: busybox
       command: [ "/bin/date" ]
-  teamId: "ACID"
+  teamId: "acid"
   volume:
     size: 1Gi
     # storageClass: my-sc

@@ -23,11 +23,13 @@ data:
   # enable_admin_role_for_users: "true"
   # enable_crd_validation: "true"
   # enable_database_access: "true"
+  # enable_init_containers: "true"
   enable_master_load_balancer: "false"
   # enable_pod_antiaffinity: "false"
   # enable_pod_disruption_budget: "true"
   enable_replica_load_balancer: "false"
   # enable_shm_volume: "true"
+  # enable_sidecars: "true"
   # enable_team_superuser: "false"
   enable_teams_api: "false"
   # etcd_host: ""

@@ -4,7 +4,7 @@ metadata:
   name: acid-minimal-cluster
   namespace: default
 spec:
-  teamId: "ACID"
+  teamId: "acid"
   volume:
     size: 1Gi
   numberOfInstances: 2

@@ -83,10 +83,14 @@ spec:
             type: object
             additionalProperties:
               type: string
+          enable_init_containers:
+            type: boolean
           enable_pod_antiaffinity:
             type: boolean
           enable_pod_disruption_budget:
             type: boolean
+          enable_sidecars:
+            type: boolean
           infrastructure_roles_secret_name:
             type: string
           inherited_labels:
@@ -274,3 +278,7 @@ spec:
             pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
           scalyr_server_url:
             type: string
+  status:
+    type: object
+    additionalProperties:
+      type: string

@@ -26,14 +26,16 @@ configuration:
     # custom_pod_annotations:
     #   keya: valuea
     #   keyb: valueb
+    enable_init_containers: true
     enable_pod_antiaffinity: false
     enable_pod_disruption_budget: true
-    # infrastructure_roles_secret_name: postgresql-infrastructure-roles
+    enable_sidecars: true
+    # infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
     # inherited_labels:
     # - application
     # - environment
     # node_readiness_label:
     #   status: ready
     oauth_token_secret_name: postgresql-operator
     pdb_name_format: "postgres-{cluster}-pdb"
     pod_antiaffinity_topology_key: "kubernetes.io/hostname"

@@ -186,7 +186,7 @@ spec:
                 # only the format of the given number.
                 #
                 # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu
-                pattern: '^(\d+m|\d+\.\d{1,3})$'
+                pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                 # Note: the value specified here must not be zero or be lower
                 # than the corresponding request.
               memory:
@@ -217,7 +217,7 @@ spec:
                 # only the format of the given number.
                 #
                 # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu
-                pattern: '^(\d+m|\d+\.\d{1,3})$'
+                pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                 # Note: the value specified here must not be zero or be higher
                 # than the corresponding limit.
               memory:
@@ -325,3 +325,7 @@ spec:
                 type: string
               subPath:
                 type: string
+  status:
+    type: object
+    additionalProperties:
+      type: string

@@ -4,7 +4,7 @@ metadata:
   name: acid-standby-cluster
   namespace: default
 spec:
-  teamId: "ACID"
+  teamId: "acid"
   volume:
     size: 1Gi
   numberOfInstances: 1

@@ -356,7 +356,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 					"cpu": {
 						Type:        "string",
 						Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0",
-						Pattern:     "^(\\d+m|\\d+\\.\\d{1,3})$",
+						Pattern:     "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
 					},
 					"memory": {
 						Type:        "string",
@@ -372,7 +372,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 					"cpu": {
 						Type:        "string",
 						Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0",
-						Pattern:     "^(\\d+m|\\d+\\.\\d{1,3})$",
+						Pattern:     "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
 					},
 					"memory": {
 						Type:        "string",
@@ -578,6 +578,14 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 				},
 			},
 		},
+		"status": {
+			Type: "object",
+			AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{
+				Schema: &apiextv1beta1.JSONSchemaProps{
+					Type: "string",
+				},
+			},
+		},
 	},
 },
 }
@@ -686,12 +694,18 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
 				},
 			},
 		},
+		"enable_init_containers": {
+			Type: "boolean",
+		},
 		"enable_pod_antiaffinity": {
 			Type: "boolean",
 		},
 		"enable_pod_disruption_budget": {
 			Type: "boolean",
 		},
+		"enable_sidecars": {
+			Type: "boolean",
+		},
 		"infrastructure_roles_secret_name": {
 			Type: "string",
 		},
@@ -994,6 +1008,14 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
 				},
 			},
 		},
+		"status": {
+			Type: "object",
+			AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{
+				Schema: &apiextv1beta1.JSONSchemaProps{
+					Type: "string",
+				},
+			},
+		},
 	},
 },
 }

@@ -50,6 +50,8 @@ type KubernetesMetaConfiguration struct {
 WatchedNamespace string `json:"watched_namespace,omitempty"`
 PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"`
 EnablePodDisruptionBudget *bool `json:"enable_pod_disruption_budget,omitempty"`
+EnableInitContainers *bool `json:"enable_init_containers,omitempty"`
+EnableSidecars *bool `json:"enable_sidecars,omitempty"`
 SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"`
 ClusterDomain string `json:"cluster_domain"`
 OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"`
@@ -180,7 +180,7 @@ var unmarshalCluster = []struct {
 "name": "acid-testcluster1"
 },
 "spec": {
-"teamId": "ACID",
+"teamId": "acid",
 "pod_priority_class_name": "spilo-pod-priority",
 "volume": {
 "size": "5Gi",

@@ -290,7 +290,7 @@ var unmarshalCluster = []struct {
 ResourceLimits: ResourceDescription{CPU: "300m", Memory: "3000Mi"},
 },

-TeamID: "ACID",
+TeamID: "acid",
 AllowedSourceRanges: []string{"127.0.0.1/32"},
 NumberOfInstances: 2,
 Users: map[string]UserFlags{"zalando": {"superuser", "createdb"}},

@@ -319,7 +319,7 @@ var unmarshalCluster = []struct {
 },
 Error: "",
 },
-marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
+marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
 err: nil},
 // example with teamId set in input
 {
@@ -81,6 +81,16 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
 *out = new(bool)
 **out = **in
 }
+if in.EnableInitContainers != nil {
+in, out := &in.EnableInitContainers, &out.EnableInitContainers
+*out = new(bool)
+**out = **in
+}
+if in.EnableSidecars != nil {
+in, out := &in.EnableSidecars, &out.EnableSidecars
+*out = new(bool)
+**out = **in
+}
 out.OAuthTokenSecretName = in.OAuthTokenSecretName
 out.InfrastructureRolesSecretName = in.InfrastructureRolesSecretName
 if in.ClusterLabels != nil {
@@ -227,6 +227,10 @@ func (c *Cluster) Create() error {
 c.setStatus(acidv1.ClusterStatusCreating)

+if err = c.validateResources(&c.Spec); err != nil {
+return fmt.Errorf("insufficient resource limits specified: %v", err)
+}
+
 for _, role := range []PostgresRole{Master, Replica} {

 if c.Endpoints[role] != nil {

@@ -491,6 +495,44 @@ func compareResourcesAssumeFirstNotNil(a *v1.ResourceRequirements, b *v1.Resourc
 }

+func (c *Cluster) validateResources(spec *acidv1.PostgresSpec) error {
+
+// setting limits too low can cause unnecessary evictions / OOM kills
+const (
+cpuMinLimit = "256m"
+memoryMinLimit = "256Mi"
+)
+
+var (
+isSmaller bool
+err error
+)
+
+cpuLimit := spec.Resources.ResourceLimits.CPU
+if cpuLimit != "" {
+isSmaller, err = util.IsSmallerQuantity(cpuLimit, cpuMinLimit)
+if err != nil {
+return fmt.Errorf("error validating CPU limit: %v", err)
+}
+if isSmaller {
+return fmt.Errorf("defined CPU limit %s is below required minimum %s to properly run postgresql resource", cpuLimit, cpuMinLimit)
+}
+}
+
+memoryLimit := spec.Resources.ResourceLimits.Memory
+if memoryLimit != "" {
+isSmaller, err = util.IsSmallerQuantity(memoryLimit, memoryMinLimit)
+if err != nil {
+return fmt.Errorf("error validating memory limit: %v", err)
+}
+if isSmaller {
+return fmt.Errorf("defined memory limit %s is below required minimum %s to properly run postgresql resource", memoryLimit, memoryMinLimit)
+}
+}
+
+return nil
+}
+
 // Update changes Kubernetes objects according to the new specification. Unlike the sync case, the missing object
 // (i.e. service) is treated as an error
 // logical backup cron jobs are an exception: a user-initiated Update can enable a logical backup job
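The new validateResources guard compares the configured limits against fixed minimums (256m CPU, 256Mi memory) using Kubernetes quantity semantics. A small standalone sketch, not part of the commit and using example values only, of which limits would be rejected:

// Mirrors the comparison done by util.IsSmallerQuantity in the hunk above.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpuMin := resource.MustParse("256m")
	memMin := resource.MustParse("256Mi")

	for _, cpu := range []string{"200m", "256m", "0.5", "1"} {
		q := resource.MustParse(cpu)
		// Cmp returns -1 when q is smaller than cpuMin, i.e. the limit would be rejected.
		fmt.Printf("cpu limit %-5s rejected=%v\n", cpu, q.Cmp(cpuMin) == -1)
	}
	for _, mem := range []string{"128Mi", "256Mi", "1Gi"} {
		q := resource.MustParse(mem)
		fmt.Printf("memory limit %-6s rejected=%v\n", mem, q.Cmp(memMin) == -1)
	}
}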
@@ -501,6 +543,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 c.mu.Lock()
 defer c.mu.Unlock()

+oldStatus := c.Status
 c.setStatus(acidv1.ClusterStatusUpdating)
 c.setSpec(newSpec)

@@ -512,6 +555,22 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 }
 }()

+if err := c.validateResources(&newSpec.Spec); err != nil {
+err = fmt.Errorf("insufficient resource limits specified: %v", err)
+
+// cancel update only when (already too low) pod resources were edited
+// if cluster was successfully running before the update, continue but log a warning
+isCPULimitSmaller, err2 := util.IsSmallerQuantity(newSpec.Spec.Resources.ResourceLimits.CPU, oldSpec.Spec.Resources.ResourceLimits.CPU)
+isMemoryLimitSmaller, err3 := util.IsSmallerQuantity(newSpec.Spec.Resources.ResourceLimits.Memory, oldSpec.Spec.Resources.ResourceLimits.Memory)
+
+if oldStatus.Running() && !isCPULimitSmaller && !isMemoryLimitSmaller && err2 == nil && err3 == nil {
+c.logger.Warning(err)
+} else {
+updateFailed = true
+return err
+}
+}
+
 if oldSpec.Spec.PgVersion != newSpec.Spec.PgVersion { // PG versions comparison
 c.logger.Warningf("postgresql version change(%q -> %q) has no effect", oldSpec.Spec.PgVersion, newSpec.Spec.PgVersion)
 //we need that hack to generate statefulset with the old version
@@ -720,6 +720,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 var (
 err error
+initContainers []v1.Container
 sidecarContainers []v1.Container
 podTemplate *v1.PodTemplateSpec
 volumeClaimTemplate *v1.PersistentVolumeClaim

@@ -740,7 +741,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 limit = c.OpConfig.DefaultMemoryLimit
 }

-isSmaller, err := util.RequestIsSmallerThanLimit(request, limit)
+isSmaller, err := util.IsSmallerQuantity(request, limit)
 if err != nil {
 return nil, err
 }

@@ -767,7 +768,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 limit = c.OpConfig.DefaultMemoryLimit
 }

-isSmaller, err := util.RequestIsSmallerThanLimit(sidecarRequest, sidecarLimit)
+isSmaller, err := util.IsSmallerQuantity(sidecarRequest, sidecarLimit)
 if err != nil {
 return nil, err
 }

@@ -786,6 +787,13 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 return nil, fmt.Errorf("could not generate resource requirements: %v", err)
 }

+if spec.InitContainers != nil && len(spec.InitContainers) > 0 {
+if c.OpConfig.EnableInitContainers != nil && !(*c.OpConfig.EnableInitContainers) {
+c.logger.Warningf("initContainers specified but disabled in configuration - next statefulset creation would fail")
+}
+initContainers = spec.InitContainers
+}
+
 customPodEnvVarsList := make([]v1.EnvVar, 0)

 if c.OpConfig.PodEnvironmentConfigMap != "" {
@@ -872,9 +880,14 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 }

 // generate sidecar containers
-if sidecarContainers, err = generateSidecarContainers(sideCars, volumeMounts, defaultResources,
-c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger); err != nil {
-return nil, fmt.Errorf("could not generate sidecar containers: %v", err)
+if sideCars != nil && len(sideCars) > 0 {
+if c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) {
+c.logger.Warningf("sidecars specified but disabled in configuration - next statefulset creation would fail")
+}
+if sidecarContainers, err = generateSidecarContainers(sideCars, volumeMounts, defaultResources,
+c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger); err != nil {
+return nil, fmt.Errorf("could not generate sidecar containers: %v", err)
+}
 }

 tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)

@@ -894,7 +907,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 c.labelsSet(true),
 annotations,
 spiloContainer,
-spec.InitContainers,
+initContainers,
 sidecarContainers,
 &tolerationSpec,
 effectiveFSGroup,

@@ -1412,7 +1425,7 @@ func (c *Cluster) generatePodDisruptionBudget() *policybeta1.PodDisruptionBudget
 pdbEnabled := c.OpConfig.EnablePodDisruptionBudget

 // if PodDisruptionBudget is disabled or if there are no DB pods, set the budget to 0.
-if (pdbEnabled != nil && !*pdbEnabled) || c.Spec.NumberOfInstances <= 0 {
+if (pdbEnabled != nil && !(*pdbEnabled)) || c.Spec.NumberOfInstances <= 0 {
 minAvailable = intstr.FromInt(0)
 }
@@ -3,7 +3,7 @@ package cluster
 import (
 "reflect"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"

 "testing"

@@ -65,6 +65,17 @@ func (c *Cluster) listResources() error {

 func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) {
 c.setProcessName("creating statefulset")
+// check if it's allowed that spec contains initContainers
+if c.Spec.InitContainers != nil && len(c.Spec.InitContainers) > 0 &&
+c.OpConfig.EnableInitContainers != nil && !(*c.OpConfig.EnableInitContainers) {
+return nil, fmt.Errorf("initContainers specified but disabled in configuration")
+}
+// check if it's allowed that spec contains sidecars
+if c.Spec.Sidecars != nil && len(c.Spec.Sidecars) > 0 &&
+c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) {
+return nil, fmt.Errorf("sidecar containers specified but disabled in configuration")
+}
+
 statefulSetSpec, err := c.generateStatefulSet(&c.Spec)
 if err != nil {
 return nil, fmt.Errorf("could not generate statefulset: %v", err)
@@ -23,6 +23,7 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 c.mu.Lock()
 defer c.mu.Unlock()

+oldStatus := c.Status
 c.setSpec(newSpec)

 defer func() {

@@ -34,6 +35,16 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 }
 }()

+if err = c.validateResources(&c.Spec); err != nil {
+err = fmt.Errorf("insufficient resource limits specified: %v", err)
+if oldStatus.Running() {
+c.logger.Warning(err)
+err = nil
+} else {
+return err
+}
+}
+
 if err = c.initUsers(); err != nil {
 err = fmt.Errorf("could not init users: %v", err)
 return err
@@ -5,7 +5,7 @@ import (

 acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 appsv1 "k8s.io/api/apps/v1"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 policybeta1 "k8s.io/api/policy/v1beta1"
 "k8s.io/apimachinery/pkg/types"
 )

@@ -111,7 +111,7 @@ func (c *Controller) initOperatorConfig() {

 if c.opConfig.SetMemoryRequestToLimit {

-isSmaller, err := util.RequestIsSmallerThanLimit(c.opConfig.DefaultMemoryRequest, c.opConfig.DefaultMemoryLimit)
+isSmaller, err := util.IsSmallerQuantity(c.opConfig.DefaultMemoryRequest, c.opConfig.DefaultMemoryLimit)
 if err != nil {
 panic(err)
 }

@@ -120,7 +120,7 @@ func (c *Controller) initOperatorConfig() {
 c.opConfig.DefaultMemoryRequest = c.opConfig.DefaultMemoryLimit
 }

-isSmaller, err = util.RequestIsSmallerThanLimit(c.opConfig.ScalyrMemoryRequest, c.opConfig.ScalyrMemoryLimit)
+isSmaller, err = util.IsSmallerQuantity(c.opConfig.ScalyrMemoryRequest, c.opConfig.ScalyrMemoryLimit)
 if err != nil {
 panic(err)
 }
@@ -54,6 +54,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
 result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat
 result.EnablePodDisruptionBudget = fromCRD.Kubernetes.EnablePodDisruptionBudget
+result.EnableInitContainers = fromCRD.Kubernetes.EnableInitContainers
+result.EnableSidecars = fromCRD.Kubernetes.EnableSidecars
 result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate
 result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName
 result.InfrastructureRolesSecretName = fromCRD.Kubernetes.InfrastructureRolesSecretName

@@ -9,7 +9,7 @@ import (

 "github.com/zalando/postgres-operator/pkg/spec"
 "github.com/zalando/postgres-operator/pkg/util/k8sutil"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -123,6 +123,8 @@ type Config struct {
 ReplicaDNSNameFormat StringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"`
 PDBNameFormat StringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"`
 EnablePodDisruptionBudget *bool `name:"enable_pod_disruption_budget" default:"true"`
+EnableInitContainers *bool `name:"enable_init_containers" default:"true"`
+EnableSidecars *bool `name:"enable_sidecars" default:"true"`
 Workers uint32 `name:"workers" default:"4"`
 APIPort int `name:"api_port" default:"8080"`
 RingLogLines int `name:"ring_log_lines" default:"100"`
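The toggles are declared as *bool rather than bool so that an unset value falls back to the default of true while an explicit false disables the feature; the guards added elsewhere in this commit check flag != nil && !(*flag). A minimal sketch of that semantics, with the helper name isDisabled invented purely for illustration:

// Illustrative only: shows why the new toggles are pointers to bool.
package main

import "fmt"

func isDisabled(flag *bool) bool {
	// mirrors the guards in the commit: flag != nil && !(*flag)
	return flag != nil && !*flag
}

func main() {
	var unset *bool
	enabled := true
	disabled := false

	fmt.Println(isDisabled(unset))     // false -> feature stays enabled (default true)
	fmt.Println(isDisabled(&enabled))  // false
	fmt.Println(isDisabled(&disabled)) // true  -> init containers / sidecars rejected
}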
@@ -24,7 +24,7 @@ var teamsAPItc = []struct {
 {`{
 "dn": "cn=100100,ou=official,ou=foobar,dc=zalando,dc=net",
 "id": "acid",
-"id_name": "ACID",
+"id_name": "acid",
 "team_id": "111222",
 "type": "official",
 "name": "Acid team name",

@@ -70,7 +70,7 @@ var teamsAPItc = []struct {
 &Team{
 Dn: "cn=100100,ou=official,ou=foobar,dc=zalando,dc=net",
 ID: "acid",
-TeamName: "ACID",
+TeamName: "acid",
 TeamID: "111222",
 Type: "official",
 FullName: "Acid team name",
@@ -141,17 +141,17 @@ func Coalesce(val, defaultVal string) string {
 return val
 }

-// RequestIsSmallerThanLimit : ...
-func RequestIsSmallerThanLimit(requestStr, limitStr string) (bool, error) {
+// IsSmallerQuantity : checks if first resource is of a smaller quantity than the second
+func IsSmallerQuantity(requestStr, limitStr string) (bool, error) {

 request, err := resource.ParseQuantity(requestStr)
 if err != nil {
-return false, fmt.Errorf("could not parse memory request %v : %v", requestStr, err)
+return false, fmt.Errorf("could not parse request %v : %v", requestStr, err)
 }

 limit, err2 := resource.ParseQuantity(limitStr)
 if err2 != nil {
-return false, fmt.Errorf("could not parse memory limit %v : %v", limitStr, err2)
+return false, fmt.Errorf("could not parse limit %v : %v", limitStr, err2)
 }

 return request.Cmp(limit) == -1, nil
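After the rename, the helper compares any two Kubernetes quantities rather than only a request against its limit, and the comparison is unit-aware because resource.ParseQuantity normalizes the notation before Cmp is called. A self-contained sketch, not taken from the repository, of that behavior with example values:

// Demonstrates unit-aware quantity comparison as used by IsSmallerQuantity.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func isSmallerQuantity(a, b string) (bool, error) {
	qa, err := resource.ParseQuantity(a)
	if err != nil {
		return false, fmt.Errorf("could not parse %q: %v", a, err)
	}
	qb, err := resource.ParseQuantity(b)
	if err != nil {
		return false, fmt.Errorf("could not parse %q: %v", b, err)
	}
	return qa.Cmp(qb) == -1, nil
}

func main() {
	pairs := [][2]string{
		{"500m", "0.5"}, // equal CPU quantities in different notations -> false
		{"200m", "256m"},
		{"1G", "1Gi"}, // 10^9 bytes is smaller than 2^30 bytes -> true
	}
	for _, p := range pairs {
		smaller, err := isSmallerQuantity(p[0], p[1])
		fmt.Println(p[0], "<", p[1], "=", smaller, err)
	}
}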
@@ -69,7 +69,7 @@ var substringMatch = []struct {
 {regexp.MustCompile(`aaaa (\d+) bbbb`), "aaaa 123 bbbb", nil},
 }

-var requestIsSmallerThanLimitTests = []struct {
+var requestIsSmallerQuantityTests = []struct {
 request string
 limit string
 out bool

@@ -155,14 +155,14 @@ func TestMapContains(t *testing.T) {
 }
 }

-func TestRequestIsSmallerThanLimit(t *testing.T) {
-for _, tt := range requestIsSmallerThanLimitTests {
-res, err := RequestIsSmallerThanLimit(tt.request, tt.limit)
+func TestIsSmallerQuantity(t *testing.T) {
+for _, tt := range requestIsSmallerQuantityTests {
+res, err := IsSmallerQuantity(tt.request, tt.limit)
 if err != nil {
-t.Errorf("RequestIsSmallerThanLimit returned unexpected error: %#v", err)
+t.Errorf("IsSmallerQuantity returned unexpected error: %#v", err)
 }
 if res != tt.out {
-t.Errorf("RequestIsSmallerThanLimit expected: %#v, got: %#v", tt.out, res)
+t.Errorf("IsSmallerQuantity expected: %#v, got: %#v", tt.out, res)
 }
 }
 }