merge with master

This commit is contained in: commit f9b8b6374f

@@ -1,7 +1,7 @@
apiVersion: v1
name: postgres-operator-ui
version: 0.1.0
appVersion: 1.3.0
version: 1.4.0
appVersion: 1.4.0
home: https://github.com/zalando/postgres-operator
description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
keywords:

@@ -0,0 +1,29 @@
apiVersion: v1
entries:
postgres-operator-ui:
- apiVersion: v1
appVersion: 1.4.0
created: "2020-02-24T15:32:47.610967635+01:00"
description: Postgres Operator UI provides a graphical interface for a convenient
database-as-a-service user experience
digest: 00e0eff7056d56467cd5c975657fbb76c8d01accd25a4b7aca81bc42aeac961d
home: https://github.com/zalando/postgres-operator
keywords:
- postgres
- operator
- ui
- cloud-native
- patroni
- spilo
maintainers:
- email: opensource@zalando.de
name: Zalando
- email: sk@sik-net.de
name: siku4
name: postgres-operator-ui
sources:
- https://github.com/zalando/postgres-operator
urls:
- postgres-operator-ui-1.4.0.tgz
version: 1.4.0
generated: "2020-02-24T15:32:47.610348278+01:00"

Binary file not shown.
@@ -8,7 +8,7 @@ replicaCount: 1
image:
registry: registry.opensource.zalan.do
repository: acid/postgres-operator-ui
tag: v1.2.0
tag: v1.4.0
pullPolicy: "IfNotPresent"

rbac:
@@ -1,7 +1,7 @@
apiVersion: v1
name: postgres-operator
version: 1.3.0
appVersion: 1.3.0
version: 1.4.0
appVersion: 1.4.0
home: https://github.com/zalando/postgres-operator
description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
keywords:
@@ -1,9 +1,31 @@
apiVersion: v1
entries:
postgres-operator:
- apiVersion: v1
appVersion: 1.4.0
created: "2020-02-20T17:39:25.443276193+01:00"
description: Postgres Operator creates and manages PostgreSQL clusters running
in Kubernetes
digest: b93ccde5581deb8ed0857136b8ce74ca3f1b7240438fa4415f705764a1300bed
home: https://github.com/zalando/postgres-operator
keywords:
- postgres
- operator
- cloud-native
- patroni
- spilo
maintainers:
- email: opensource@zalando.de
name: Zalando
name: postgres-operator
sources:
- https://github.com/zalando/postgres-operator
urls:
- postgres-operator-1.4.0.tgz
version: 1.4.0
- apiVersion: v1
appVersion: 1.3.0
created: "2019-12-17T12:58:49.477140129+01:00"
created: "2020-02-20T17:39:25.441532163+01:00"
description: Postgres Operator creates and manages PostgreSQL clusters running
in Kubernetes
digest: 7e788fd37daec76a01f6d6f9fe5be5b54f5035e4eba0041e80a760d656537325

@@ -25,7 +47,7 @@ entries:
version: 1.3.0
- apiVersion: v1
appVersion: 1.2.0
created: "2019-12-17T12:58:49.475844233+01:00"
created: "2020-02-20T17:39:25.440278302+01:00"
description: Postgres Operator creates and manages PostgreSQL clusters running
in Kubernetes
digest: d10710c7cf19f4e266e7704f5d1e98dcfc61bee3919522326c35c22ca7d2f2bf

@@ -47,4 +69,4 @@ entries:
urls:
- postgres-operator-1.2.0.tgz
version: 1.2.0
generated: "2019-12-17T12:58:49.474719294+01:00"
generated: "2020-02-20T17:39:25.439168098+01:00"

Binary file not shown.
@@ -63,9 +63,9 @@ rules:
- secrets
verbs:
- create
- update
- delete
- get
- update
# to check nodes for node readiness label
- apiGroups:
- ""

@@ -102,9 +102,9 @@ rules:
- delete
- get
- list
- watch
- update
- patch
- update
- watch
# to resize the filesystem in Spilo pods when increasing volume size
- apiGroups:
- ""
@@ -1,7 +1,7 @@
image:
registry: registry.opensource.zalan.do
repository: acid/postgres-operator
tag: v1.3.1
tag: v1.4.0
pullPolicy: "IfNotPresent"

# Optionally specify an array of imagePullSecrets.

@@ -100,8 +100,14 @@ configKubernetes:
pod_management_policy: "ordered_ready"
# label assigned to the Postgres pods (and services/endpoints)
pod_role_label: spilo-role
# service account definition as JSON/YAML string to be used by postgres cluster pods
# pod_service_account_definition: ""

# name of service account to be used by postgres cluster pods
pod_service_account_name: "postgres-pod"
# role binding definition as JSON/YAML string to be used by pod service account
# pod_service_account_role_binding_definition: ""

# Postgres pods are terminated forcefully after this timeout
pod_terminate_grace_period: 5m
# template for database user secrets generated by the operator

@@ -212,7 +218,7 @@ configLogicalBackup:
logical_backup_s3_endpoint: ""
# S3 Secret Access Key
logical_backup_s3_secret_access_key: ""
# S3 server side encription
# S3 server side encryption
logical_backup_s3_sse: "AES256"
# backup schedule in the cron format
logical_backup_schedule: "30 00 * * *"
@@ -1,7 +1,7 @@
image:
registry: registry.opensource.zalan.do
repository: acid/postgres-operator
tag: v1.3.1
tag: v1.4.0
pullPolicy: "IfNotPresent"

# Optionally specify an array of imagePullSecrets.

@@ -93,8 +93,14 @@ configKubernetes:
pod_management_policy: "ordered_ready"
# label assigned to the Postgres pods (and services/endpoints)
pod_role_label: spilo-role
# service account definition as JSON/YAML string to be used by postgres cluster pods
# pod_service_account_definition: ""

# name of service account to be used by postgres cluster pods
pod_service_account_name: "postgres-pod"
# role binding definition as JSON/YAML string to be used by pod service account
# pod_service_account_role_binding_definition: ""

# Postgres pods are terminated forcefully after this timeout
pod_terminate_grace_period: 5m
# template for database user secrets generated by the operator

@@ -203,7 +209,7 @@ configLogicalBackup:
logical_backup_s3_endpoint: ""
# S3 Secret Access Key
logical_backup_s3_secret_access_key: ""
# S3 server side encription
# S3 server side encryption
logical_backup_s3_sse: "AES256"
# backup schedule in the cron format
logical_backup_schedule: "30 00 * * *"
@@ -66,20 +66,13 @@ pipeline:
- desc: 'Build and push Docker image'
cmd: |
cd ui
image_base='registry-write.opensource.zalan.do/acid/postgres-operator-ui'
if [[ "${CDP_TARGET_BRANCH}" == 'master' && -z "${CDP_PULL_REQUEST_NUMBER}" ]]
IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"}
if [[ ${CDP_TARGET_BRANCH} == "master" && ${IS_PR_BUILD} != "true" ]]
then
image="${image_base}"
IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-ui
else
image="${image_base}-test"
IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-ui-test
fi
image_with_tag="${image}:c${CDP_BUILD_VERSION}"

if docker pull "${image}"
then
docker build --cache-from="${image}" -t "${image_with_tag}" .
else
docker build -t "${image_with_tag}" .
fi

docker push "${image_with_tag}"
export IMAGE
make docker
make push
@@ -11,11 +11,11 @@ switchover (planned failover) of the master to the Pod with new minor version.
The switch should usually take less than 5 seconds, still clients have to
reconnect.

Major version upgrades are supported via [cloning](user.md#clone-directly). The
new cluster manifest must have a higher `version` string than the source cluster
and will be created from a basebackup. Depending of the cluster size, downtime
in this case can be significant as writes to the database should be stopped and
all WAL files should be archived first before cloning is started.
Major version upgrades are supported via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster).
The new cluster manifest must have a higher `version` string than the source
cluster and will be created from a basebackup. Depending of the cluster size,
downtime in this case can be significant as writes to the database should be
stopped and all WAL files should be archived first before cloning is started.

Note, that simply changing the version string in the `postgresql` manifest does
not work at present and leads to errors. Neither Patroni nor Postgres Operator
@@ -359,3 +359,24 @@ CPU and memory limits for the sidecar container.
* **memory**
memory limits for the sidecar container. Optional, overrides the
`default_memory_limits` operator configuration parameter. Optional.

## Custom TLS certificates

Those parameters are grouped under the `tls` top-level key.

* **secretName**
By setting the `secretName` value, the cluster will switch to load the given
Kubernetes Secret into the container as a volume and uses that as the
certificate instead. It is up to the user to create and manage the
Kubernetes Secret either by hand or using a tool like the CertManager
operator.

* **certificateFile**
Filename of the certificate. Defaults to "tls.crt".

* **privateKeyFile**
Filename of the private key. Defaults to "tls.key".

* **caFile**
Optional filename to the CA certificate. Useful when the client connects
with `sslmode=verify-ca` or `sslmode=verify-full`.
@@ -110,8 +110,10 @@ Those are top-level keys, containing both leaf keys and groups.

* **min_instances**
operator will run at least the number of instances for any given Postgres
cluster equal to the value of this parameter. When `-1` is specified, no
limits are applied. The default is `-1`.
cluster equal to the value of this parameter. Standby clusters can still run
with `numberOfInstances: 1` as this is the [recommended setup](../user.md#setting-up-a-standby-cluster).
When `-1` is specified for `min_instances`, no limits are applied. The default
is `-1`.

* **resync_period**
period between consecutive sync requests. The default is `30m`.

@@ -282,7 +284,7 @@ configuration they are grouped under the `kubernetes` key.
used for AWS volume resizing and not required if you don't need that
capability. The default is `false`.

* **master_pod_move_timeout**
* **master_pod_move_timeout**
The period of time to wait for the success of migration of master pods from
an unschedulable node. The migration includes Patroni switchovers to
respective replicas on healthy nodes. The situation where master pods still

@@ -470,7 +472,7 @@ grouped under the `logical_backup` key.
When using non-AWS S3 storage, endpoint can be set as a ENV variable. The default is empty.

* **logical_backup_s3_sse**
Specify server side encription that S3 storage is using. If empty string
Specify server side encryption that S3 storage is using. If empty string
is specified, no argument will be passed to `aws s3` command. Default: "AES256".

* **logical_backup_s3_access_key_id**
docs/user.md
@@ -254,29 +254,22 @@ spec:

## How to clone an existing PostgreSQL cluster

You can spin up a new cluster as a clone of the existing one, using a clone
You can spin up a new cluster as a clone of the existing one, using a `clone`
section in the spec. There are two options here:

* Clone directly from a source cluster using `pg_basebackup`
* Clone from an S3 bucket
* Clone from an S3 bucket (recommended)
* Clone directly from a source cluster

### Clone directly

```yaml
spec:
clone:
cluster: "acid-batman"
```

Here `cluster` is a name of a source cluster that is going to be cloned. The
cluster to clone is assumed to be running and the clone procedure invokes
`pg_basebackup` from it. The operator will setup the cluster to be cloned to
connect to the service of the source cluster by name (if the cluster is called
test, then the connection string will look like host=test port=5432), which
means that you can clone only from clusters within the same namespace.
Note, that cloning can also be used for [major version upgrades](administrator.md#minor-and-major-version-upgrade)
of PostgreSQL.

### Clone from S3

Cloning from S3 has the advantage that there is no impact on your production
database. A new Postgres cluster is created by restoring the data of another
source cluster. If you create it in the same Kubernetes environment, use a
different name.

```yaml
spec:
clone:
@@ -287,7 +280,8 @@ spec:

Here `cluster` is a name of a source cluster that is going to be cloned. A new
cluster will be cloned from S3, using the latest backup before the `timestamp`.
In this case, `uid` field is also mandatory - operator will use it to find a
Note, that a time zone is required for `timestamp` in the format of +00:00 which
is UTC. The `uid` field is also mandatory. The operator will use it to find a
correct key inside an S3 bucket. You can find this field in the metadata of the
source cluster:
@@ -299,9 +293,6 @@ metadata:
uid: efd12e58-5786-11e8-b5a7-06148230260c
```

Note that timezone is required for `timestamp`. Otherwise, offset is relative
to UTC, see [RFC 3339 section 5.6](https://www.ietf.org/rfc/rfc3339.txt).

For non AWS S3 following settings can be set to support cloning from other S3
implementations:
@@ -317,14 +308,35 @@ spec:
s3_force_path_style: true
```

### Clone directly

Another way to get a fresh copy of your source DB cluster is via basebackup. To
use this feature simply leave out the timestamp field from the clone section.
The operator will connect to the service of the source cluster by name. If the
cluster is called test, then the connection string will look like host=test
port=5432), which means that you can clone only from clusters within the same
namespace.

```yaml
spec:
clone:
cluster: "acid-batman"
```

Be aware that on a busy source database this can result in an elevated load!

## Setting up a standby cluster

Standby clusters are like normal cluster but they are streaming from a remote
cluster. As the first version of this feature, the only scenario covered by
operator is to stream from a WAL archive of the master. Following the more
popular infrastructure of using Amazon's S3 buckets, it is mentioned as
`s3_wal_path` here. To start a cluster as standby add the following `standby`
section in the YAML file:
Standby cluster is a [Patroni feature](https://github.com/zalando/patroni/blob/master/docs/replica_bootstrap.rst#standby-cluster)
that first clones a database, and keeps replicating changes afterwards. As the
replication is happening by the means of archived WAL files (stored on S3 or
the equivalent of other cloud providers), the standby cluster can exist in a
different location than its source database. Unlike cloning, the PostgreSQL
version between source and target cluster has to be the same.

To start a cluster as standby, add the following `standby` section in the YAML
file and specify the S3 bucket path. An empty path will result in an error and
no statefulset will be created.

```yaml
spec:
@@ -332,20 +344,65 @@ spec:
s3_wal_path: "s3 bucket path to the master"
```

Things to note:
At the moment, the operator only allows to stream from the WAL archive of the
master. Thus, it is recommended to deploy standby clusters with only [one pod](../manifests/standby-manifest.yaml#L10).
You can raise the instance count when detaching. Note, that the same pod role
labels like for normal clusters are used: The standby leader is labeled as
`master`.

- An empty string in the `s3_wal_path` field of the standby cluster will result
in an error and no statefulset will be created.
- Only one pod can be deployed for stand-by cluster.
- To manually promote the standby_cluster, use `patronictl` and remove config
entry.
- There is no way to transform a non-standby cluster to a standby cluster
through the operator. Adding the standby section to the manifest of a running
Postgres cluster will have no effect. However, it can be done through Patroni
by adding the [standby_cluster](https://github.com/zalando/patroni/blob/bd2c54581abb42a7d3a3da551edf0b8732eefd27/docs/replica_bootstrap.rst#standby-cluster)
section using `patronictl edit-config`. Note that the transformed standby
cluster will not be doing any streaming. It will be in standby mode and allow
read-only transactions only.
### Providing credentials of source cluster

A standby cluster is replicating the data (including users and passwords) from
the source database and is read-only. The system and application users (like
standby, postgres etc.) all have a password that does not match the credentials
stored in secrets which are created by the operator. One solution is to create
secrets beforehand and paste in the credentials of the source cluster.
Otherwise, you will see errors in the Postgres logs saying users cannot log in
and the operator logs will complain about not being able to sync resources.

When you only run a standby leader, you can safely ignore this, as it will be
sorted out once the cluster is detached from the source. It is also harmless if
you don’t plan it. But, when you created a standby replica, too, fix the
credentials right away. WAL files will pile up on the standby leader if no
connection can be established between standby replica(s). You can also edit the
secrets after their creation. Find them by:

```bash
kubectl get secrets --all-namespaces | grep <standby-cluster-name>
```

### Promote the standby

One big advantage of standby clusters is that they can be promoted to a proper
database cluster. This means it will stop replicating changes from the source,
and start accept writes itself. This mechanism makes it possible to move
databases from one place to another with minimal downtime. Currently, the
operator does not support promoting a standby cluster. It has to be done
manually using `patronictl edit-config` inside the postgres container of the
standby leader pod. Remove the following lines from the YAML structure and the
leader promotion happens immediately. Before doing so, make sure that the
standby is not behind the source database.

```yaml
standby_cluster:
create_replica_methods:
- bootstrap_standby_with_wale
- basebackup_fast_xlog
restore_command: envdir "/home/postgres/etc/wal-e.d/env-standby" /scripts/restore_command.sh
"%f" "%p"
```

Finally, remove the `standby` section from the postgres cluster manifest.

### Turn a normal cluster into a standby

There is no way to transform a non-standby cluster to a standby cluster through
the operator. Adding the `standby` section to the manifest of a running
Postgres cluster will have no effect. But, as explained in the previous
paragraph it can be done manually through `patronictl edit-config`. This time,
by adding the `standby_cluster` section to the Patroni configuration. However,
the transformed standby cluster will not be doing any streaming. It will be in
standby mode and allow read-only transactions only.

## Sidecar Support
@@ -454,3 +511,50 @@ monitoring is outside the scope of operator responsibilities. See
[configuration reference](reference/cluster_manifest.md) and
[administrator documentation](administrator.md) for details on how backups are
executed.

## Custom TLS certificates

By default, the spilo image generates its own TLS certificate during startup.
This certificate is not secure since it cannot be verified and thus doesn't
protect from active MITM attacks. In this section we show how a Kubernetes
Secret resource can be loaded with a custom TLS certificate.

Before applying these changes, the operator must also be configured with the
`spilo_fsgroup` set to the GID matching the postgres user group. If the value
is not provided, the cluster will default to `103` which is the GID from the
default spilo image.

Upload the cert as a kubernetes secret:
```sh
kubectl create secret tls pg-tls \
--key pg-tls.key \
--cert pg-tls.crt
```

Or with a CA:
```sh
kubectl create secret generic pg-tls \
--from-file=tls.crt=server.crt \
--from-file=tls.key=server.key \
--from-file=ca.crt=ca.crt
```

Alternatively it is also possible to use
[cert-manager](https://cert-manager.io/docs/) to generate these secrets.

Then configure the postgres resource with the TLS secret:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql

metadata:
name: acid-test-cluster
spec:
tls:
secretName: "pg-tls"
caFile: "ca.crt" # add this if the secret is configured with a CA
```

Certificate rotation is handled in the spilo image which checks every 5
minutes if the certificates have changed and reloads postgres accordingly.
@@ -57,6 +57,7 @@ class EndToEndTestCase(unittest.TestCase):

k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml")
k8s.wait_for_pod_start('spilo-role=master')
k8s.wait_for_pod_start('spilo-role=replica')

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_enable_load_balancer(self):
@@ -107,141 +108,6 @@ class EndToEndTestCase(unittest.TestCase):
self.assertEqual(repl_svc_type, 'ClusterIP',
"Expected ClusterIP service type for replica, found {}".format(repl_svc_type))

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_min_resource_limits(self):
'''
Lower resource limits below configured minimum and let operator fix it
'''
k8s = self.k8s
cluster_label = 'cluster-name=acid-minimal-cluster'
_, failover_targets = k8s.get_pg_nodes(cluster_label)

# configure minimum boundaries for CPU and memory limits
minCPULimit = '500m'
minMemoryLimit = '500Mi'
patch_min_resource_limits = {
"data": {
"min_cpu_limit": minCPULimit,
"min_memory_limit": minMemoryLimit
}
}
k8s.update_config(patch_min_resource_limits)

# lower resource limits below minimum
pg_patch_resources = {
"spec": {
"resources": {
"requests": {
"cpu": "10m",
"memory": "50Mi"
},
"limits": {
"cpu": "200m",
"memory": "200Mi"
}
}
}
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
k8s.wait_for_master_failover(failover_targets)

pods = k8s.api.core_v1.list_namespaced_pod(
'default', label_selector='spilo-role=master,' + cluster_label).items
self.assert_master_is_unique()
masterPod = pods[0]

self.assertEqual(masterPod.spec.containers[0].resources.limits['cpu'], minCPULimit,
"Expected CPU limit {}, found {}"
.format(minCPULimit, masterPod.spec.containers[0].resources.limits['cpu']))
self.assertEqual(masterPod.spec.containers[0].resources.limits['memory'], minMemoryLimit,
"Expected memory limit {}, found {}"
.format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory']))

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_multi_namespace_support(self):
'''
Create a customized Postgres cluster in a non-default namespace.
'''
k8s = self.k8s

with open("manifests/complete-postgres-manifest.yaml", 'r+') as f:
pg_manifest = yaml.safe_load(f)
pg_manifest["metadata"]["namespace"] = self.namespace
yaml.dump(pg_manifest, f, Dumper=yaml.Dumper)

k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
k8s.wait_for_pod_start("spilo-role=master", self.namespace)
self.assert_master_is_unique(self.namespace, "acid-test-cluster")

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_scaling(self):
'''
Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime.
'''
k8s = self.k8s
labels = "cluster-name=acid-minimal-cluster"

k8s.wait_for_pg_to_scale(3)
self.assertEqual(3, k8s.count_pods_with_label(labels))
self.assert_master_is_unique()

k8s.wait_for_pg_to_scale(2)
self.assertEqual(2, k8s.count_pods_with_label(labels))
self.assert_master_is_unique()

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_taint_based_eviction(self):
'''
Add taint "postgres=:NoExecute" to node with master. This must cause a failover.
'''
k8s = self.k8s
cluster_label = 'cluster-name=acid-minimal-cluster'

# get nodes of master and replica(s) (expected target of new master)
current_master_node, failover_targets = k8s.get_pg_nodes(cluster_label)
num_replicas = len(failover_targets)

# if all pods live on the same node, failover will happen to other worker(s)
failover_targets = [x for x in failover_targets if x != current_master_node]
if len(failover_targets) == 0:
nodes = k8s.api.core_v1.list_node()
for n in nodes.items:
if "node-role.kubernetes.io/master" not in n.metadata.labels and n.metadata.name != current_master_node:
failover_targets.append(n.metadata.name)

# taint node with postgres=:NoExecute to force failover
body = {
"spec": {
"taints": [
{
"effect": "NoExecute",
"key": "postgres"
}
]
}
}

# patch node and test if master is failing over to one of the expected nodes
k8s.api.core_v1.patch_node(current_master_node, body)
k8s.wait_for_master_failover(failover_targets)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

new_master_node, new_replica_nodes = k8s.get_pg_nodes(cluster_label)
self.assertNotEqual(current_master_node, new_master_node,
"Master on {} did not fail over to one of {}".format(current_master_node, failover_targets))
self.assertEqual(num_replicas, len(new_replica_nodes),
"Expected {} replicas, found {}".format(num_replicas, len(new_replica_nodes)))
self.assert_master_is_unique()

# undo the tainting
body = {
"spec": {
"taints": []
}
}
k8s.api.core_v1.patch_node(new_master_node, body)

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_logical_backup_cron_job(self):
'''
@@ -306,6 +172,133 @@ class EndToEndTestCase(unittest.TestCase):
self.assertEqual(0, len(jobs),
"Expected 0 logical backup jobs, found {}".format(len(jobs)))

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_min_resource_limits(self):
'''
Lower resource limits below configured minimum and let operator fix it
'''
k8s = self.k8s
cluster_label = 'cluster-name=acid-minimal-cluster'
labels = 'spilo-role=master,' + cluster_label
_, failover_targets = k8s.get_pg_nodes(cluster_label)

# configure minimum boundaries for CPU and memory limits
minCPULimit = '500m'
minMemoryLimit = '500Mi'
patch_min_resource_limits = {
"data": {
"min_cpu_limit": minCPULimit,
"min_memory_limit": minMemoryLimit
}
}
k8s.update_config(patch_min_resource_limits)

# lower resource limits below minimum
pg_patch_resources = {
"spec": {
"resources": {
"requests": {
"cpu": "10m",
"memory": "50Mi"
},
"limits": {
"cpu": "200m",
"memory": "200Mi"
}
}
}
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
k8s.wait_for_pod_failover(failover_targets, labels)
k8s.wait_for_pod_start('spilo-role=replica')

pods = k8s.api.core_v1.list_namespaced_pod(
'default', label_selector=labels).items
self.assert_master_is_unique()
masterPod = pods[0]

self.assertEqual(masterPod.spec.containers[0].resources.limits['cpu'], minCPULimit,
"Expected CPU limit {}, found {}"
.format(minCPULimit, masterPod.spec.containers[0].resources.limits['cpu']))
self.assertEqual(masterPod.spec.containers[0].resources.limits['memory'], minMemoryLimit,
"Expected memory limit {}, found {}"
.format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory']))

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_multi_namespace_support(self):
'''
Create a customized Postgres cluster in a non-default namespace.
'''
k8s = self.k8s

with open("manifests/complete-postgres-manifest.yaml", 'r+') as f:
pg_manifest = yaml.safe_load(f)
pg_manifest["metadata"]["namespace"] = self.namespace
yaml.dump(pg_manifest, f, Dumper=yaml.Dumper)

k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
k8s.wait_for_pod_start("spilo-role=master", self.namespace)
self.assert_master_is_unique(self.namespace, "acid-test-cluster")

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_node_readiness_label(self):
'''
Remove node readiness label from master node. This must cause a failover.
'''
k8s = self.k8s
cluster_label = 'cluster-name=acid-minimal-cluster'
labels = 'spilo-role=master,' + cluster_label
readiness_label = 'lifecycle-status'
readiness_value = 'ready'

# get nodes of master and replica(s) (expected target of new master)
current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label)
num_replicas = len(current_replica_nodes)
failover_targets = self.get_failover_targets(current_master_node, current_replica_nodes)

# add node_readiness_label to potential failover nodes
patch_readiness_label = {
"metadata": {
"labels": {
readiness_label: readiness_value
}
}
}
for failover_target in failover_targets:
k8s.api.core_v1.patch_node(failover_target, patch_readiness_label)

# define node_readiness_label in config map which should trigger a failover of the master
patch_readiness_label_config = {
"data": {
"node_readiness_label": readiness_label + ':' + readiness_value,
}
}
k8s.update_config(patch_readiness_label_config)
new_master_node, new_replica_nodes = self.assert_failover(
current_master_node, num_replicas, failover_targets, cluster_label)

# patch also node where master ran before
k8s.api.core_v1.patch_node(current_master_node, patch_readiness_label)
# toggle pod anti affinity to move replica away from master node
self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_scaling(self):
'''
Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime.
'''
k8s = self.k8s
labels = "cluster-name=acid-minimal-cluster"

k8s.wait_for_pg_to_scale(3)
self.assertEqual(3, k8s.count_pods_with_label(labels))
self.assert_master_is_unique()

k8s.wait_for_pg_to_scale(2)
self.assertEqual(2, k8s.count_pods_with_label(labels))
self.assert_master_is_unique()

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_service_annotations(self):
'''
@@ -346,18 +339,118 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.update_config(unpatch_custom_service_annotations)

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_taint_based_eviction(self):
'''
Add taint "postgres=:NoExecute" to node with master. This must cause a failover.
'''
k8s = self.k8s
cluster_label = 'cluster-name=acid-minimal-cluster'

# get nodes of master and replica(s) (expected target of new master)
current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label)
num_replicas = len(current_replica_nodes)
failover_targets = self.get_failover_targets(current_master_node, current_replica_nodes)

# taint node with postgres=:NoExecute to force failover
body = {
"spec": {
"taints": [
{
"effect": "NoExecute",
"key": "postgres"
}
]
}
}

# patch node and test if master is failing over to one of the expected nodes
k8s.api.core_v1.patch_node(current_master_node, body)
new_master_node, new_replica_nodes = self.assert_failover(
current_master_node, num_replicas, failover_targets, cluster_label)

# add toleration to pods
patch_toleration_config = {
"data": {
"toleration": "key:postgres,operator:Exists,effect:NoExecute"
}
}
k8s.update_config(patch_toleration_config)

# toggle pod anti affinity to move replica away from master node
self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)

def get_failover_targets(self, master_node, replica_nodes):
'''
If all pods live on the same node, failover will happen to other worker(s)
'''
k8s = self.k8s

failover_targets = [x for x in replica_nodes if x != master_node]
if len(failover_targets) == 0:
nodes = k8s.api.core_v1.list_node()
for n in nodes.items:
if "node-role.kubernetes.io/master" not in n.metadata.labels and n.metadata.name != master_node:
failover_targets.append(n.metadata.name)

return failover_targets

def assert_failover(self, current_master_node, num_replicas, failover_targets, cluster_label):
'''
Check if master is failing over. The replica should move first to be the switchover target
'''
k8s = self.k8s
k8s.wait_for_pod_failover(failover_targets, 'spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

new_master_node, new_replica_nodes = k8s.get_pg_nodes(cluster_label)
self.assertNotEqual(current_master_node, new_master_node,
"Master on {} did not fail over to one of {}".format(current_master_node, failover_targets))
self.assertEqual(num_replicas, len(new_replica_nodes),
"Expected {} replicas, found {}".format(num_replicas, len(new_replica_nodes)))
self.assert_master_is_unique()

return new_master_node, new_replica_nodes

def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"):
'''
Check that there is a single pod in the k8s cluster with the label "spilo-role=master"
To be called manually after operations that affect pods
'''

k8s = self.k8s
labels = 'spilo-role=master,cluster-name=' + clusterName

num_of_master_pods = k8s.count_pods_with_label(labels, namespace)
self.assertEqual(num_of_master_pods, 1, "Expected 1 master pod, found {}".format(num_of_master_pods))

def assert_distributed_pods(self, master_node, replica_nodes, cluster_label):
'''
Other tests can lead to the situation that master and replica are on the same node.
Toggle pod anti affinity to distribute pods across nodes (replica in particular).
'''
k8s = self.k8s
failover_targets = self.get_failover_targets(master_node, replica_nodes)

# enable pod anti affinity in config map which should trigger movement of replica
patch_enable_antiaffinity = {
"data": {
"enable_pod_antiaffinity": "true"
}
}
k8s.update_config(patch_enable_antiaffinity)
self.assert_failover(
master_node, len(replica_nodes), failover_targets, cluster_label)

# now disable pod anti affinity again which will cause yet another failover
patch_disable_antiaffinity = {
"data": {
"enable_pod_antiaffinity": "false"
}
}
k8s.update_config(patch_disable_antiaffinity)
k8s.wait_for_pod_start('spilo-role=master')
k8s.wait_for_pod_start('spilo-role=replica')


class K8sApi:

@@ -445,15 +538,14 @@ class K8s:
def count_pods_with_label(self, labels, namespace='default'):
return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items)

def wait_for_master_failover(self, expected_master_nodes, namespace='default'):
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
pod_phase = 'Failing over'
new_master_node = ''
labels = 'spilo-role=master,cluster-name=acid-minimal-cluster'
new_pod_node = ''

while (pod_phase != 'Running') or (new_master_node not in expected_master_nodes):
while (pod_phase != 'Running') or (new_pod_node not in failover_targets):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
if pods:
new_master_node = pods[0].spec.node_name
new_pod_node = pods[0].spec.node_name
pod_phase = pods[0].status.phase
time.sleep(self.RETRY_TIMEOUT_SEC)
go.mod

@@ -11,6 +11,7 @@ require (
github.com/lib/pq v1.2.0
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
github.com/sirupsen/logrus v1.4.2
github.com/stretchr/testify v1.4.0
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 // indirect
go.sum

@@ -275,6 +275,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -24,13 +24,14 @@ package cmd

import (
"fmt"
"github.com/spf13/cobra"
"github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"log"
"strconv"
"time"

"github.com/spf13/cobra"
v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (

@@ -95,8 +96,12 @@ func listAll(listPostgres *v1.PostgresqlList) {
template := "%-32s%-16s%-12s%-12s%-12s%-12s%-12s\n"
fmt.Printf(template, "NAME", "STATUS", "INSTANCES", "VERSION", "AGE", "VOLUME", "NAMESPACE")
for _, pgObjs := range listPostgres.Items {
fmt.Printf(template, pgObjs.Name, pgObjs.Status.PostgresClusterStatus, strconv.Itoa(int(pgObjs.Spec.NumberOfInstances)),
pgObjs.Spec.PgVersion, time.Since(pgObjs.CreationTimestamp.Time).Truncate(TrimCreateTimestamp), pgObjs.Spec.Size, pgObjs.Namespace)
fmt.Printf(template, pgObjs.Name,
pgObjs.Status.PostgresClusterStatus,
strconv.Itoa(int(pgObjs.Spec.NumberOfInstances)),
pgObjs.Spec.PostgresqlParam.PgVersion,
time.Since(pgObjs.CreationTimestamp.Time).Truncate(TrimCreateTimestamp),
pgObjs.Spec.Size, pgObjs.Namespace)
}
}

@@ -104,8 +109,12 @@ func listWithNamespace(listPostgres *v1.PostgresqlList) {
template := "%-32s%-16s%-12s%-12s%-12s%-12s\n"
fmt.Printf(template, "NAME", "STATUS", "INSTANCES", "VERSION", "AGE", "VOLUME")
for _, pgObjs := range listPostgres.Items {
fmt.Printf(template, pgObjs.Name, pgObjs.Status.PostgresClusterStatus, strconv.Itoa(int(pgObjs.Spec.NumberOfInstances)),
pgObjs.Spec.PgVersion, time.Since(pgObjs.CreationTimestamp.Time).Truncate(TrimCreateTimestamp), pgObjs.Spec.Size)
fmt.Printf(template, pgObjs.Name,
pgObjs.Status.PostgresClusterStatus,
strconv.Itoa(int(pgObjs.Spec.NumberOfInstances)),
pgObjs.Spec.PostgresqlParam.PgVersion,
time.Since(pgObjs.CreationTimestamp.Time).Truncate(TrimCreateTimestamp),
pgObjs.Spec.Size)
}
}
@@ -111,3 +111,10 @@ spec:
# env:
# - name: "USEFUL_VAR"
# value: "perhaps-true"

# Custom TLS certificate. Disabled unless tls.secretName has a value.
tls:
secretName: "" # should correspond to a Kubernetes Secret resource to load
certificateFile: "tls.crt"
privateKeyFile: "tls.key"
caFile: "" # optionally configure Postgres with a CA certificate
@@ -63,7 +63,9 @@ data:
pod_label_wait_timeout: 10m
pod_management_policy: "ordered_ready"
pod_role_label: spilo-role
# pod_service_account_definition: ""
pod_service_account_name: "postgres-pod"
# pod_service_account_role_binding_definition: ""
pod_terminate_grace_period: 5m
# postgres_superuser_teams: "postgres_superusers"
# protected_role_names: "admin"
@@ -64,9 +64,9 @@ rules:
- secrets
verbs:
- create
- update
- delete
- get
- update
# to check nodes for node readiness label
- apiGroups:
- ""

@@ -103,9 +103,9 @@ rules:
- delete
- get
- list
- watch
- update
- patch
- update
- watch
# to resize the filesystem in Spilo pods when increasing volume size
- apiGroups:
- ""
@@ -15,7 +15,7 @@ spec:
serviceAccountName: postgres-operator
containers:
- name: postgres-operator
image: registry.opensource.zalan.do/acid/postgres-operator:v1.3.1
image: registry.opensource.zalan.do/acid/postgres-operator:v1.4.0
imagePullPolicy: IfNotPresent
resources:
requests:
@@ -110,7 +110,7 @@ configuration:
log_statement: all
# teams_api_url: ""
logging_rest_api:
api_port: 8008
api_port: 8080
cluster_history_entries: 1000
ring_log_lines: 100
scalyr:
@@ -271,6 +271,19 @@ spec:
type: string
teamId:
type: string
tls:
type: object
required:
- secretName
properties:
secretName:
type: string
certificateFile:
type: string
privateKeyFile:
type: string
caFile:
type: string
tolerations:
type: array
items:
@@ -454,6 +454,24 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
"teamId": {
Type: "string",
},
"tls": {
Type: "object",
Required: []string{"secretName"},
Properties: map[string]apiextv1beta1.JSONSchemaProps{
"secretName": {
Type: "string",
},
"certificateFile": {
Type: "string",
},
"privateKeyFile": {
Type: "string",
},
"caFile": {
Type: "string",
},
},
},
"tolerations": {
Type: "array",
Items: &apiextv1beta1.JSONSchemaPropsOrArray{
@@ -53,7 +53,7 @@ type KubernetesMetaConfiguration struct {
EnableInitContainers *bool `json:"enable_init_containers,omitempty"`
EnableSidecars *bool `json:"enable_sidecars,omitempty"`
SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"`
ClusterDomain string `json:"cluster_domain"`
ClusterDomain string `json:"cluster_domain,omitempty"`
OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"`
InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"`
PodRoleLabel string `json:"pod_role_label,omitempty"`
@@ -62,6 +62,7 @@ type PostgresSpec struct {
StandbyCluster *StandbyDescription `json:"standby"`
PodAnnotations map[string]string `json:"podAnnotations"`
ServiceAnnotations map[string]string `json:"serviceAnnotations"`
TLS *TLSDescription `json:"tls"`

// deprecated json tags
InitContainersOld []v1.Container `json:"init_containers,omitempty"`

@@ -140,6 +141,13 @@ type StandbyDescription struct {
S3WalPath string `json:"s3_wal_path,omitempty"`
}

type TLSDescription struct {
SecretName string `json:"secretName,omitempty"`
CertificateFile string `json:"certificateFile,omitempty"`
PrivateKeyFile string `json:"privateKeyFile,omitempty"`
CAFile string `json:"caFile,omitempty"`
}

// CloneDescription describes which cluster the new should clone and up to which point in time
type CloneDescription struct {
ClusterName string `json:"cluster,omitempty"`
@@ -528,6 +528,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
(*out)[key] = val
}
}
if in.TLS != nil {
in, out := &in.TLS, &out.TLS
*out = new(TLSDescription)
**out = **in
}
if in.InitContainersOld != nil {
in, out := &in.InitContainersOld, &out.InitContainersOld
*out = make([]corev1.Container, len(*in))

@@ -810,6 +815,22 @@ func (in *StandbyDescription) DeepCopy() *StandbyDescription {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSDescription) DeepCopyInto(out *TLSDescription) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSDescription.
func (in *TLSDescription) DeepCopy() *TLSDescription {
if in == nil {
return nil
}
out := new(TLSDescription)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TeamsAPIConfiguration) DeepCopyInto(out *TeamsAPIConfiguration) {
*out = *in
@@ -562,10 +562,11 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
}
}()

if oldSpec.Spec.PgVersion != newSpec.Spec.PgVersion { // PG versions comparison
c.logger.Warningf("postgresql version change(%q -> %q) has no effect", oldSpec.Spec.PgVersion, newSpec.Spec.PgVersion)
if oldSpec.Spec.PostgresqlParam.PgVersion != newSpec.Spec.PostgresqlParam.PgVersion { // PG versions comparison
c.logger.Warningf("postgresql version change(%q -> %q) has no effect",
oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
//we need that hack to generate statefulset with the old version
newSpec.Spec.PgVersion = oldSpec.Spec.PgVersion
newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
}

// Service
@@ -3,6 +3,7 @@ package cluster
import (
"encoding/json"
"fmt"
"path"
"sort"

"github.com/sirupsen/logrus"

@@ -26,10 +27,13 @@ import (
)

const (
pgBinariesLocationTemplate = "/usr/lib/postgresql/%s/bin"
pgBinariesLocationTemplate = "/usr/lib/postgresql/%v/bin"
patroniPGBinariesParameterName = "bin_dir"
patroniPGParametersParameterName = "parameters"
patroniPGHBAConfParameterName = "pg_hba"

// the gid of the postgres user in the default spilo image
spiloPostgresGID = 103
localHost = "127.0.0.1/32"
)
@@ -446,6 +450,7 @@ func generatePodTemplate(
podAntiAffinityTopologyKey string,
additionalSecretMount string,
additionalSecretMountPath string,
volumes []v1.Volume,
) (*v1.PodTemplateSpec, error) {

terminateGracePeriodSeconds := terminateGracePeriod

@@ -464,6 +469,7 @@ func generatePodTemplate(
InitContainers: initContainers,
Tolerations: *tolerationsSpec,
SecurityContext: &securityContext,
Volumes: volumes,
}

if shmVolume != nil && *shmVolume {
@@ -716,6 +722,50 @@ func makeResources(cpuRequest, memoryRequest, cpuLimit, memoryLimit string) acid
}
}

func extractPgVersionFromBinPath(binPath string, template string) (string, error) {
var pgVersion float32
_, err := fmt.Sscanf(binPath, template, &pgVersion)
if err != nil {
return "", err
}
return fmt.Sprintf("%v", pgVersion), nil
}

func (c *Cluster) getNewPgVersion(container v1.Container, newPgVersion string) (string, error) {
var (
spiloConfiguration spiloConfiguration
runningPgVersion string
err error
)

for _, env := range container.Env {
if env.Name != "SPILO_CONFIGURATION" {
continue
}
err = json.Unmarshal([]byte(env.Value), &spiloConfiguration)
if err != nil {
return newPgVersion, err
}
}

if len(spiloConfiguration.PgLocalConfiguration) > 0 {
currentBinPath := fmt.Sprintf("%v", spiloConfiguration.PgLocalConfiguration[patroniPGBinariesParameterName])
runningPgVersion, err = extractPgVersionFromBinPath(currentBinPath, pgBinariesLocationTemplate)
if err != nil {
return "", fmt.Errorf("could not extract Postgres version from %v in SPILO_CONFIGURATION", currentBinPath)
}
} else {
return "", fmt.Errorf("could not find %q setting in SPILO_CONFIGURATION", patroniPGBinariesParameterName)
}

if runningPgVersion != newPgVersion {
c.logger.Warningf("postgresql version change(%q -> %q) has no effect", runningPgVersion, newPgVersion)
newPgVersion = runningPgVersion
}

return newPgVersion, nil
}

func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {

var (
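As a side note on the `%s` to `%v` template change above: the sketch below is a minimal, standalone illustration (with an assumed helper name, not part of this commit) of how `fmt.Sscanf` can recover the running version from Spilo's `bin_dir` once the template scans into a numeric value, so both `11` and `9.6` style directories parse.

```go
// Standalone sketch of the bin_dir parsing idea used by extractPgVersionFromBinPath.
package main

import "fmt"

const pgBinariesLocationTemplate = "/usr/lib/postgresql/%v/bin"

// versionFromBinDir is a hypothetical helper name for this illustration.
func versionFromBinDir(binDir string) (string, error) {
	var v float32
	// scanning into a float lets the same template accept "11" and "9.6"
	if _, err := fmt.Sscanf(binDir, pgBinariesLocationTemplate, &v); err != nil {
		return "", err
	}
	return fmt.Sprintf("%v", v), nil
}

func main() {
	for _, dir := range []string{"/usr/lib/postgresql/11/bin", "/usr/lib/postgresql/9.6/bin"} {
		v, err := versionFromBinDir(dir)
		fmt.Println(dir, "->", v, err)
	}
}
```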
@@ -724,6 +774,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
sidecarContainers []v1.Container
podTemplate *v1.PodTemplateSpec
volumeClaimTemplate *v1.PersistentVolumeClaim
volumes []v1.Volume
)

// Improve me. Please.
@@ -840,21 +891,76 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
    }

    // generate environment variables for the spilo container
    spiloEnvVars := deduplicateEnvVars(
        c.generateSpiloPodEnvVars(c.Postgresql.GetUID(), spiloConfiguration, &spec.Clone,
            spec.StandbyCluster, customPodEnvVarsList), c.containerName(), c.logger)
    spiloEnvVars := c.generateSpiloPodEnvVars(
        c.Postgresql.GetUID(),
        spiloConfiguration,
        &spec.Clone,
        spec.StandbyCluster,
        customPodEnvVarsList,
    )

    // pickup the docker image for the spilo container
    effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)

    // determine the FSGroup for the spilo pod
    effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
    if spec.SpiloFSGroup != nil {
        effectiveFSGroup = spec.SpiloFSGroup
    }

    volumeMounts := generateVolumeMounts(spec.Volume)

    // configure TLS with a custom secret volume
    if spec.TLS != nil && spec.TLS.SecretName != "" {
        if effectiveFSGroup == nil {
            c.logger.Warnf("Setting the default FSGroup to satisfy the TLS configuration")
            fsGroup := int64(spiloPostgresGID)
            effectiveFSGroup = &fsGroup
        }
        // this is combined with the FSGroup above to give read access to the
        // postgres user
        defaultMode := int32(0640)
        volumes = append(volumes, v1.Volume{
            Name: "tls-secret",
            VolumeSource: v1.VolumeSource{
                Secret: &v1.SecretVolumeSource{
                    SecretName:  spec.TLS.SecretName,
                    DefaultMode: &defaultMode,
                },
            },
        })

        mountPath := "/tls"
        volumeMounts = append(volumeMounts, v1.VolumeMount{
            MountPath: mountPath,
            Name:      "tls-secret",
            ReadOnly:  true,
        })

        // use the same filenames as Secret resources by default
        certFile := ensurePath(spec.TLS.CertificateFile, mountPath, "tls.crt")
        privateKeyFile := ensurePath(spec.TLS.PrivateKeyFile, mountPath, "tls.key")
        spiloEnvVars = append(
            spiloEnvVars,
            v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: certFile},
            v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: privateKeyFile},
        )

        if spec.TLS.CAFile != "" {
            caFile := ensurePath(spec.TLS.CAFile, mountPath, "")
            spiloEnvVars = append(
                spiloEnvVars,
                v1.EnvVar{Name: "SSL_CA_FILE", Value: caFile},
            )
        }
    }

    // generate the spilo container
    c.logger.Debugf("Generating Spilo container, environment variables: %v", spiloEnvVars)
    spiloContainer := generateContainer(c.containerName(),
        &effectiveDockerImage,
        resourceRequirements,
        spiloEnvVars,
        deduplicateEnvVars(spiloEnvVars, c.containerName(), c.logger),
        volumeMounts,
        c.OpConfig.Resources.SpiloPrivileged,
    )
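Note that env-var de-duplication now happens where the spilo container is assembled (the deduplicateEnvVars(spiloEnvVars, c.containerName(), c.logger) argument above) instead of when the list is generated. The helper's implementation is not part of this diff; the following is only a minimal first-wins sketch with the same call shape, and the logrus logger type is an assumption, not the operator's actual code:

package cluster

import (
    "github.com/sirupsen/logrus"
    v1 "k8s.io/api/core/v1"
)

// dedupEnvVarsSketch keeps the first occurrence of each env var name and logs
// any duplicates it drops. Illustrative sketch only, not the real deduplicateEnvVars.
func dedupEnvVarsSketch(vars []v1.EnvVar, containerName string, logger *logrus.Entry) []v1.EnvVar {
    seen := make(map[string]bool, len(vars))
    result := make([]v1.EnvVar, 0, len(vars))
    for _, v := range vars {
        if seen[v.Name] {
            logger.Warningf("dropping duplicate env var %q for container %q", v.Name, containerName)
            continue
        }
        seen[v.Name] = true
        result = append(result, v)
    }
    return result
}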
@@ -893,16 +999,10 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
    tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
    effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)

    // determine the FSGroup for the spilo pod
    effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
    if spec.SpiloFSGroup != nil {
        effectiveFSGroup = spec.SpiloFSGroup
    }

    annotations := c.generatePodAnnotations(spec)

    // generate pod template for the statefulset, based on the spilo container and sidecars
    if podTemplate, err = generatePodTemplate(
    podTemplate, err = generatePodTemplate(
        c.Namespace,
        c.labelsSet(true),
        annotations,
@@ -920,10 +1020,9 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
        c.OpConfig.EnablePodAntiAffinity,
        c.OpConfig.PodAntiAffinityTopologyKey,
        c.OpConfig.AdditionalSecretMount,
        c.OpConfig.AdditionalSecretMountPath); err != nil {
        return nil, fmt.Errorf("could not generate pod template: %v", err)
    }

        c.OpConfig.AdditionalSecretMountPath,
        volumes,
    )
    if err != nil {
        return nil, fmt.Errorf("could not generate pod template: %v", err)
    }
@@ -1048,11 +1147,13 @@ func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
    cur := spec.NumberOfInstances
    newcur := cur

    /* Limit the max number of pods to one, if this is standby-cluster */
    if spec.StandbyCluster != nil {
        c.logger.Info("Standby cluster can have maximum of 1 pod")
        min = 1
        max = 1
        if newcur == 1 {
            min = newcur
            max = newcur
        } else {
            c.logger.Warningf("operator only supports standby clusters with 1 pod")
        }
    }
    if max >= 0 && newcur > max {
        newcur = max
@@ -1544,7 +1645,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
        false,
        "",
        c.OpConfig.AdditionalSecretMount,
        c.OpConfig.AdditionalSecretMountPath); err != nil {
        c.OpConfig.AdditionalSecretMountPath,
        nil); err != nil {
        return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err)
    }
@@ -1629,7 +1731,7 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
        // Postgres env vars
        {
            Name:  "PG_VERSION",
            Value: c.Spec.PgVersion,
            Value: c.Spec.PostgresqlParam.PgVersion,
        },
        {
            Name: "PGPORT",
@@ -1676,3 +1778,13 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
func (c *Cluster) getLogicalBackupJobName() (jobName string) {
    return "logical-backup-" + c.clusterName().Name
}

func ensurePath(file string, defaultDir string, defaultFile string) string {
    if file == "" {
        return path.Join(defaultDir, defaultFile)
    }
    if !path.IsAbs(file) {
        return path.Join(defaultDir, file)
    }
    return file
}
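To illustrate how the new ensurePath helper resolves the TLS file settings used earlier (empty value falls back to the default file in the mount directory, a relative name is joined onto the mount directory, an absolute path is returned unchanged), here is a small self-contained example with made-up values:

package main

import (
    "fmt"
    "path"
)

// Same logic as the ensurePath helper added above, copied here so the
// example runs on its own.
func ensurePath(file string, defaultDir string, defaultFile string) string {
    if file == "" {
        return path.Join(defaultDir, defaultFile)
    }
    if !path.IsAbs(file) {
        return path.Join(defaultDir, file)
    }
    return file
}

func main() {
    fmt.Println(ensurePath("", "/tls", "tls.crt"))           // /tls/tls.crt
    fmt.Println(ensurePath("server.crt", "/tls", "tls.crt")) // /tls/server.crt
    fmt.Println(ensurePath("/custom/ca.crt", "/tls", ""))    // /custom/ca.crt
}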
@@ -3,16 +3,17 @@ package cluster
import (
    "reflect"

    v1 "k8s.io/api/core/v1"

    "testing"

    "github.com/stretchr/testify/assert"

    acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
    "github.com/zalando/postgres-operator/pkg/util"
    "github.com/zalando/postgres-operator/pkg/util/config"
    "github.com/zalando/postgres-operator/pkg/util/constants"
    "github.com/zalando/postgres-operator/pkg/util/k8sutil"

    v1 "k8s.io/api/core/v1"
    policyv1beta1 "k8s.io/api/policy/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
@@ -381,6 +382,135 @@ func TestCloneEnv(t *testing.T) {
    }
}

func TestExtractPgVersionFromBinPath(t *testing.T) {
    testName := "TestExtractPgVersionFromBinPath"
    tests := []struct {
        subTest  string
        binPath  string
        template string
        expected string
    }{
        {
            subTest:  "test current bin path with decimal against hard coded template",
            binPath:  "/usr/lib/postgresql/9.6/bin",
            template: pgBinariesLocationTemplate,
            expected: "9.6",
        },
        {
            subTest:  "test current bin path against hard coded template",
            binPath:  "/usr/lib/postgresql/12/bin",
            template: pgBinariesLocationTemplate,
            expected: "12",
        },
        {
            subTest:  "test alternative bin path against a matching template",
            binPath:  "/usr/pgsql-12/bin",
            template: "/usr/pgsql-%v/bin",
            expected: "12",
        },
    }

    for _, tt := range tests {
        pgVersion, err := extractPgVersionFromBinPath(tt.binPath, tt.template)
        if err != nil {
            t.Errorf("Unexpected error: %v", err)
        }
        if pgVersion != tt.expected {
            t.Errorf("%s %s: Expected version %s, have %s instead",
                testName, tt.subTest, tt.expected, pgVersion)
        }
    }
}
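extractPgVersionFromBinPath itself is not shown in this diff; its expected behavior is defined by the cases above. The following is a minimal sketch that is consistent with those cases, assuming the template contains exactly one %v placeholder; the names are illustrative, not the operator's code:

package main

import (
    "fmt"
    "strings"
)

// extractVersionSketch returns the part of binPath that sits where the
// template's %v placeholder is, e.g. "/usr/lib/postgresql/9.6/bin" against
// "/usr/lib/postgresql/%v/bin" yields "9.6".
func extractVersionSketch(binPath, template string) (string, error) {
    parts := strings.SplitN(template, "%v", 2)
    if len(parts) != 2 {
        return "", fmt.Errorf("template %q contains no %%v placeholder", template)
    }
    if !strings.HasPrefix(binPath, parts[0]) || !strings.HasSuffix(binPath, parts[1]) {
        return "", fmt.Errorf("could not extract version from %q", binPath)
    }
    return strings.TrimSuffix(strings.TrimPrefix(binPath, parts[0]), parts[1]), nil
}

func main() {
    v, err := extractVersionSketch("/usr/pgsql-12/bin", "/usr/pgsql-%v/bin")
    fmt.Println(v, err) // 12 <nil>
}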

func TestGetPgVersion(t *testing.T) {
    testName := "TestGetPgVersion"
    tests := []struct {
        subTest          string
        pgContainer      v1.Container
        currentPgVersion string
        newPgVersion     string
    }{
        {
            subTest: "new version with decimal point differs from current SPILO_CONFIGURATION",
            pgContainer: v1.Container{
                Name: "postgres",
                Env: []v1.EnvVar{
                    {
                        Name:  "SPILO_CONFIGURATION",
                        Value: "{\"postgresql\": {\"bin_dir\": \"/usr/lib/postgresql/9.6/bin\"}}",
                    },
                },
            },
            currentPgVersion: "9.6",
            newPgVersion:     "12",
        },
        {
            subTest: "new version differs from current SPILO_CONFIGURATION",
            pgContainer: v1.Container{
                Name: "postgres",
                Env: []v1.EnvVar{
                    {
                        Name:  "SPILO_CONFIGURATION",
                        Value: "{\"postgresql\": {\"bin_dir\": \"/usr/lib/postgresql/11/bin\"}}",
                    },
                },
            },
            currentPgVersion: "11",
            newPgVersion:     "12",
        },
        {
            subTest: "new version is lower than the one found in current SPILO_CONFIGURATION",
            pgContainer: v1.Container{
                Name: "postgres",
                Env: []v1.EnvVar{
                    {
                        Name:  "SPILO_CONFIGURATION",
                        Value: "{\"postgresql\": {\"bin_dir\": \"/usr/lib/postgresql/12/bin\"}}",
                    },
                },
            },
            currentPgVersion: "12",
            newPgVersion:     "11",
        },
        {
            subTest: "new version is the same like in the current SPILO_CONFIGURATION",
            pgContainer: v1.Container{
                Name: "postgres",
                Env: []v1.EnvVar{
                    {
                        Name:  "SPILO_CONFIGURATION",
                        Value: "{\"postgresql\": {\"bin_dir\": \"/usr/lib/postgresql/12/bin\"}}",
                    },
                },
            },
            currentPgVersion: "12",
            newPgVersion:     "12",
        },
    }

    var cluster = New(
        Config{
            OpConfig: config.Config{
                ProtectedRoles: []string{"admin"},
                Auth: config.Auth{
                    SuperUsername:       superUserName,
                    ReplicationUsername: replicationUserName,
                },
            },
        }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)

    for _, tt := range tests {
        pgVersion, err := cluster.getNewPgVersion(tt.pgContainer, tt.newPgVersion)
        if err != nil {
            t.Errorf("Unexpected error: %v", err)
        }
        if pgVersion != tt.currentPgVersion {
            t.Errorf("%s %s: Expected version %s, have %s instead",
                testName, tt.subTest, tt.currentPgVersion, pgVersion)
        }
    }
}

func TestSecretVolume(t *testing.T) {
    testName := "TestSecretVolume"
    tests := []struct {
@@ -451,3 +581,65 @@ func TestSecretVolume(t *testing.T) {
        }
    }
}

func TestTLS(t *testing.T) {
    var err error
    var spec acidv1.PostgresSpec
    var cluster *Cluster

    makeSpec := func(tls acidv1.TLSDescription) acidv1.PostgresSpec {
        return acidv1.PostgresSpec{
            TeamID: "myapp", NumberOfInstances: 1,
            Resources: acidv1.Resources{
                ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
                ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
            },
            Volume: acidv1.Volume{
                Size: "1G",
            },
            TLS: &tls,
        }
    }

    cluster = New(
        Config{
            OpConfig: config.Config{
                PodManagementPolicy: "ordered_ready",
                ProtectedRoles:      []string{"admin"},
                Auth: config.Auth{
                    SuperUsername:       superUserName,
                    ReplicationUsername: replicationUserName,
                },
            },
        }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
    spec = makeSpec(acidv1.TLSDescription{SecretName: "my-secret", CAFile: "ca.crt"})
    s, err := cluster.generateStatefulSet(&spec)
    if err != nil {
        assert.NoError(t, err)
    }

    fsGroup := int64(103)
    assert.Equal(t, &fsGroup, s.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")

    defaultMode := int32(0640)
    volume := v1.Volume{
        Name: "tls-secret",
        VolumeSource: v1.VolumeSource{
            Secret: &v1.SecretVolumeSource{
                SecretName:  "my-secret",
                DefaultMode: &defaultMode,
            },
        },
    }
    assert.Contains(t, s.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume")

    assert.Contains(t, s.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
        MountPath: "/tls",
        Name:      "tls-secret",
        ReadOnly:  true,
    }, "the volume gets mounted in /tls")

    assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
    assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
    assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
}
@@ -291,6 +291,18 @@ func (c *Cluster) syncStatefulSet() error {
    // statefulset is already there, make sure we use its definition in order to compare with the spec.
    c.Statefulset = sset

    // check if there is no Postgres version mismatch
    for _, container := range c.Statefulset.Spec.Template.Spec.Containers {
        if container.Name != "postgres" {
            continue
        }
        pgVersion, err := c.getNewPgVersion(container, c.Spec.PostgresqlParam.PgVersion)
        if err != nil {
            return fmt.Errorf("could not parse current Postgres version: %v", err)
        }
        c.Spec.PostgresqlParam.PgVersion = pgVersion
    }

    desiredSS, err := c.generateStatefulSet(&c.Spec)
    if err != nil {
        return fmt.Errorf("could not generate statefulset: %v", err)
@@ -224,7 +224,7 @@ func (c *Controller) initRoleBinding() {

    switch {
    case err != nil:
        panic(fmt.Errorf("unable to parse the definition of the role binding for the pod service account definition from the operator configuration: %v", err))
        panic(fmt.Errorf("unable to parse the role binding definition from the operator configuration: %v", err))
    case groupVersionKind.Kind != "RoleBinding":
        panic(fmt.Errorf("role binding definition in the operator configuration defines another type of resource: %v", groupVersionKind.Kind))
    default:
@@ -5,7 +5,7 @@ import (
    "time"

    "github.com/zalando/postgres-operator/pkg/util/retryutil"
    "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
@@ -172,19 +172,19 @@ func (c *Controller) nodeDelete(obj interface{}) {
}

func (c *Controller) moveMasterPodsOffNode(node *v1.Node) {

    // retry to move master until configured timeout is reached
    err := retryutil.Retry(1*time.Minute, c.opConfig.MasterPodMoveTimeout,
        func() (bool, error) {
            err := c.attemptToMoveMasterPodsOffNode(node)
            if err != nil {
                return false, fmt.Errorf("unable to move master pods off the unschedulable node; will retry after delay of 1 minute")
                return false, err
            }
            return true, nil
        },
    )

    if err != nil {
        c.logger.Warningf("failed to move master pods from the node %q: timeout of %v minutes expired", node.Name, c.opConfig.MasterPodMoveTimeout)
        c.logger.Warningf("failed to move master pods from the node %q: %v", node.Name, err)
    }

}
@@ -505,11 +505,11 @@ func (c *Controller) submitRBACCredentials(event ClusterEvent) error {
    namespace := event.NewSpec.GetNamespace()

    if err := c.createPodServiceAccount(namespace); err != nil {
        return fmt.Errorf("could not create pod service account %v : %v", c.opConfig.PodServiceAccountName, err)
        return fmt.Errorf("could not create pod service account %q : %v", c.opConfig.PodServiceAccountName, err)
    }

    if err := c.createRoleBindings(namespace); err != nil {
        return fmt.Errorf("could not create role binding %v : %v", c.PodServiceAccountRoleBinding.Name, err)
        return fmt.Errorf("could not create role binding %q : %v", c.PodServiceAccountRoleBinding.Name, err)
    }
    return nil
}
@@ -520,16 +520,16 @@ func (c *Controller) createPodServiceAccount(namespace string) error {
    _, err := c.KubeClient.ServiceAccounts(namespace).Get(podServiceAccountName, metav1.GetOptions{})
    if k8sutil.ResourceNotFound(err) {

        c.logger.Infof(fmt.Sprintf("creating pod service account in the namespace %v", namespace))
        c.logger.Infof(fmt.Sprintf("creating pod service account %q in the %q namespace", podServiceAccountName, namespace))

        // get a separate copy of service account
        // to prevent a race condition when setting a namespace for many clusters
        sa := *c.PodServiceAccount
        if _, err = c.KubeClient.ServiceAccounts(namespace).Create(&sa); err != nil {
            return fmt.Errorf("cannot deploy the pod service account %v defined in the config map to the %v namespace: %v", podServiceAccountName, namespace, err)
            return fmt.Errorf("cannot deploy the pod service account %q defined in the configuration to the %q namespace: %v", podServiceAccountName, namespace, err)
        }

        c.logger.Infof("successfully deployed the pod service account %v to the %v namespace", podServiceAccountName, namespace)
        c.logger.Infof("successfully deployed the pod service account %q to the %q namespace", podServiceAccountName, namespace)
    } else if k8sutil.ResourceAlreadyExists(err) {
        return nil
    }
@@ -545,14 +545,14 @@ func (c *Controller) createRoleBindings(namespace string) error {
    _, err := c.KubeClient.RoleBindings(namespace).Get(podServiceAccountRoleBindingName, metav1.GetOptions{})
    if k8sutil.ResourceNotFound(err) {

        c.logger.Infof("Creating the role binding %v in the namespace %v", podServiceAccountRoleBindingName, namespace)
        c.logger.Infof("Creating the role binding %q in the %q namespace", podServiceAccountRoleBindingName, namespace)

        // get a separate copy of role binding
        // to prevent a race condition when setting a namespace for many clusters
        rb := *c.PodServiceAccountRoleBinding
        _, err = c.KubeClient.RoleBindings(namespace).Create(&rb)
        if err != nil {
            return fmt.Errorf("cannot bind the pod service account %q defined in the config map to the cluster role in the %q namespace: %v", podServiceAccountName, namespace, err)
            return fmt.Errorf("cannot bind the pod service account %q defined in the configuration to the cluster role in the %q namespace: %v", podServiceAccountName, namespace, err)
        }

        c.logger.Infof("successfully deployed the role binding for the pod service account %q to the %q namespace", podServiceAccountName, namespace)
@@ -95,7 +95,6 @@ type Config struct {
    EtcdHost                    string            `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
    DockerImage                 string            `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p2"`
    Sidecars                    map[string]string `name:"sidecar_docker_images"`
    // default name `operator` enables backward compatibility with the older ServiceAccountName field
    PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
    // value of this string must be valid JSON or YAML; see initPodServiceAccount
    PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""`
ui/Makefile
@@ -5,9 +5,13 @@ VERSION ?= $(shell git describe --tags --always --dirty)
TAG       ?= $(VERSION)
GITHEAD   = $(shell git rev-parse --short HEAD)
GITURL    = $(shell git config --get remote.origin.url)
GITSTATU  = $(shell git status --porcelain || echo 'no changes')
GITSTATUS = $(shell git status --porcelain || echo 'no changes')
TTYFLAGS  = $(shell test -t 0 && echo '-it')

ifdef CDP_PULL_REQUEST_NUMBER
  CDP_TAG := -${CDP_BUILD_VERSION}
endif

default: docker

clean:
@@ -24,11 +28,12 @@ docker: appjs
	echo `(env)`
	echo "Tag ${TAG}"
	echo "Version ${VERSION}"
	echo "CDP tag ${CDP_TAG}"
	echo "git describe $(shell git describe --tags --always --dirty)"
	docker build --rm -t "$(IMAGE):$(TAG)" -f Dockerfile .
	docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)" -f Dockerfile .

push: docker
	docker push "$(IMAGE):$(TAG)"
push:
	docker push "$(IMAGE):$(TAG)$(CDP_TAG)"

mock:
	docker run -it -p 8080:8080 "$(IMAGE):$(TAG)" --mock