Merge branch 'master' into fes-support

commit 47aa7b1c64
@@ -1,2 +1,2 @@
 # global owners
-* @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih
+* @sdudoladov @Jan-M @CyberDem0n @FxKu @jopadi
@@ -1,5 +1,4 @@
-Dmitrii Dolgov <dmitrii.dolgov@zalando.de>
 Sergey Dudoladov <sergey.dudoladov@zalando.de>
 Felix Kunde <felix.kunde@zalando.de>
 Jan Mussler <jan.mussler@zalando.de>
-Rafia Sabih <rafia.sabih@zalando.de>
+Jociele Padilha <jociele.padilha@zalando.de>
@@ -61,6 +61,11 @@ spec:
 configuration:
 type: object
 properties:
+crd_categories:
+type: array
+nullable: true
+items:
+type: string
 docker_image:
 type: string
 default: "registry.opensource.zalan.do/acid/spilo-14:2.1-p3"
@@ -69,6 +74,7 @@ spec:
 default: true
 enable_crd_validation:
 type: boolean
+description: deprecated
 default: true
 enable_lazy_spilo_upgrade:
 type: boolean
@@ -90,11 +96,13 @@ spec:
 default: false
 max_instances:
 type: integer
-minimum: -1 # -1 = disabled
+description: "-1 = disabled"
+minimum: -1
 default: -1
 min_instances:
 type: integer
-minimum: -1 # -1 = disabled
+description: "-1 = disabled"
+minimum: -1
 default: -1
 resync_period:
 type: string
@@ -122,6 +130,15 @@ spec:
 users:
 type: object
 properties:
+enable_password_rotation:
+type: boolean
+default: false
+password_rotation_interval:
+type: integer
+default: 90
+password_rotation_user_retention:
+type: integer
+default: 180
 replication_username:
 type: string
 default: standby
@@ -175,12 +192,12 @@ spec:
 type: array
 items:
 type: string
-enable_init_containers:
-type: boolean
-default: true
 enable_cross_namespace_secret:
 type: boolean
 default: false
+enable_init_containers:
+type: boolean
+default: true
 enable_pod_antiaffinity:
 type: boolean
 default: false
@@ -233,6 +250,11 @@ spec:
 type: object
 additionalProperties:
 type: string
+node_readiness_label_merge:
+type: string
+enum:
+- "AND"
+- "OR"
 oauth_token_secret_name:
 type: string
 default: "postgresql-operator"
@@ -396,12 +418,12 @@ spec:
 type: string
 log_s3_bucket:
 type: string
+wal_az_storage_account:
+type: string
 wal_gs_bucket:
 type: string
 wal_s3_bucket:
 type: string
-wal_az_storage_account:
-type: string
 logical_backup:
 type: object
 properties:
@@ -147,7 +147,7 @@ spec:
 - "transaction"
 numberOfInstances:
 type: integer
-minimum: 2
+minimum: 1
 resources:
 type: object
 required:
@@ -201,8 +201,9 @@ spec:
 type: boolean
 enableShmVolume:
 type: boolean
-init_containers: # deprecated
+init_containers:
 type: array
+description: deprecated
 nullable: true
 items:
 type: object
@@ -229,8 +230,8 @@ spec:
 items:
 type: object
 required:
-- weight
 - preference
+- weight
 properties:
 preference:
 type: object
@@ -348,8 +349,9 @@ spec:
 type: object
 additionalProperties:
 type: string
-pod_priority_class_name: # deprecated
+pod_priority_class_name:
 type: string
+description: deprecated
 podPriorityClassName:
 type: string
 postgresql:
@@ -393,8 +395,9 @@ spec:
 type: boolean
 secretNamespace:
 type: string
-replicaLoadBalancer: # deprecated
+replicaLoadBalancer:
 type: boolean
+description: deprecated
 resources:
 type: object
 required:
@@ -545,14 +548,14 @@ spec:
 - PreferNoSchedule
 tolerationSeconds:
 type: integer
-useLoadBalancer: # deprecated
+useLoadBalancer:
 type: boolean
+description: deprecated
 users:
 type: object
 additionalProperties:
 type: array
 nullable: true
-description: "Role flags specified here must not contradict each other"
 items:
 type: string
 enum:
@@ -584,6 +587,16 @@ spec:
 - SUPERUSER
 - nosuperuser
 - NOSUPERUSER
+usersWithPasswordRotation:
+type: array
+nullable: true
+items:
+type: string
+usersWithInPlacePasswordRotation:
+type: array
+nullable: true
+items:
+type: string
 volume:
 type: object
 required:
@@ -22,8 +22,9 @@ enableJsonLogging: false
 configGeneral:
 # the deployment should create/update the CRDs
 enable_crd_registration: true
-# choose if deployment creates/updates CRDs with OpenAPIV3Validation
-enable_crd_validation: true
+# specify categories under which crds should be listed
+crd_categories:
+- "all"
 # update only the statefulsets without immediately doing the rolling update
 enable_lazy_spilo_upgrade: false
 # set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION
@@ -132,6 +133,9 @@ configKubernetes:
 # node_readiness_label:
 # status: ready
 
+# defines how nodeAffinity from manifest should be merged with node_readiness_label
+# node_readiness_label_merge: "OR"
+
 # namespaced name of the secret containing the OAuth2 token to pass to the teams API
 # oauth_token_secret_name: postgresql-operator
@@ -3,6 +3,25 @@
 Learn how to configure and manage the Postgres Operator in your Kubernetes (K8s)
 environment.
+
+## CRD registration and validation
+
+On startup, the operator will try to register the necessary
+[CustomResourceDefinitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions)
+`Postgresql` and `OperatorConfiguration`. The latter will only get created if
+the `POSTGRES_OPERATOR_CONFIGURATION_OBJECT` [environment variable](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L36)
+is set in the deployment yaml and is not empty. If the CRDs already exist they
+will only be patched. If you do not wish the operator to create or update the
+CRDs, set the `enable_crd_registration` config option to `false`.
+
+CRDs are defined with an `openAPIV3Schema` structural schema against which new
+manifests of [`postgresql`](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql.crd.yaml) or [`OperatorConfiguration`](https://github.com/zalando/postgres-operator/blob/master/manifests/operatorconfiguration.crd.yaml)
+resources will be validated. On creation you can bypass the validation with
+`kubectl create --validate=false`.
+
+By default, the operator will register the CRDs in the `all` category so
+that resources are listed on `kubectl get all` commands. The `crd_categories`
+config option allows for customization of categories.
+
 ## Upgrading the operator
 
 The Postgres Operator is upgraded by changing the docker image within the
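The `crd_categories` behaviour described in the added docs above can be checked against a live cluster. A minimal sketch using the official Kubernetes Python client; the CRD name `postgresqls.acid.zalan.do` comes from the docs above, while the kubeconfig handling is an illustrative assumption:

```python
from kubernetes import client, config

# read the CRD the operator registered and print its categories;
# with the default configuration this should include "all"
config.load_kube_config()
crd = client.ApiextensionsV1Api().read_custom_resource_definition("postgresqls.acid.zalan.do")
print(crd.spec.names.categories)
```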
@@ -63,30 +82,6 @@ upgrade procedure, refer to the [corresponding PR in Spilo](https://github.com/z
 When `major_version_upgrade_mode` is set to `manual` the operator will run
 the upgrade script for you after the manifest is updated and pods are rotated.
 
-## CRD Validation
-
-[CustomResourceDefinitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions)
-will be registered with schema validation by default when the operator is
-deployed. The `OperatorConfiguration` CRD will only get created if the
-`POSTGRES_OPERATOR_CONFIGURATION_OBJECT` [environment variable](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L36)
-in the deployment yaml is set and not empty.
-
-When submitting manifests of [`postgresql`](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql.crd.yaml) or
-[`OperatorConfiguration`](https://github.com/zalando/postgres-operator/blob/master/manifests/operatorconfiguration.crd.yaml) custom
-resources with kubectl, validation can be bypassed with `--validate=false`. The
-operator can also be configured to not register CRDs with validation on `ADD` or
-`UPDATE` events. Running instances are not affected when enabling the validation
-afterwards unless the manifests is not changed then. Note, that the provided CRD
-manifests contain the validation for users to understand what schema is
-enforced.
-
-Once the validation is enabled it can only be disabled manually by editing or
-patching the CRD manifest:
-
-```bash
-kubectl patch crd postgresqls.acid.zalan.do -p '{"spec":{"validation": null}}'
-```
-
 ## Non-default cluster domain
 
 If your cluster uses a DNS domain other than the default `cluster.local`, this
@@ -293,6 +288,84 @@ that are aggregated into the K8s [default roles](https://kubernetes.io/docs/refe
 For Helm deployments setting `rbac.createAggregateClusterRoles: true` adds these clusterroles to the deployment.
 
+## Password rotation in K8s secrets
+
+The operator regularly updates credentials in the K8s secrets if the
+`enable_password_rotation` option is set to `true` in the configuration.
+It happens only for `LOGIN` roles with an associated secret (manifest roles,
+default users from `preparedDatabases`). Furthermore, there are the following
+exceptions:
+
+1. Infrastructure role secrets since rotation should happen by the infrastructure.
+2. Team API roles that connect via OAuth2 and JWT token (no secrets to these roles anyway).
+3. Database owners since ownership on database objects can not be inherited.
+4. System users such as `postgres`, `standby` and `pooler` user.
+
+The interval of days can be set with `password_rotation_interval` (default
+`90` = 90 days, minimum 1). On each rotation the user name and password values
+are replaced in the K8s secret. They belong to a newly created user named after
+the original role plus rotation date in YYMMDD format. All privileges are
+inherited meaning that migration scripts should still grant and revoke rights
+against the original role. The timestamp of the next rotation is written to the
+secret as well. Note, if the rotation interval is decreased it is reflected in
+the secrets only if the next rotation date is more days away than the new
+length of the interval.
+
+Pods still using the previous secret values which they keep in memory continue
+to connect to the database since the password of the corresponding user is not
+replaced. However, a retention policy can be configured for users created by
+the password rotation feature with `password_rotation_user_retention`. The
+operator will ensure that this period is at least twice as long as the
+configured rotation interval, hence the default of `180` = 180 days. When
+the creation date of a rotated user is older than the retention period it
+might not get removed immediately. Only on the next user rotation it is checked
+if users can get removed. Therefore, you might want to configure the retention
+to be a multiple of the rotation interval.
+
+### Password rotation for single users
+
+From the configuration, password rotation is enabled for all secrets with the
+mentioned exceptions. If you wish to first test rotation for a single user (or
+just have it enabled only for a few secrets) you can specify it in the cluster
+manifest. The rotation and retention intervals can only be configured globally.
+
+```
+spec:
+  usersWithSecretRotation:
+  - foo_user
+  - bar_reader_user
+```
+
+### Password replacement without extra users
+
+For some use cases where the secret is only used rarely - think of a `flyway`
+user running a migration script on pod start - we do not need to create extra
+database users but can replace only the password in the K8s secret. This type
+of rotation cannot be configured globally but specified in the cluster
+manifest:
+
+```
+spec:
+  usersWithInPlaceSecretRotation:
+  - flyway
+  - bar_owner_user
+```
+
+This would be the recommended option to enable rotation in secrets of database
+owners, but only if they are not used as application users for regular read
+and write operations.
+
+### Turning off password rotation
+
+When password rotation is turned off again the operator will check if the
+`username` value in the secret matches the original username and replace it
+with the latter. A new password is assigned and the `nextRotation` field is
+cleared. A final lookup for child (rotation) users to be removed is done but
+they will only be dropped if the retention policy allows for it. This is to
+avoid sudden connection issues in pods which still use credentials of these
+users in memory. You have to remove these child users manually or re-enable
+password rotation with a smaller interval so they get cleaned up.
+
 ## Use taints and tolerations for dedicated PostgreSQL nodes
 
 To ensure Postgres pods are running on nodes without any other application pods,
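As a sketch of what the rotation metadata looks like from the client side, the rotated credentials can be read back from the cluster's credentials secret. The secret naming scheme and the `nextRotation` key follow the e2e test changes further down; the user and cluster names are illustrative:

```python
import base64
from kubernetes import client, config

config.load_kube_config()
core_v1 = client.CoreV1Api()

# secrets are named <username>.<clustername>.credentials.postgresql.acid.zalan.do
secret = core_v1.read_namespaced_secret(
    "foo-user.acid-minimal-cluster.credentials.postgresql.acid.zalan.do", "default")

# on each rotation the operator replaces username/password and stores the next rotation date
username = base64.b64decode(secret.data["username"]).decode("utf-8")
next_rotation = base64.b64decode(secret.data["nextRotation"]).decode("utf-8")
print(username, next_rotation)
```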
@@ -339,6 +412,81 @@ master pods from being evicted by the K8s runtime. To prevent eviction
 completely, specify the toleration by leaving out the `tolerationSeconds` value
 (similar to how Kubernetes' own DaemonSets are configured)
 
+## Node readiness labels
+
+The operator can watch on certain node labels to detect e.g. the start of a
+Kubernetes cluster upgrade procedure and move master pods off the nodes to be
+decommissioned. Key-value pairs for these node readiness labels can be
+specified in the configuration (option name is in singular form):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: postgres-operator
+data:
+  node_readiness_label: "status1:ready,status2:ready"
+```
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: OperatorConfiguration
+metadata:
+  name: postgresql-configuration
+configuration:
+  kubernetes:
+    node_readiness_label:
+      status1: ready
+      status2: ready
+```
+
+The operator will create a `nodeAffinity` on the pods. This makes the
+`node_readiness_label` option the global configuration for defining node
+affinities for all Postgres clusters. You can have both, cluster-specific and
+global affinity, defined and they will get merged on the pods. If
+`node_readiness_label_merge` is configured to `"AND"` the node readiness
+affinity will end up under the same `matchExpressions` section(s) from the
+manifest affinity.
+
+```yaml
+affinity:
+  nodeAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+      nodeSelectorTerms:
+        - matchExpressions:
+            - key: environment
+              operator: In
+              values:
+                - pci
+            - key: status1
+              operator: In
+              values:
+                - ready
+            - key: status2
+              ...
+```
+
+If `node_readiness_label_merge` is set to `"OR"` (default) the readiness label
+affinity will be appended with its own expressions block:
+
+```yaml
+affinity:
+  nodeAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+      nodeSelectorTerms:
+        - matchExpressions:
+            - key: environment
+              ...
+        - matchExpressions:
+            - key: storage
+              ...
+        - matchExpressions:
+            - key: status1
+              ...
+            - key: status2
+              ...
+```
+
 ## Enable pod anti affinity
 
 To ensure Postgres pods are running on different topologies, you can use
@@ -115,6 +115,22 @@ These parameters are grouped directly under the `spec` key in the manifest.
   create the K8s secret in that namespace. The part after the first `.` is
   considered to be the user name. Optional.
 
+* **usersWithSecretRotation**
+  list of users to enable credential rotation in K8s secrets. The rotation
+  interval can only be configured globally. On each rotation a new user will
+  be added in the database replacing the `username` value in the secret of
+  the listed user. Although rotation users inherit all rights from the
+  original role, keep in mind that ownership is not transferred. See more
+  details in the [administrator docs](https://github.com/zalando/postgres-operator/blob/master/docs/administrator.md#password-rotation-in-k8s-secrets).
+
+* **usersWithInPlaceSecretRotation**
+  list of users to enable in-place password rotation in K8s secrets. The
+  rotation interval can only be configured globally. On each rotation the
+  password value will be replaced in the secrets which the operator reflects
+  in the database, too. List only users here that rarely connect to the
+  database, like a flyway user running a migration on Pod start. See more
+  details in the [administrator docs](https://github.com/zalando/postgres-operator/blob/master/docs/administrator.md#password-replacement-without-extra-users).
+
 * **databases**
   a map of database names to database owners for the databases that should be
   created by the operator. The owner users should already exist on the cluster
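Both options are plain lists in the `postgresql` manifest, so rotation for selected users can also be enabled by patching an existing cluster. A sketch using the same `patch_namespaced_custom_object` call that appears in the e2e tests below; cluster and user names are illustrative:

```python
from kubernetes import client, config

config.load_kube_config()
custom_objects_api = client.CustomObjectsApi()

# enable secret rotation only for two selected manifest users
pg_patch_rotation_single_users = {
    "spec": {
        "usersWithSecretRotation": [
            "foo_user",
            "bar_reader_user"
        ]
    }
}
custom_objects_api.patch_namespaced_custom_object(
    "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_rotation_single_users)
```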
@@ -75,9 +75,12 @@ Those are top-level keys, containing both leaf keys and groups.
   The default is `true`.
 
 * **enable_crd_validation**
-  toggles if the operator will create or update CRDs with
+  *deprecated*: toggles if the operator will create or update CRDs with
   [OpenAPI v3 schema validation](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#validation)
-  The default is `true`.
+  The default is `true`. `false` will be ignored, since `apiextensions.io/v1` requires a structural schema definition.
+
+* **crd_categories**
+  The operator will register CRDs in the `all` category by default so that they will be returned by a `kubectl get all` call. You are free to change categories or leave them empty.
 
 * **enable_lazy_spilo_upgrade**
   Instruct operator to update only the statefulsets with new images (Spilo and InitContainers) without immediately doing the rolling update. The assumption is pods will be re-started later with new images, for example due to the node rotation.
@@ -174,6 +177,28 @@ under the `users` key.
   Postgres username used for replication between instances. The default is
   `standby`.
 
+* **enable_password_rotation**
+  For all `LOGIN` roles that are not database owners the operator can rotate
+  credentials in the corresponding K8s secrets by replacing the username and
+  password. This means new users will be added on each rotation inheriting
+  all privileges from the original roles. The rotation date (in YYMMDD format)
+  is appended to the names of the new user. The timestamp of the next rotation
+  is written to the secret. The default is `false`.
+
+* **password_rotation_interval**
+  If password rotation is enabled (either from config or cluster manifest) the
+  interval can be configured with this parameter. The measure is in days which
+  means daily rotation (`1`) is the most frequent interval possible.
+  Default is `90`.
+
+* **password_rotation_user_retention**
+  To avoid an ever growing amount of new users due to password rotation the
+  operator will remove the created users again after a certain amount of days
+  has passed. The number can be configured with this parameter. However, the
+  operator will check that the retention policy is at least twice as long as
+  the rotation interval and update to this minimum in case it is not.
+  Default is `180`.
+
 ## Major version upgrades
 
 Parameters configuring automatic major version upgrades. In a
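The three options above live in the operator configuration, so enabling rotation globally amounts to patching the operator ConfigMap (or the `OperatorConfiguration` resource). A sketch mirroring the `k8s.update_config` pattern used in the e2e tests below; ConfigMap name and namespace are the defaults those tests assume:

```python
from kubernetes import client, config

config.load_kube_config()
core_v1 = client.CoreV1Api()

# turn on rotation globally; the retention should be at least twice the interval,
# otherwise the operator bumps it to that minimum
enable_password_rotation = {
    "data": {
        "enable_password_rotation": "true",
        "password_rotation_interval": "30",
        "password_rotation_user_retention": "60",
    }
}
core_v1.patch_namespaced_config_map("postgres-operator", "default", enable_password_rotation)
```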
@@ -344,11 +369,16 @@ configuration they are grouped under the `kubernetes` key.
 
 * **node_readiness_label**
   a set of labels that a running and active node should possess to be
-  considered `ready`. The operator uses values of those labels to detect the
-  start of the Kubernetes cluster upgrade procedure and move master pods off
-  the nodes to be decommissioned. When the set is not empty, the operator also
-  assigns the `Affinity` clause to the Postgres pods to be scheduled only on
-  `ready` nodes. The default is empty.
+  considered `ready`. When the set is not empty, the operator assigns the
+  `nodeAffinity` clause to the Postgres pods to be scheduled only on `ready`
+  nodes. The default is empty.
+
+* **node_readiness_label_merge**
+  If a `nodeAffinity` is also specified in the postgres cluster manifest
+  it will get merged with the `node_readiness_label` affinity on the pods.
+  The merge strategy can be configured - it can either be "AND" or "OR".
+  See [user docs](../user.md#use-taints-tolerations-and-node-affinity-for-dedicated-postgresql-nodes)
+  for more details. Default is "OR".
 
 * **toleration**
   a dictionary that should contain `key`, `operator`, `value` and
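The effect of the merge strategy is easiest to see on the pods themselves. A sketch that inspects the `nodeAffinity` the operator attached to a Postgres pod, using the same attribute path as the e2e tests below; cluster name and namespace are illustrative:

```python
from kubernetes import client, config

config.load_kube_config()
core_v1 = client.CoreV1Api()

pods = core_v1.list_namespaced_pod(
    "default", label_selector="application=spilo,cluster-name=acid-minimal-cluster")
affinity = pods.items[0].spec.affinity.node_affinity
terms = affinity.required_during_scheduling_ignored_during_execution.node_selector_terms

# with "AND" the readiness labels are merged into the manifest's matchExpressions,
# with "OR" (default) they are appended as an additional nodeSelectorTerms entry
for term in terms:
    print([expr.key for expr in term.match_expressions])
```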
@@ -671,7 +671,9 @@ configured [default requests](reference/operator_parameters.md#kubernetes-resour
 
 To ensure Postgres pods are running on nodes without any other application pods,
 you can use [taints and tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
-and configure the required toleration in the manifest.
+and configure the required toleration in the manifest. Tolerations can also be
+defined in the [operator config](administrator.md#use-taints-and-tolerations-for-dedicated-postgresql-nodes)
+to apply for all Postgres clusters.
 
 ```yaml
 spec:
@@ -703,6 +705,9 @@ spec:
 - pci
 ```
 
+If you need to define a `nodeAffinity` for all your Postgres clusters use the
+`node_readiness_label` [configuration](administrator.md#node-readiness-labels).
+
 ## In-place major version upgrade
 
 Starting with Spilo 13, operator supports in-place major version upgrade to a
@@ -53,7 +53,7 @@ class K8s:
 
         return master_pod_node, replica_pod_nodes
 
-    def get_cluster_nodes(self, cluster_labels='cluster-name=acid-minimal-cluster', namespace='default'):
+    def get_cluster_nodes(self, cluster_labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
         m = []
         r = []
         podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels)
@@ -321,6 +321,9 @@ class K8s:
     def get_cluster_replica_pod(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
         return self.get_cluster_pod('replica', labels, namespace)
 
+    def get_secret_data(self, username, clustername='acid-minimal-cluster', namespace='default'):
+        return self.api.core_v1.read_namespaced_secret(
+            "{}.{}.credentials.postgresql.acid.zalan.do".format(username.replace("_","-"), clustername), namespace).data
 
 class K8sBase:
     '''
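The new `get_secret_data` helper returns the raw (base64-encoded) secret payload, so callers decode the values themselves, exactly as the `test_password_rotation` additions further down do. A usage sketch, assuming the e2e harness is importable, its `K8s` wrapper can be constructed with defaults, and an `acid-minimal-cluster` is running:

```python
import base64

from tests.k8s_api import K8s

k8s = K8s()
# fetch the credentials secret of foo_user and decode the rotation metadata
secret_data = k8s.get_secret_data("foo_user")
username = str(base64.b64decode(secret_data["username"]), 'utf-8')
next_rotation = str(base64.b64decode(secret_data["nextRotation"]), 'utf-8')
print(username, next_rotation)
```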
@@ -4,8 +4,9 @@ import time
 import timeout_decorator
 import os
 import yaml
+import base64
 
-from datetime import datetime
+from datetime import datetime, date, timedelta
 from kubernetes import client
 
 from tests.k8s_api import K8s
@@ -203,7 +204,8 @@ class EndToEndTestCase(unittest.TestCase):
 "enable_postgres_team_crd": "true",
 "enable_team_member_deprecation": "true",
 "role_deletion_suffix": "_delete_me",
-"resync_period": "15s"
+"resync_period": "15s",
+"repair_period": "10s",
 },
 }
 k8s.update_config(enable_postgres_team_crd)
@@ -286,7 +288,8 @@ class EndToEndTestCase(unittest.TestCase):
 # revert config change
 revert_resync = {
 "data": {
-"resync_period": "30m",
+"resync_period": "4m",
+"repair_period": "1m",
 },
 }
 k8s.update_config(revert_resync)
@@ -579,6 +582,7 @@ class EndToEndTestCase(unittest.TestCase):
 "Parameters": None,
 "AdminRole": "",
 "Origin": 2,
+"IsDbOwner": False,
 "Deleted": False
 })
 return True
@@ -600,7 +604,6 @@ class EndToEndTestCase(unittest.TestCase):
 but lets pods run with the old image until they are recreated for
 reasons other than operator's activity. That works because the operator
 configures stateful sets to use "onDelete" pod update policy.
-
 The test covers:
 1) enabling lazy upgrade in existing operator deployment
 2) forcing the normal rolling upgrade by changing the operator
@@ -695,7 +698,6 @@ class EndToEndTestCase(unittest.TestCase):
 Ensure we can (a) create the cron job at user request for a specific PG cluster
 (b) update the cluster-wide image for the logical backup pod
 (c) delete the job at user request
-
 Limitations:
 (a) Does not run the actual batch job because there is no S3 mock to upload backups to
 (b) Assumes 'acid-minimal-cluster' exists as defined in setUp
@@ -880,12 +882,10 @@ class EndToEndTestCase(unittest.TestCase):
 
 # verify we are in good state from potential previous tests
 self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
-self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
 
 # get nodes of master and replica(s)
-master_node, replica_nodes = k8s.get_pg_nodes(cluster_label)
-self.assertNotEqual(master_node, [])
+master_nodes, replica_nodes = k8s.get_cluster_nodes()
+self.assertNotEqual(master_nodes, [])
 self.assertNotEqual(replica_nodes, [])
 
 # label node with environment=postgres
@@ -898,8 +898,8 @@ class EndToEndTestCase(unittest.TestCase):
 }
 
 try:
-# patch current master node with the label
-k8s.api.core_v1.patch_node(master_node, node_label_body)
+# patch master node with the label
+k8s.api.core_v1.patch_node(master_nodes[0], node_label_body)
 
 # add node affinity to cluster
 patch_node_affinity_config = {
@@ -923,7 +923,6 @@ class EndToEndTestCase(unittest.TestCase):
 }
 }
 }
-
 k8s.api.custom_objects_api.patch_namespaced_custom_object(
 group="acid.zalan.do",
 version="v1",
@@ -934,14 +933,17 @@ class EndToEndTestCase(unittest.TestCase):
 self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
 # node affinity change should cause replica to relocate from replica node to master node due to node affinity requirement
-k8s.wait_for_pod_failover(master_node, 'spilo-role=replica,' + cluster_label)
+k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
+k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+# next master will be switched over and pod needs to be replaced as well to finish the rolling update
+k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label)
 k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
 
 podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label)
 for pod in podsList.items:
 if pod.metadata.labels.get('spilo-role') == 'replica':
-self.assertEqual(master_node, pod.spec.node_name,
-"Sanity check: expected replica to relocate to master node {}, but found on {}".format(master_node, pod.spec.node_name))
+self.assertEqual(master_nodes[0], pod.spec.node_name,
+"Sanity check: expected replica to relocate to master node {}, but found on {}".format(master_nodes[0], pod.spec.node_name))
 
 # check that pod has correct node affinity
 key = pod.spec.affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms[0].match_expressions[0].key
@@ -966,15 +968,17 @@ class EndToEndTestCase(unittest.TestCase):
 self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
 # node affinity change should cause another rolling update and relocation of replica
-k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
+k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=replica,' + cluster_label)
 k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
 
 except timeout_decorator.TimeoutError:
 print('Operator log: {}'.format(k8s.get_operator_log()))
 raise
 
+# toggle pod anti affinity to make sure replica and master run on separate nodes
+self.assert_distributed_pods(replica_nodes)
+
 @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
-@unittest.skip("Skipping this test until fixed")
 def test_node_readiness_label(self):
 '''
 Remove node readiness label from master node. This must cause a failover.
@@ -984,12 +988,15 @@ class EndToEndTestCase(unittest.TestCase):
 readiness_label = 'lifecycle-status'
 readiness_value = 'ready'
 
-try:
-# get nodes of master and replica(s) (expected target of new master)
-current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label)
-num_replicas = len(current_replica_nodes)
-failover_targets = self.get_failover_targets(current_master_node, current_replica_nodes)
+# verify we are in good state from potential previous tests
+self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
+
+# get nodes of master and replica(s) (expected target of new master)
+master_nodes, replica_nodes = k8s.get_cluster_nodes()
+self.assertNotEqual(master_nodes, [])
+self.assertNotEqual(replica_nodes, [])
 
+try:
 # add node_readiness_label to potential failover nodes
 patch_readiness_label = {
 "metadata": {
@@ -998,30 +1005,43 @@ class EndToEndTestCase(unittest.TestCase):
 }
 }
 }
-self.assertTrue(len(failover_targets) > 0, "No failover targets available")
-for failover_target in failover_targets:
-k8s.api.core_v1.patch_node(failover_target, patch_readiness_label)
+for replica_node in replica_nodes:
+k8s.api.core_v1.patch_node(replica_node, patch_readiness_label)
 
-# define node_readiness_label in config map which should trigger a failover of the master
+# define node_readiness_label in config map which should trigger a rolling update
 patch_readiness_label_config = {
 "data": {
 "node_readiness_label": readiness_label + ':' + readiness_value,
+"node_readiness_label_merge": "AND",
 }
 }
 k8s.update_config(patch_readiness_label_config, "setting readiness label")
-new_master_node, new_replica_nodes = self.assert_failover(
-current_master_node, num_replicas, failover_targets, cluster_label)
+self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+
+# first replica will be replaced and get the new affinity
+# however, it might not start due to a volume node affinity conflict
+# in this case only if the pvc and pod are deleted it can be scheduled
+replica = k8s.get_cluster_replica_pod()
+if replica.status.phase == 'Pending':
+k8s.api.core_v1.delete_namespaced_persistent_volume_claim('pgdata-' + replica.metadata.name, 'default')
+k8s.api.core_v1.delete_namespaced_pod(replica.metadata.name, 'default')
+k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+
+# next master will be switched over and pod needs to be replaced as well to finish the rolling update
+k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
+k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
 
 # patch also node where master ran before
-k8s.api.core_v1.patch_node(current_master_node, patch_readiness_label)
+k8s.api.core_v1.patch_node(master_nodes[0], patch_readiness_label)
 
-# toggle pod anti affinity to move replica away from master node
-self.eventuallyTrue(lambda: self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label), "Pods are redistributed")
-
 except timeout_decorator.TimeoutError:
 print('Operator log: {}'.format(k8s.get_operator_log()))
 raise
 
+# toggle pod anti affinity to move replica away from master node
+self.assert_distributed_pods(master_nodes)
+
 
 @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
 def test_overwrite_pooler_deployment(self):
 k8s = self.k8s
@@ -1056,6 +1076,122 @@ class EndToEndTestCase(unittest.TestCase):
 self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
 0, "Pooler pods not scaled down")
 
+@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+def test_password_rotation(self):
+'''
+Test password rotation and removal of users due to retention policy
+'''
+k8s = self.k8s
+leader = k8s.get_cluster_leader_pod()
+today = date.today()
+
+# enable password rotation for owner of foo database
+pg_patch_inplace_rotation_for_owner = {
+"spec": {
+"usersWithInPlaceSecretRotation": [
+"zalando"
+]
+}
+}
+k8s.api.custom_objects_api.patch_namespaced_custom_object(
+"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_inplace_rotation_for_owner)
+self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+
+# check if next rotation date was set in secret
+secret_data = k8s.get_secret_data("zalando")
+next_rotation_timestamp = datetime.fromisoformat(str(base64.b64decode(secret_data["nextRotation"]), 'utf-8'))
+today90days = today+timedelta(days=90)
+self.assertEqual(today90days, next_rotation_timestamp.date(),
+"Unexpected rotation date in secret of zalando user: expected {}, got {}".format(today90days, next_rotation_timestamp.date()))
+
+# create fake rotation users that should be removed by operator
+# but have one that would still fit into the retention period
+create_fake_rotation_user = """
+CREATE ROLE foo_user201031 IN ROLE foo_user;
+CREATE ROLE foo_user211031 IN ROLE foo_user;
+CREATE ROLE foo_user"""+(today-timedelta(days=40)).strftime("%y%m%d")+""" IN ROLE foo_user;
+"""
+self.query_database(leader.metadata.name, "postgres", create_fake_rotation_user)
+
+# patch foo_user secret with outdated rotation date
+fake_rotation_date = today.isoformat() + ' 00:00:00'
+fake_rotation_date_encoded = base64.b64encode(fake_rotation_date.encode('utf-8'))
+secret_fake_rotation = {
+"data": {
+"nextRotation": str(fake_rotation_date_encoded, 'utf-8'),
+},
+}
+k8s.api.core_v1.patch_namespaced_secret(
+name="foo-user.acid-minimal-cluster.credentials.postgresql.acid.zalan.do",
+namespace="default",
+body=secret_fake_rotation)
+
+# enable password rotation for all other users (foo_user)
+# this will force a sync of secrets for further assertions
+enable_password_rotation = {
+"data": {
+"enable_password_rotation": "true",
+"password_rotation_interval": "30",
+"password_rotation_user_retention": "30", # should be set to 60
+},
+}
+k8s.update_config(enable_password_rotation)
+self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
+"Operator does not get in sync")
+
+# check if next rotation date and username have been replaced
+secret_data = k8s.get_secret_data("foo_user")
+secret_username = str(base64.b64decode(secret_data["username"]), 'utf-8')
+next_rotation_timestamp = datetime.fromisoformat(str(base64.b64decode(secret_data["nextRotation"]), 'utf-8'))
+rotation_user = "foo_user"+today.strftime("%y%m%d")
+today30days = today+timedelta(days=30)
+
+self.assertEqual(rotation_user, secret_username,
+"Unexpected username in secret of foo_user: expected {}, got {}".format(rotation_user, secret_username))
+self.assertEqual(today30days, next_rotation_timestamp.date(),
+"Unexpected rotation date in secret of foo_user: expected {}, got {}".format(today30days, next_rotation_timestamp.date()))
+
+# check if oldest fake rotation users were deleted
+# there should only be foo_user, foo_user+today and foo_user+today-40days
+user_query = """
+SELECT rolname
+FROM pg_catalog.pg_roles
+WHERE rolname LIKE 'foo_user%';
+"""
+self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", user_query)), 3,
+"Found incorrect number of rotation users", 10, 5)
+
+# disable password rotation for all other users (foo_user)
+# and pick smaller intervals to see if the third fake rotation user is dropped
+enable_password_rotation = {
+"data": {
+"enable_password_rotation": "false",
+"password_rotation_interval": "15",
+"password_rotation_user_retention": "30", # 2 * rotation interval
+},
+}
+k8s.update_config(enable_password_rotation)
+self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
+"Operator does not get in sync")
+
+# check if username in foo_user secret is reset
+secret_data = k8s.get_secret_data("foo_user")
+secret_username = str(base64.b64decode(secret_data["username"]), 'utf-8')
+next_rotation_timestamp = str(base64.b64decode(secret_data["nextRotation"]), 'utf-8')
+self.assertEqual("foo_user", secret_username,
+"Unexpected username in secret of foo_user: expected {}, got {}".format("foo_user", secret_username))
+self.assertEqual('', next_rotation_timestamp,
+"Unexpected rotation date in secret of foo_user: expected empty string, got {}".format(next_rotation_timestamp))
+
+# check roles again, there should only be foo_user and foo_user+today
+user_query = """
+SELECT rolname
+FROM pg_catalog.pg_roles
+WHERE rolname LIKE 'foo_user%';
+"""
+self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", user_query)), 2,
+"Found incorrect number of rotation users", 10, 5)
+
 @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
 def test_patroni_config_update(self):
 '''
@@ -1269,6 +1405,7 @@ class EndToEndTestCase(unittest.TestCase):
 "data": {
 "pod_label_wait_timeout": "2s",
 "resync_period": "30s",
+"repair_period": "10s",
 }
 }
 
@@ -1309,7 +1446,8 @@ class EndToEndTestCase(unittest.TestCase):
 patch_resync_config = {
 "data": {
 "pod_label_wait_timeout": "10m",
-"resync_period": "30m",
+"resync_period": "4m",
+"repair_period": "2m",
 }
 }
 k8s.update_config(patch_resync_config, "revert resync interval and pod_label_wait_timeout")
@@ -1413,7 +1551,6 @@ class EndToEndTestCase(unittest.TestCase):
 self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing")
 
 @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
-@unittest.skip("Skipping this test until fixed")
 def test_taint_based_eviction(self):
 '''
 Add taint "postgres=:NoExecute" to node with master. This must cause a failover.
@@ -1427,7 +1564,6 @@ class EndToEndTestCase(unittest.TestCase):
 
 # get nodes of master and replica(s) (expected target of new master)
 master_nodes, replica_nodes = k8s.get_cluster_nodes()
-
 self.assertNotEqual(master_nodes, [])
 self.assertNotEqual(replica_nodes, [])
 
@@ -1442,10 +1578,7 @@ class EndToEndTestCase(unittest.TestCase):
 ]
 }
 }
-
 k8s.api.core_v1.patch_node(master_nodes[0], body)
-self.eventuallyTrue(lambda: k8s.get_cluster_nodes()[0], replica_nodes)
-self.assertNotEqual(lambda: k8s.get_cluster_nodes()[0], master_nodes)
 
 # add toleration to pods
 patch_toleration_config = {
@@ -1454,15 +1587,20 @@ class EndToEndTestCase(unittest.TestCase):
 }
 }
 
+try:
 k8s.update_config(patch_toleration_config, step="allow tainted nodes")
+self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
+"Operator does not get in sync")
+
 self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
 self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
 
+except timeout_decorator.TimeoutError:
+print('Operator log: {}'.format(k8s.get_operator_log()))
+raise
+
 # toggle pod anti affinity to move replica away from master node
-nm, new_replica_nodes = k8s.get_cluster_nodes()
-new_master_node = nm[0]
-self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
+self.assert_distributed_pods(master_nodes)
 
 @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
 def test_zz_cluster_deletion(self):
@@ -1549,39 +1687,6 @@ class EndToEndTestCase(unittest.TestCase):
 }
 k8s.update_config(patch_delete_annotations)
 
-def get_failover_targets(self, master_node, replica_nodes):
-'''
-If all pods live on the same node, failover will happen to other worker(s)
-'''
-k8s = self.k8s
-k8s_master_exclusion = 'kubernetes.io/hostname!=postgres-operator-e2e-tests-control-plane'
-
-failover_targets = [x for x in replica_nodes if x != master_node]
-if len(failover_targets) == 0:
-nodes = k8s.api.core_v1.list_node(label_selector=k8s_master_exclusion)
-for n in nodes.items:
-if n.metadata.name != master_node:
-failover_targets.append(n.metadata.name)
-
-return failover_targets
-
-def assert_failover(self, current_master_node, num_replicas, failover_targets, cluster_label):
-'''
-Check if master is failing over. The replica should move first to be the switchover target
-'''
-k8s = self.k8s
-k8s.wait_for_pod_failover(failover_targets, 'spilo-role=master,' + cluster_label)
-k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
-
-new_master_node, new_replica_nodes = k8s.get_pg_nodes(cluster_label)
-self.assertNotEqual(current_master_node, new_master_node,
-"Master on {} did not fail over to one of {}".format(current_master_node, failover_targets))
-self.assertEqual(num_replicas, len(new_replica_nodes),
-"Expected {} replicas, found {}".format(num_replicas, len(new_replica_nodes)))
-self.assert_master_is_unique()
-
-return new_master_node, new_replica_nodes
-
 def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"):
 '''
 Check that there is a single pod in the k8s cluster with the label "spilo-role=master"
@ -1593,14 +1698,23 @@ class EndToEndTestCase(unittest.TestCase):
|
||||||
num_of_master_pods = k8s.count_pods_with_label(labels, namespace)
|
num_of_master_pods = k8s.count_pods_with_label(labels, namespace)
|
||||||
self.assertEqual(num_of_master_pods, 1, "Expected 1 master pod, found {}".format(num_of_master_pods))
|
self.assertEqual(num_of_master_pods, 1, "Expected 1 master pod, found {}".format(num_of_master_pods))
|
||||||
|
|
||||||
def assert_distributed_pods(self, master_node, replica_nodes, cluster_label):
|
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
|
||||||
|
def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=acid-minimal-cluster'):
|
||||||
'''
|
'''
|
||||||
Other tests can lead to the situation that master and replica are on the same node.
|
Other tests can lead to the situation that master and replica are on the same node.
|
||||||
Toggle pod anti affinty to distribute pods accross nodes (replica in particular).
|
Toggle pod anti affinty to distribute pods accross nodes (replica in particular).
|
||||||
'''
|
'''
|
||||||
k8s = self.k8s
|
k8s = self.k8s
|
||||||
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
|
cluster_labels = 'application=spilo,cluster-name=acid-minimal-cluster'
|
||||||
failover_targets = self.get_failover_targets(master_node, replica_nodes)
|
|
||||||
|
# get nodes of master and replica(s)
|
||||||
|
master_nodes, replica_nodes = k8s.get_cluster_nodes()
|
||||||
|
self.assertNotEqual(master_nodes, [])
|
||||||
|
self.assertNotEqual(replica_nodes, [])
|
||||||
|
|
||||||
|
# if nodes are different we can quit here
|
||||||
|
if master_nodes[0] not in replica_nodes:
|
||||||
|
return True
|
||||||
|
|
||||||
# enable pod anti affintiy in config map which should trigger movement of replica
|
# enable pod anti affintiy in config map which should trigger movement of replica
|
||||||
patch_enable_antiaffinity = {
|
patch_enable_antiaffinity = {
|
||||||
|
|
@ -1608,8 +1722,13 @@ class EndToEndTestCase(unittest.TestCase):
|
||||||
"enable_pod_antiaffinity": "true"
|
"enable_pod_antiaffinity": "true"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
try:
|
||||||
k8s.update_config(patch_enable_antiaffinity, "enable antiaffinity")
|
k8s.update_config(patch_enable_antiaffinity, "enable antiaffinity")
|
||||||
self.assert_failover(master_node, len(replica_nodes), failover_targets, cluster_label)
|
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
|
||||||
|
|
||||||
|
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_labels)
|
||||||
|
k8s.wait_for_running_pods(cluster_labels, 2)
|
||||||
|
|
||||||
# now disable pod anti affintiy again which will cause yet another failover
|
# now disable pod anti affintiy again which will cause yet another failover
|
||||||
patch_disable_antiaffinity = {
|
patch_disable_antiaffinity = {
|
||||||
|
|
@ -1618,8 +1737,25 @@ class EndToEndTestCase(unittest.TestCase):
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
k8s.update_config(patch_disable_antiaffinity, "disable antiaffinity")
|
k8s.update_config(patch_disable_antiaffinity, "disable antiaffinity")
|
||||||
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
|
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
|
||||||
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
|
|
||||||
|
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_labels)
|
||||||
|
k8s.wait_for_running_pods(cluster_labels, 2)
|
||||||
|
|
||||||
|
master_nodes, replica_nodes = k8s.get_cluster_nodes()
|
||||||
|
self.assertNotEqual(master_nodes, [])
|
||||||
|
self.assertNotEqual(replica_nodes, [])
|
||||||
|
|
||||||
|
# if nodes are different we can quit here
|
||||||
|
for target_node in target_nodes:
|
||||||
|
if (target_node not in master_nodes or target_node not in replica_nodes) and master_nodes[0] in replica_nodes:
|
||||||
|
print('Pods run on the same node')
|
||||||
|
return False
|
||||||
|
|
||||||
|
except timeout_decorator.TimeoutError:
|
||||||
|
print('Operator log: {}'.format(k8s.get_operator_log()))
|
||||||
|
raise
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def list_databases(self, pod_name):
|
def list_databases(self, pod_name):
|
||||||
|
|
|
||||||
|
|
@ -17,6 +17,9 @@ spec:
zalando:
- superuser
- createdb
foo_user: []
# usersWithSecretRotation: "foo_user"
# usersWithInPlaceSecretRotation: "flyway,bar_owner_user"
enableMasterLoadBalancer: false
enableReplicaLoadBalancer: false
enableConnectionPooler: false # enable/disable connection pooler deployment

@ -22,6 +22,7 @@ data:
# connection_pooler_number_of_instances: 2
# connection_pooler_schema: "pooler"
# connection_pooler_user: "pooler"
crd_categories: "all"
# custom_service_annotations: "keyx:valuez,keya:valuea"
# custom_pod_annotations: "keya:valuea,keyb:valueb"
db_hosted_zone: db.example.com

@ -36,7 +37,6 @@ data:
# downscaler_annotations: "deployment-time,downscaler/*"
# enable_admin_role_for_users: "true"
# enable_crd_registration: "true"
# enable_crd_validation: "true"
# enable_cross_namespace_secret: "false"
# enable_database_access: "true"
enable_ebs_gp3_migration: "false"

@ -44,6 +44,7 @@ data:
# enable_init_containers: "true"
# enable_lazy_spilo_upgrade: "false"
enable_master_load_balancer: "false"
enable_password_rotation: "false"
enable_pgversion_env_var: "true"
# enable_pod_antiaffinity: "false"
# enable_pod_disruption_budget: "true"

@ -86,11 +87,14 @@ data:
# min_cpu_limit: 250m
# min_memory_limit: 250Mi
# minimal_major_version: "9.6"
# node_readiness_label: ""
# node_readiness_label: "status:ready"
# node_readiness_label_merge: "OR"
# oauth_token_secret_name: postgresql-operator
# pam_configuration: |
# https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees
# pam_role_name: zalandos
# password_rotation_interval: "90"
# password_rotation_user_retention: "180"
pdb_name_format: "postgres-{cluster}-pdb"
# pod_antiaffinity_topology_key: "kubernetes.io/hostname"
pod_deletion_wait_timeout: 10m

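The commented `node_readiness_label_merge` setting only appears here as an example; the sketch below is an illustration of the intended difference between the two allowed values, written as a standalone Go program. The helper name and merge behaviour are assumptions for illustration and are not taken from this commit.

package main

import "fmt"

// term is a simplified stand-in for a corev1.NodeSelectorTerm: all labels
// inside one term must match (AND), while separate terms are alternatives (OR).
type term map[string]string

// mergeReadinessLabels is an illustrative sketch only: "AND" folds the
// readiness labels into every existing term, "OR" appends them as an extra term.
func mergeReadinessLabels(existing []term, readiness map[string]string, mode string) []term {
	if len(readiness) == 0 {
		return existing
	}
	if mode == "OR" || len(existing) == 0 {
		extra := term{}
		for k, v := range readiness {
			extra[k] = v
		}
		return append(existing, extra)
	}
	// default / "AND": every term additionally has to satisfy the readiness labels
	for _, t := range existing {
		for k, v := range readiness {
			t[k] = v
		}
	}
	return existing
}

func main() {
	readiness := map[string]string{"status": "ready"}
	fmt.Println(mergeReadinessLabels([]term{{"disktype": "ssd"}}, readiness, "AND"))
	// [map[disktype:ssd status:ready]]
	fmt.Println(mergeReadinessLabels([]term{{"disktype": "ssd"}}, readiness, "OR"))
	// [map[disktype:ssd] map[status:ready]]
}
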
@ -59,6 +59,11 @@ spec:
configuration:
type: object
properties:
crd_categories:
type: array
nullable: true
items:
type: string
docker_image:
type: string
default: "registry.opensource.zalan.do/acid/spilo-14:2.1-p3"

@ -67,6 +72,7 @@ spec:
default: true
enable_crd_validation:
type: boolean
description: deprecated
default: true
enable_lazy_spilo_upgrade:
type: boolean

@ -88,11 +94,13 @@ spec:
default: false
max_instances:
type: integer
minimum: -1 # -1 = disabled
description: "-1 = disabled"
minimum: -1
default: -1
min_instances:
type: integer
minimum: -1 # -1 = disabled
description: "-1 = disabled"
minimum: -1
default: -1
resync_period:
type: string

@ -120,6 +128,15 @@ spec:
users:
type: object
properties:
enable_password_rotation:
type: boolean
default: false
password_rotation_interval:
type: integer
default: 90
password_rotation_user_retention:
type: integer
default: 180
replication_username:
type: string
default: standby

@ -173,6 +190,9 @@ spec:
type: array
items:
type: string
enable_cross_namespace_secret:
type: boolean
default: false
enable_init_containers:
type: boolean
default: true

@ -228,6 +248,11 @@ spec:
type: object
additionalProperties:
type: string
node_readiness_label_merge:
type: string
enum:
- "AND"
- "OR"
oauth_token_secret_name:
type: string
default: "postgresql-operator"

@ -5,7 +5,8 @@ metadata:
configuration:
docker_image: registry.opensource.zalan.do/acid/spilo-14:2.1-p3
# enable_crd_registration: true
# enable_crd_validation: true
# crd_categories:
# - all
# enable_lazy_spilo_upgrade: false
enable_pgversion_env_var: true
# enable_shm_volume: true

@ -25,6 +26,9 @@ configuration:
# protocol: TCP
workers: 8
users:
enable_password_rotation: false
password_rotation_interval: 90
password_rotation_user_retention: 180
replication_username: standby
super_username: postgres
major_version_upgrade:

@ -70,6 +74,7 @@ configuration:
master_pod_move_timeout: 20m
# node_readiness_label:
# status: ready
# node_readiness_label_merge: "OR"
oauth_token_secret_name: postgresql-operator
pdb_name_format: "postgres-{cluster}-pdb"
pod_antiaffinity_topology_key: "kubernetes.io/hostname"

@ -145,7 +145,7 @@ spec:
- "transaction"
numberOfInstances:
type: integer
minimum: 2
minimum: 1
resources:
type: object
required:

@ -199,8 +199,9 @@ spec:
type: boolean
enableShmVolume:
type: boolean
init_containers: # deprecated
init_containers:
type: array
description: deprecated
nullable: true
items:
type: object

@ -227,8 +228,8 @@ spec:
items:
type: object
required:
- weight
- preference
- weight
properties:
preference:
type: object

@ -346,8 +347,9 @@ spec:
type: object
additionalProperties:
type: string
pod_priority_class_name: # deprecated
pod_priority_class_name:
type: string
description: deprecated
podPriorityClassName:
type: string
postgresql:

@ -391,8 +393,9 @@ spec:
type: boolean
secretNamespace:
type: string
replicaLoadBalancer: # deprecated
replicaLoadBalancer:
type: boolean
description: deprecated
resources:
type: object
required:

@ -543,14 +546,14 @@ spec:
- PreferNoSchedule
tolerationSeconds:
type: integer
useLoadBalancer: # deprecated
useLoadBalancer:
type: boolean
description: deprecated
users:
type: object
additionalProperties:
type: array
nullable: true
description: "Role flags specified here must not contradict each other"
items:
type: string
enum:

@ -582,6 +585,16 @@ spec:
- SUPERUSER
- nosuperuser
- NOSUPERUSER
usersWithPasswordRotation:
type: array
nullable: true
items:
type: string
usersWithInPlacePasswordRotation:
type: array
nullable: true
items:
type: string
volume:
type: object
required:

@ -1,6 +1,8 @@
package v1

import (
"fmt"

acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do"
"github.com/zalando/postgres-operator/pkg/util"
apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

@ -11,11 +13,13 @@ import (
const (
PostgresCRDResourceKind = "postgresql"
PostgresCRDResourcePlural = "postgresqls"
PostgresCRDResourceList = PostgresCRDResourceKind + "List"
PostgresCRDResouceName = PostgresCRDResourcePlural + "." + acidzalando.GroupName
PostgresCRDResourceShort = "pg"

OperatorConfigCRDResouceKind = "OperatorConfiguration"
OperatorConfigCRDResourcePlural = "operatorconfigurations"
OperatorConfigCRDResourceList = OperatorConfigCRDResouceKind + "List"
OperatorConfigCRDResourceName = OperatorConfigCRDResourcePlural + "." + acidzalando.GroupName
OperatorConfigCRDResourceShort = "opconfig"
)

@ -149,6 +153,7 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
"targetContainers": {
Type: "array",
Nullable: true,
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",

@ -200,7 +205,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
"timestamp": {
Type: "string",
Description: "Date-time format that specifies a timezone as an offset relative to UTC e.g. 1996-12-19T16:39:57-08:00",
Pattern: "^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\\.[0-9]+)?(([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$",
},
"uid": {

@ -243,12 +247,10 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Properties: map[string]apiextv1.JSONSchemaProps{
"cpu": {
Type: "string",
Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
},
"memory": {
Type: "string",
Description: "Plain integer or fixed-point integer using one of these suffixes: E, P, T, G, M, k (with or without a tailing i). Must be greater than 0",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
},
},

@ -259,12 +261,10 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Properties: map[string]apiextv1.JSONSchemaProps{
"cpu": {
Type: "string",
Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
},
"memory": {
Type: "string",
Description: "Plain integer or fixed-point integer using one of these suffixes: E, P, T, G, M, k (with or without a tailing i). Must be greater than 0",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
},
},

@ -284,7 +284,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
Description: "User names specified here as database owners must be declared in the users key of the spec key",
},
},
},

@ -311,7 +310,8 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
"init_containers": {
Type: "array",
Description: "Deprecated",
Description: "deprecated",
Nullable: true,
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",

@ -321,6 +321,7 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
"initContainers": {
Type: "array",
Nullable: true,
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",

@ -359,8 +360,22 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Required: []string{"key", "operator"},
Allows: true,
Properties: map[string]apiextv1.JSONSchemaProps{
"key": {
Type: "string",
},
"operator": {
Type: "string",
},
"values": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
},
},
},
},

@ -370,8 +385,22 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Required: []string{"key", "operator"},
Allows: true,
Properties: map[string]apiextv1.JSONSchemaProps{
"key": {
Type: "string",
},
"operator": {
Type: "string",
},
"values": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
},
},
},
},

@ -401,8 +430,22 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Required: []string{"key", "operator"},
Allows: true,
Properties: map[string]apiextv1.JSONSchemaProps{
"key": {
Type: "string",
},
"operator": {
Type: "string",
},
"values": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
},
},
},
},

@ -412,8 +455,22 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Required: []string{"key", "operator"},
Allows: true,
Properties: map[string]apiextv1.JSONSchemaProps{
"key": {
Type: "string",
},
"operator": {
Type: "string",
},
"values": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
},
},
},
},

@ -492,7 +549,7 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
"pod_priority_class_name": {
Type: "string",
Description: "Deprecated",
Description: "deprecated",
},
"podPriorityClassName": {
Type: "string",

@ -579,7 +636,7 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
"replicaLoadBalancer": {
Type: "boolean",
Description: "Deprecated",
Description: "deprecated",
},
"resources": {
Type: "object",

@ -591,12 +648,10 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Properties: map[string]apiextv1.JSONSchemaProps{
"cpu": {
Type: "string",
Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
},
"memory": {
Type: "string",
Description: "Plain integer or fixed-point integer using one of these suffixes: E, P, T, G, M, k (with or without a tailing i). Must be greater than 0",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
},
},

@ -607,12 +662,10 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Properties: map[string]apiextv1.JSONSchemaProps{
"cpu": {
Type: "string",
Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
},
"memory": {
Type: "string",
Description: "Plain integer or fixed-point integer using one of these suffixes: E, P, T, G, M, k (with or without a tailing i). Must be greater than 0",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
},
},

@ -632,6 +685,7 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
"sidecars": {
Type: "array",
Nullable: true,
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",

@ -778,14 +832,13 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
"useLoadBalancer": {
Type: "boolean",
Description: "Deprecated",
Description: "deprecated",
},
"users": {
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Schema: &apiextv1.JSONSchemaProps{
Type: "array",
Description: "Role flags specified here must not contradict each other",
Nullable: true,
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{

@ -881,6 +934,24 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
},
},
"usersWithSecretRotation": {
Type: "array",
Nullable: true,
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
"usersWithInPlaceSecretRotation": {
Type: "array",
Nullable: true,
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
"volume": {
Type: "object",
Required: []string{"size"},

@ -938,7 +1009,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
"size": {
Type: "string",
Description: "Value must not be zero",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
},
"storageClass": {

@ -991,6 +1061,15 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"configuration": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"crd_categories": {
Type: "array",
Nullable: true,
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
"docker_image": {
Type: "string",
},

@ -999,6 +1078,7 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
},
"enable_crd_validation": {
Type: "boolean",
Description: "deprecated",
},
"enable_lazy_spilo_upgrade": {
Type: "boolean",

@ -1044,6 +1124,7 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
},
"sidecars": {
Type: "array",
Nullable: true,
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",

@ -1155,6 +1236,7 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
},
"infrastructure_roles_secrets": {
Type: "array",
Nullable: true,
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",

@ -1215,6 +1297,17 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
},
},
},
"node_readiness_label_merge": {
Type: "string",
Enum: []apiextv1.JSON{
{
Raw: []byte(`"AND"`),
},
{
Raw: []byte(`"OR"`),
},
},
},
"oauth_token_secret_name": {
Type: "string",
},

@ -1645,18 +1738,27 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
},
}

func buildCRD(name, kind, plural, short string, columns []apiextv1.CustomResourceColumnDefinition, validation apiextv1.CustomResourceValidation) *apiextv1.CustomResourceDefinition {
func buildCRD(name, kind, plural, list, short string,
categories []string,
columns []apiextv1.CustomResourceColumnDefinition,
validation apiextv1.CustomResourceValidation) *apiextv1.CustomResourceDefinition {
return &apiextv1.CustomResourceDefinition{
TypeMeta: metav1.TypeMeta{
APIVersion: fmt.Sprintf("%s/%s", apiextv1.GroupName, apiextv1.SchemeGroupVersion.Version),
Kind: "CustomResourceDefinition",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: apiextv1.CustomResourceDefinitionSpec{
Group: SchemeGroupVersion.Group,
Names: apiextv1.CustomResourceDefinitionNames{
Plural: plural,
ShortNames: []string{short},
Kind: kind,
Categories: []string{"all"},
ListKind: list,
Plural: plural,
Singular: kind,
ShortNames: []string{short},
Categories: categories,
},
Scope: apiextv1.NamespaceScoped,
Versions: []apiextv1.CustomResourceDefinitionVersion{

@ -1676,33 +1778,25 @@ func buildCRD(name, kind, plural, short string, columns []apiextv1.CustomResourc
}

// PostgresCRD returns CustomResourceDefinition built from PostgresCRDResource
func PostgresCRD(enableValidation *bool) *apiextv1.CustomResourceDefinition {
func PostgresCRD(crdCategories []string) *apiextv1.CustomResourceDefinition {
postgresCRDvalidation := apiextv1.CustomResourceValidation{}

if enableValidation != nil && *enableValidation {
postgresCRDvalidation = PostgresCRDResourceValidation
}

return buildCRD(PostgresCRDResouceName,
PostgresCRDResourceKind,
PostgresCRDResourcePlural,
PostgresCRDResourceList,
PostgresCRDResourceShort,
crdCategories,
PostgresCRDResourceColumns,
postgresCRDvalidation)
PostgresCRDResourceValidation)
}

// ConfigurationCRD returns CustomResourceDefinition built from OperatorConfigCRDResource
func ConfigurationCRD(enableValidation *bool) *apiextv1.CustomResourceDefinition {
func ConfigurationCRD(crdCategories []string) *apiextv1.CustomResourceDefinition {
opconfigCRDvalidation := apiextv1.CustomResourceValidation{}

if enableValidation != nil && *enableValidation {
opconfigCRDvalidation = OperatorConfigCRDResourceValidation
}

return buildCRD(OperatorConfigCRDResourceName,
OperatorConfigCRDResouceKind,
OperatorConfigCRDResourcePlural,
OperatorConfigCRDResourceList,
OperatorConfigCRDResourceShort,
crdCategories,
OperatorConfigCRDResourceColumns,
opconfigCRDvalidation)
OperatorConfigCRDResourceValidation)
}

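The categories passed into buildCRD end up verbatim in the CRD names, so an empty `crd_categories` list would remove the resources from groupings like `kubectl get all`. As a rough, standalone illustration of how a caller might fall back to the previous behaviour when nothing is configured (the default below is an assumption for illustration, not taken from the operator code):

package main

import "fmt"

// categoriesOrDefault mirrors the idea behind crd_categories: when nothing is
// configured, keep grouping the CRDs under "all"; otherwise the configured list wins.
func categoriesOrDefault(configured []string) []string {
	if len(configured) == 0 {
		return []string{"all"}
	}
	return configured
}

func main() {
	fmt.Println(categoriesOrDefault(nil))                        // [all]
	fmt.Println(categoriesOrDefault([]string{"postgres"}))       // [postgres]
	fmt.Println(categoriesOrDefault([]string{"all", "zalando"})) // [all zalando]
}
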
@ -39,6 +39,9 @@ type OperatorConfigurationList struct {
type PostgresUsersConfiguration struct {
SuperUsername string `json:"super_username,omitempty"`
ReplicationUsername string `json:"replication_username,omitempty"`
EnablePasswordRotation bool `json:"enable_password_rotation,omitempty"`
PasswordRotationInterval uint32 `json:"password_rotation_interval,omitempty"`
PasswordRotationUserRetention uint32 `json:"password_rotation_user_retention,omitempty"`
}

// MajorVersionUpgradeConfiguration defines how to execute major version upgrades of Postgres.

@ -82,6 +85,7 @@ type KubernetesMetaConfiguration struct {
DeleteAnnotationDateKey string `json:"delete_annotation_date_key,omitempty"`
DeleteAnnotationNameKey string `json:"delete_annotation_name_key,omitempty"`
NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"`
NodeReadinessLabelMerge string `json:"node_readiness_label_merge,omitempty"`
CustomPodAnnotations map[string]string `json:"custom_pod_annotations,omitempty"`
// TODO: use a proper toleration structure?
PodToleration map[string]string `json:"toleration,omitempty"`

@ -217,6 +221,7 @@ type OperatorLogicalBackupConfiguration struct {
type OperatorConfigurationData struct {
EnableCRDRegistration *bool `json:"enable_crd_registration,omitempty"`
EnableCRDValidation *bool `json:"enable_crd_validation,omitempty"`
CRDCategories []string `json:"crd_categories,omitempty"`
EnableLazySpiloUpgrade bool `json:"enable_lazy_spilo_upgrade,omitempty"`
EnablePgVersionEnvVar bool `json:"enable_pgversion_env_var,omitempty"`
EnableSpiloWalPathCompat bool `json:"enable_spilo_wal_path_compat,omitempty"`

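The three new fields map one-to-one onto the `users` section of the OperatorConfiguration via their JSON tags. A small decoding sketch, using a local copy of the struct so it runs standalone:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of the relevant PostgresUsersConfiguration fields, only to
// demonstrate the JSON keys shown above.
type usersConfig struct {
	EnablePasswordRotation        bool   `json:"enable_password_rotation,omitempty"`
	PasswordRotationInterval      uint32 `json:"password_rotation_interval,omitempty"`
	PasswordRotationUserRetention uint32 `json:"password_rotation_user_retention,omitempty"`
}

func main() {
	raw := []byte(`{"enable_password_rotation": true, "password_rotation_interval": 30}`)
	var cfg usersConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// Retention was not set, so it stays at the zero value here; the CRD
	// defaults (90 / 180) are applied by the API server, not by this decoding.
	fmt.Printf("%+v\n", cfg)
	// {EnablePasswordRotation:true PasswordRotationInterval:30 PasswordRotationUserRetention:0}
}
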
@ -53,8 +53,11 @@ type PostgresSpec struct {
// load balancers' source ranges are the same for master and replica services
AllowedSourceRanges []string `json:"allowedSourceRanges"`

NumberOfInstances int32 `json:"numberOfInstances"`
Users map[string]UserFlags `json:"users,omitempty"`
UsersWithSecretRotation []string `json:"usersWithSecretRotation,omitempty"`
UsersWithInPlaceSecretRotation []string `json:"usersWithInPlaceSecretRotation,omitempty"`

NumberOfInstances int32 `json:"numberOfInstances"`
MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
Clone *CloneDescription `json:"clone,omitempty"`
ClusterName string `json:"-"`

@ -377,6 +377,11 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData
*out = new(bool)
**out = **in
}
if in.CRDCategories != nil {
in, out := &in.CRDCategories, &out.CRDCategories
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ShmVolume != nil {
in, out := &in.ShmVolume, &out.ShmVolume
*out = new(bool)

@ -641,6 +646,16 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
(*out)[key] = outVal
}
}
if in.UsersWithSecretRotation != nil {
in, out := &in.UsersWithSecretRotation, &out.UsersWithSecretRotation
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.UsersWithInPlaceSecretRotation != nil {
in, out := &in.UsersWithInPlaceSecretRotation, &out.UsersWithInPlaceSecretRotation
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MaintenanceWindows != nil {
in, out := &in.MaintenanceWindows, &out.MaintenanceWindows
*out = make([]MaintenanceWindow, len(*in))

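The generated code copies the new slice fields element by element rather than assigning them. A standalone sketch of why that matters: a plain assignment would leave both specs sharing the same backing array.

package main

import "fmt"

func main() {
	original := []string{"foo_user"}

	aliased := original // shares the backing array with original
	deepCopied := make([]string, len(original))
	copy(deepCopied, original) // independent copy, as in DeepCopyInto

	original[0] = "changed"
	fmt.Println(aliased[0])    // "changed" - mutation leaks through the alias
	fmt.Println(deepCopied[0]) // "foo_user" - the deep copy is unaffected
}
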
@ -381,7 +381,6 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
reasons = append(reasons, "new statefulset's number of replicas does not match the current one")
}
if !reflect.DeepEqual(c.Statefulset.Annotations, statefulSet.Annotations) {
match = false
needsReplace = true
reasons = append(reasons, "new statefulset's annotations do not match the current one")
}

@ -412,6 +411,11 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
needsRollUpdate = true
reasons = append(reasons, "new statefulset's pod affinity does not match the current one")
}
if len(c.Statefulset.Spec.Template.Spec.Tolerations) != len(statefulSet.Spec.Template.Spec.Tolerations) {
needsReplace = true
needsRollUpdate = true
reasons = append(reasons, "new statefulset's pod tolerations does not match the current one")
}

// Some generated fields like creationTimestamp make it not possible to use DeepCompare on Spec.Template.ObjectMeta
if !reflect.DeepEqual(c.Statefulset.Spec.Template.Labels, statefulSet.Spec.Template.Labels) {

@ -433,13 +437,11 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
}

if !reflect.DeepEqual(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations) {
match = false
needsReplace = true
needsRollUpdate = true
reasons = append(reasons, "new statefulset's pod template metadata annotations does not match the current one")
}
if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.SecurityContext, statefulSet.Spec.Template.Spec.SecurityContext) {
match = false
needsReplace = true
needsRollUpdate = true
reasons = append(reasons, "new statefulset's pod template security context in spec does not match the current one")

@ -475,7 +477,6 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
// we assume any change in priority happens by rolling out a new priority class
// changing the priority value in an existing class is not supproted
if c.Statefulset.Spec.Template.Spec.PriorityClassName != statefulSet.Spec.Template.Spec.PriorityClassName {
match = false
needsReplace = true
needsRollUpdate = true
reasons = append(reasons, "new statefulset's pod priority class in spec does not match the current one")

@ -716,13 +717,18 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
}
}

// connection pooler needs one system user created, which is done in
// check if users need to be synced
// initUsers. Check if it needs to be called.
sameUsers := reflect.DeepEqual(oldSpec.Spec.Users, newSpec.Spec.Users) &&
reflect.DeepEqual(oldSpec.Spec.PreparedDatabases, newSpec.Spec.PreparedDatabases)
sameRotatedUsers := reflect.DeepEqual(oldSpec.Spec.UsersWithSecretRotation, newSpec.Spec.UsersWithSecretRotation) &&
reflect.DeepEqual(oldSpec.Spec.UsersWithInPlaceSecretRotation, newSpec.Spec.UsersWithInPlaceSecretRotation)

// connection pooler needs one system user created, which is done in
// initUsers. Check if it needs to be called.
needConnectionPooler := needMasterConnectionPoolerWorker(&newSpec.Spec) ||
needReplicaConnectionPoolerWorker(&newSpec.Spec)
if !sameUsers || needConnectionPooler {
if !sameUsers || !sameRotatedUsers || needConnectionPooler {
c.logger.Debugf("initialize users")
if err := c.initUsers(); err != nil {
c.logger.Errorf("could not init users: %v", err)

@ -1017,6 +1023,7 @@ func (c *Cluster) initSystemUsers() {
Origin: spec.RoleOriginSystem,
Name: c.OpConfig.ReplicationUsername,
Namespace: c.Namespace,
Flags: []string{constants.RoleFlagLogin},
Password: util.RandomPassword(constants.PasswordLength),
}

@ -1146,7 +1153,6 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix, searchPath, secretNamespace string) error {

for defaultRole, inherits := range defaultRoles {

namespace := c.Namespace
//if namespaced secrets are allowed
if secretNamespace != "" {

@ -1169,8 +1175,10 @@ func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix
}

adminRole := ""
isOwner := false
if strings.Contains(defaultRole, constants.OwnerRoleNameSuffix) {
adminRole = admin
isOwner = true
} else {
adminRole = prefix + constants.OwnerRoleNameSuffix
}

@ -1184,6 +1192,7 @@ func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix
MemberOf: memberOf,
Parameters: map[string]string{"search_path": searchPath},
AdminRole: adminRole,
IsDbOwner: isOwner,
}
if currentRole, present := c.pgUsers[roleName]; present {
c.pgUsers[roleName] = c.resolveNameConflict(&currentRole, &newRole)

@ -1205,6 +1214,14 @@ func (c *Cluster) initRobotUsers() error {
}
namespace := c.Namespace

// check if role is specified as database owner
isOwner := false
for _, owner := range c.Spec.Databases {
if username == owner {
isOwner = true
}
}

//if namespaced secrets are allowed
if c.Config.OpConfig.EnableCrossNamespaceSecret {
if strings.Contains(username, ".") {

@ -1229,6 +1246,7 @@ func (c *Cluster) initRobotUsers() error {
Password: util.RandomPassword(constants.PasswordLength),
Flags: flags,
AdminRole: adminRole,
IsDbOwner: isOwner,
}
if currentRole, present := c.pgUsers[username]; present {
c.pgUsers[username] = c.resolveNameConflict(&currentRole, &newRole)

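The new `sameRotatedUsers` guard relies on reflect.DeepEqual over the two rotation lists, which treats element order as significant and distinguishes nil from an empty slice. A quick standalone sketch of that behaviour:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	oldList := []string{"foo_user", "bar_owner_user"}
	newList := []string{"foo_user", "bar_owner_user"}
	reordered := []string{"bar_owner_user", "foo_user"}

	fmt.Println(reflect.DeepEqual(oldList, newList))           // true
	fmt.Println(reflect.DeepEqual(oldList, reordered))         // false - order matters
	fmt.Println(reflect.DeepEqual([]string(nil), []string{}))  // false - nil vs empty slice
}
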
@ -309,7 +309,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
},
}

nodeAffinity := nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity)
nodeAffinity := c.nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity)
if c.OpConfig.EnablePodAntiAffinity {
labelsSet := labels.Set(c.connectionPoolerLabels(role, false).MatchLabels)
podTemplate.Spec.Affinity = generatePodAffinity(labelsSet, c.OpConfig.PodAntiAffinityTopologyKey, nodeAffinity)

@ -14,6 +14,7 @@ import (
|
||||||
"github.com/zalando/postgres-operator/pkg/spec"
|
"github.com/zalando/postgres-operator/pkg/spec"
|
||||||
"github.com/zalando/postgres-operator/pkg/util/constants"
|
"github.com/zalando/postgres-operator/pkg/util/constants"
|
||||||
"github.com/zalando/postgres-operator/pkg/util/retryutil"
|
"github.com/zalando/postgres-operator/pkg/util/retryutil"
|
||||||
|
"github.com/zalando/postgres-operator/pkg/util/users"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|
@ -27,6 +28,12 @@ const (
 	WHERE a.rolname = ANY($1)
 	ORDER BY 1;`

+	getUsersForRetention = `SELECT r.rolname, right(r.rolname, 6) AS roldatesuffix
+	FROM pg_roles r
+	JOIN unnest($1::text[]) AS u(name) ON r.rolname LIKE u.name || '%'
+	AND right(r.rolname, 6) ~ '^[0-9\.]+$'
+	ORDER BY 1;`
+
 	getDatabasesSQL = `SELECT datname, pg_get_userbyid(datdba) AS owner FROM pg_database;`
 	getSchemasSQL   = `SELECT n.nspname AS dbschema FROM pg_catalog.pg_namespace n
 	WHERE n.nspname !~ '^pg_' AND n.nspname <> 'information_schema' ORDER BY 1`
@ -234,6 +241,65 @@ func (c *Cluster) readPgUsersFromDatabase(userNames []string) (users spec.PgUser
 	return users, nil
 }

+func findUsersFromRotation(rotatedUsers []string, db *sql.DB) (map[string]string, error) {
+	extraUsers := make(map[string]string, 0)
+	rows, err := db.Query(getUsersForRetention, pq.Array(rotatedUsers))
+	if err != nil {
+		return nil, fmt.Errorf("query failed: %v", err)
+	}
+	defer func() {
+		if err2 := rows.Close(); err2 != nil {
+			err = fmt.Errorf("error when closing query cursor: %v", err2)
+		}
+	}()
+
+	for rows.Next() {
+		var (
+			rolname, roldatesuffix string
+		)
+		err := rows.Scan(&rolname, &roldatesuffix)
+		if err != nil {
+			return nil, fmt.Errorf("error when processing rows of deprecated users: %v", err)
+		}
+		extraUsers[rolname] = roldatesuffix
+	}
+
+	return extraUsers, nil
+}
+
+func (c *Cluster) cleanupRotatedUsers(rotatedUsers []string, db *sql.DB) error {
+	c.setProcessName("checking for rotated users to remove from the database due to configured retention")
+	extraUsers, err := findUsersFromRotation(rotatedUsers, db)
+	if err != nil {
+		return fmt.Errorf("error when querying for deprecated users from password rotation: %v", err)
+	}
+
+	// make sure user retention policy aligns with rotation interval
+	retenionDays := c.OpConfig.PasswordRotationUserRetention
+	if retenionDays < 2*c.OpConfig.PasswordRotationInterval {
+		retenionDays = 2 * c.OpConfig.PasswordRotationInterval
+		c.logger.Warnf("user retention days too few compared to rotation interval %d - setting it to %d", c.OpConfig.PasswordRotationInterval, retenionDays)
+	}
+	retentionDate := time.Now().AddDate(0, 0, int(retenionDays)*-1)
+
+	for rotatedUser, dateSuffix := range extraUsers {
+		userCreationDate, err := time.Parse("060102", dateSuffix)
+		if err != nil {
+			c.logger.Errorf("could not parse creation date suffix of user %q: %v", rotatedUser, err)
+			continue
+		}
+		if retentionDate.After(userCreationDate) {
+			c.logger.Infof("dropping user %q due to configured days in password_rotation_user_retention", rotatedUser)
+			if err = users.DropPgUser(rotatedUser, db); err != nil {
+				c.logger.Errorf("could not drop role %q: %v", rotatedUser, err)
+				continue
+			}
+		}
+	}
+
+	return nil
+}
+
 // getDatabases returns the map of current databases with owners
 // The caller is responsible for opening and closing the database connection
 func (c *Cluster) getDatabases() (dbs map[string]string, err error) {
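cleanupRotatedUsers drops rotation roles whose six-digit date suffix falls outside the retention window, and it widens the retention to at least twice the rotation interval (logging a warning). A minimal standalone sketch of that date arithmetic, with hard-coded values standing in for the operator configuration:

package main

import (
	"fmt"
	"time"
)

func main() {
	rotationInterval := 1 // days, as in password_rotation_interval
	retentionDays := 3    // days, as in password_rotation_user_retention
	if retentionDays < 2*rotationInterval {
		retentionDays = 2 * rotationInterval
	}
	retentionDate := time.Now().AddDate(0, 0, -retentionDays)

	// a rotation user like "foo220301" carries its creation date as suffix
	suffix := "220301"
	created, err := time.Parse("060102", suffix)
	if err != nil {
		fmt.Println("unparsable suffix:", err)
		return
	}
	fmt.Println("drop user:", retentionDate.After(created))
}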
@ -327,7 +327,7 @@ func generateCapabilities(capabilities []string) *v1.Capabilities {
 	return nil
 }

-func nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAffinity) *v1.Affinity {
+func (c *Cluster) nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAffinity) *v1.Affinity {
 	if len(nodeReadinessLabel) == 0 && nodeAffinity == nil {
 		return nil
 	}
@ -352,8 +352,18 @@ func nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAff
 			},
 		}
 	} else {
-		nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
-			NodeSelectorTerms: append(nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, nodeReadinessSelectorTerm),
+		if c.OpConfig.NodeReadinessLabelMerge == "OR" {
+			manifestTerms := nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
+			manifestTerms = append(manifestTerms, nodeReadinessSelectorTerm)
+			nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
+				NodeSelectorTerms: manifestTerms,
+			}
+		} else if c.OpConfig.NodeReadinessLabelMerge == "AND" {
+			for i, nodeSelectorTerm := range nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
+				manifestExpressions := nodeSelectorTerm.MatchExpressions
+				manifestExpressions = append(manifestExpressions, matchExpressions...)
+				nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[i] = v1.NodeSelectorTerm{MatchExpressions: manifestExpressions}
+			}
 		}
 	}
 }
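With node_readiness_label_merge the readiness label is either appended as an extra NodeSelectorTerm (OR: a node matches the manifest affinity or the readiness label) or folded into every existing term (AND: each manifest term additionally has to match the readiness label). A self-contained sketch of the two merge strategies using the upstream Kubernetes types; the label keys are illustrative, this is not the operator code itself:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func merge(terms []v1.NodeSelectorTerm, readiness v1.NodeSelectorRequirement, mode string) []v1.NodeSelectorTerm {
	readinessTerm := v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{readiness}}
	switch mode {
	case "OR":
		// nodes may satisfy either the manifest terms or the readiness label
		return append(terms, readinessTerm)
	case "AND":
		// every manifest term additionally has to satisfy the readiness label
		for i := range terms {
			terms[i].MatchExpressions = append(terms[i].MatchExpressions, readiness)
		}
	}
	return terms
}

func main() {
	manifest := []v1.NodeSelectorTerm{{MatchExpressions: []v1.NodeSelectorRequirement{
		{Key: "lifecycle-status", Operator: v1.NodeSelectorOpIn, Values: []string{"ready"}},
	}}}
	readiness := v1.NodeSelectorRequirement{Key: "node-ready", Operator: v1.NodeSelectorOpIn, Values: []string{"true"}}

	fmt.Printf("OR:  %+v\n", merge(append([]v1.NodeSelectorTerm{}, manifest...), readiness, "OR"))
	fmt.Printf("AND: %+v\n", merge(manifest, readiness, "AND"))
}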
@ -1260,7 +1270,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		effectiveRunAsUser,
 		effectiveRunAsGroup,
 		effectiveFSGroup,
-		nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity),
+		c.nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity),
 		spec.SchedulerName,
 		int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
 		c.OpConfig.PodServiceAccountName,
@ -2010,7 +2020,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
 		nil,
 		nil,
 		nil,
-		nodeAffinity(c.OpConfig.NodeReadinessLabel, nil),
+		c.nodeAffinity(c.OpConfig.NodeReadinessLabel, nil),
 		nil,
 		int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
 		c.OpConfig.PodServiceAccountName,
@ -620,58 +620,178 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv
 	return requiresMasterRestart, nil
 }

+func (c *Cluster) getNextRotationDate(currentDate time.Time) (time.Time, string) {
+	nextRotationDate := currentDate.AddDate(0, 0, int(c.OpConfig.PasswordRotationInterval))
+	return nextRotationDate, nextRotationDate.Format("2006-01-02 15:04:05")
+}
+
 func (c *Cluster) syncSecrets() error {
-	var (
-		err    error
-		secret *v1.Secret
-	)
 	c.logger.Info("syncing secrets")
 	c.setProcessName("syncing secrets")
-	secrets := c.generateUserSecrets()
+	generatedSecrets := c.generateUserSecrets()
+	rotationUsers := make(spec.PgUserMap)
+	retentionUsers := make([]string, 0)
+	currentTime := time.Now()

-	for secretUsername, secretSpec := range secrets {
-		if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Create(context.TODO(), secretSpec, metav1.CreateOptions{}); err == nil {
+	for secretUsername, generatedSecret := range generatedSecrets {
+		secret, err := c.KubeClient.Secrets(generatedSecret.Namespace).Create(context.TODO(), generatedSecret, metav1.CreateOptions{})
+		if err == nil {
 			c.Secrets[secret.UID] = secret
-			c.logger.Debugf("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), secretSpec.Namespace, secret.UID)
+			c.logger.Debugf("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), generatedSecret.Namespace, secret.UID)
 			continue
 		}
 		if k8sutil.ResourceAlreadyExists(err) {
-			var userMap map[string]spec.PgUser
-			if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Get(context.TODO(), secretSpec.Name, metav1.GetOptions{}); err != nil {
-				return fmt.Errorf("could not get current secret: %v", err)
-			}
-			if secretUsername != string(secret.Data["username"]) {
-				c.logger.Errorf("secret %s does not contain the role %s", secretSpec.Name, secretUsername)
-				continue
-			}
-			c.Secrets[secret.UID] = secret
-			c.logger.Debugf("secret %s already exists, fetching its password", util.NameFromMeta(secret.ObjectMeta))
-			if secretUsername == c.systemUsers[constants.SuperuserKeyName].Name {
-				secretUsername = constants.SuperuserKeyName
-				userMap = c.systemUsers
-			} else if secretUsername == c.systemUsers[constants.ReplicationUserKeyName].Name {
-				secretUsername = constants.ReplicationUserKeyName
-				userMap = c.systemUsers
-			} else {
-				userMap = c.pgUsers
-			}
-			pwdUser := userMap[secretUsername]
-			// if this secret belongs to the infrastructure role and the password has changed - replace it in the secret
-			if pwdUser.Password != string(secret.Data["password"]) &&
-				pwdUser.Origin == spec.RoleOriginInfrastructure {
-
-				c.logger.Debugf("updating the secret %s from the infrastructure roles", secretSpec.Name)
-				if _, err = c.KubeClient.Secrets(secretSpec.Namespace).Update(context.TODO(), secretSpec, metav1.UpdateOptions{}); err != nil {
-					return fmt.Errorf("could not update infrastructure role secret for role %q: %v", secretUsername, err)
-				}
-			} else {
-				// for non-infrastructure role - update the role with the password from the secret
-				pwdUser.Password = string(secret.Data["password"])
-				userMap[secretUsername] = pwdUser
+			if err = c.updateSecret(secretUsername, generatedSecret, &rotationUsers, &retentionUsers, currentTime); err != nil {
+				c.logger.Warningf("syncing secret %s failed: %v", util.NameFromMeta(secret.ObjectMeta), err)
 			}
 		} else {
-			return fmt.Errorf("could not create secret for user %s: in namespace %s: %v", secretUsername, secretSpec.Namespace, err)
+			return fmt.Errorf("could not create secret for user %s: in namespace %s: %v", secretUsername, generatedSecret.Namespace, err)
 		}
 	}

+	// add new user with date suffix and use it in the secret of the original user
+	if len(rotationUsers) > 0 {
+		err := c.initDbConn()
+		if err != nil {
+			return fmt.Errorf("could not init db connection: %v", err)
+		}
+		pgSyncRequests := c.userSyncStrategy.ProduceSyncRequests(spec.PgUserMap{}, rotationUsers)
+		if err = c.userSyncStrategy.ExecuteSyncRequests(pgSyncRequests, c.pgDb); err != nil {
+			return fmt.Errorf("error creating database roles for password rotation: %v", err)
+		}
+		if err := c.closeDbConn(); err != nil {
+			c.logger.Errorf("could not close database connection after creating users for password rotation: %v", err)
+		}
+	}
+
+	// remove rotation users that exceed the retention interval
+	if len(retentionUsers) > 0 {
+		err := c.initDbConn()
+		if err != nil {
+			return fmt.Errorf("could not init db connection: %v", err)
+		}
+		if err = c.cleanupRotatedUsers(retentionUsers, c.pgDb); err != nil {
+			return fmt.Errorf("error removing users exceeding configured retention interval: %v", err)
+		}
+		if err := c.closeDbConn(); err != nil {
+			c.logger.Errorf("could not close database connection after removing users exceeding configured retention interval: %v", err)
+		}
+	}
+
 	return nil
 }
+
+func (c *Cluster) updateSecret(
+	secretUsername string,
+	generatedSecret *v1.Secret,
+	rotationUsers *spec.PgUserMap,
+	retentionUsers *[]string,
+	currentTime time.Time) error {
+	var (
+		secret              *v1.Secret
+		err                 error
+		updateSecret        bool
+		updateSecretMsg     string
+		nextRotationDate    time.Time
+		nextRotationDateStr string
+	)
+
+	// get the secret first
+	if secret, err = c.KubeClient.Secrets(generatedSecret.Namespace).Get(context.TODO(), generatedSecret.Name, metav1.GetOptions{}); err != nil {
+		return fmt.Errorf("could not get current secret: %v", err)
+	}
+	c.Secrets[secret.UID] = secret
+
+	// fetch user map to update later
+	var userMap map[string]spec.PgUser
+	var userKey string
+	if secretUsername == c.systemUsers[constants.SuperuserKeyName].Name {
+		userKey = constants.SuperuserKeyName
+		userMap = c.systemUsers
+	} else if secretUsername == c.systemUsers[constants.ReplicationUserKeyName].Name {
+		userKey = constants.ReplicationUserKeyName
+		userMap = c.systemUsers
+	} else {
+		userKey = secretUsername
+		userMap = c.pgUsers
+	}
+	pwdUser := userMap[userKey]
+	secretName := util.NameFromMeta(secret.ObjectMeta)
+
+	// if password rotation is enabled update password and username if rotation interval has been passed
+	if (c.OpConfig.EnablePasswordRotation && !pwdUser.IsDbOwner &&
+		pwdUser.Origin != spec.RoleOriginInfrastructure && pwdUser.Origin != spec.RoleOriginSystem) ||
+		util.SliceContains(c.Spec.UsersWithSecretRotation, secretUsername) ||
+		util.SliceContains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername) {
+
+		// initialize password rotation setting first rotation date
+		nextRotationDateStr = string(secret.Data["nextRotation"])
+		if nextRotationDate, err = time.ParseInLocation("2006-01-02 15:04:05", nextRotationDateStr, time.Local); err != nil {
+			nextRotationDate, nextRotationDateStr = c.getNextRotationDate(currentTime)
+			secret.Data["nextRotation"] = []byte(nextRotationDateStr)
+			updateSecret = true
+			updateSecretMsg = fmt.Sprintf("rotation date not found in secret %q. Setting it to %s", secretName, nextRotationDateStr)
+		}
+
+		// check if next rotation can happen sooner
+		// if rotation interval has been decreased
+		currentRotationDate, _ := c.getNextRotationDate(currentTime)
+		if nextRotationDate.After(currentRotationDate) {
+			nextRotationDate = currentRotationDate
+		}
+
+		// update password and next rotation date if configured interval has passed
+		if currentTime.After(nextRotationDate) {
+			// create rotation user if role is not listed for in-place password update
+			if !util.SliceContains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername) {
+				rotationUser := pwdUser
+				newRotationUsername := secretUsername + currentTime.Format("060102")
+				rotationUser.Name = newRotationUsername
+				rotationUser.MemberOf = []string{secretUsername}
+				(*rotationUsers)[newRotationUsername] = rotationUser
+				secret.Data["username"] = []byte(newRotationUsername)
+
+				// whenever there is a rotation, check if old rotation users can be deleted
+				*retentionUsers = append(*retentionUsers, secretUsername)
+			}
+			secret.Data["password"] = []byte(util.RandomPassword(constants.PasswordLength))
+
+			_, nextRotationDateStr = c.getNextRotationDate(nextRotationDate)
+			secret.Data["nextRotation"] = []byte(nextRotationDateStr)
+
+			updateSecret = true
+			updateSecretMsg = fmt.Sprintf("updating secret %q due to password rotation - next rotation date: %s", secretName, nextRotationDateStr)
+		}
+	} else {
+		// username might not match if password rotation has been disabled again
+		if secretUsername != string(secret.Data["username"]) {
+			*retentionUsers = append(*retentionUsers, secretUsername)
+			secret.Data["username"] = []byte(secretUsername)
+			secret.Data["password"] = []byte(util.RandomPassword(constants.PasswordLength))
+			secret.Data["nextRotation"] = []byte{}
+			updateSecret = true
+			updateSecretMsg = fmt.Sprintf("secret %s does not contain the role %s - updating username and resetting password", secretName, secretUsername)
+		}
+	}
+
+	// if this secret belongs to the infrastructure role and the password has changed - replace it in the secret
+	if pwdUser.Password != string(secret.Data["password"]) && pwdUser.Origin == spec.RoleOriginInfrastructure {
+		secret = generatedSecret
+		updateSecret = true
+		updateSecretMsg = fmt.Sprintf("updating the secret %s from the infrastructure roles", secretName)
+	} else {
+		// for non-infrastructure role - update the role with the password from the secret
+		pwdUser.Password = string(secret.Data["password"])
+		userMap[userKey] = pwdUser
+	}
+
+	if updateSecret {
+		c.logger.Debugln(updateSecretMsg)
+		if _, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil {
+			return fmt.Errorf("could not update secret %q: %v", secretName, err)
+		}
+		c.Secrets[secret.UID] = secret
+	}
+
+	return nil
+}
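The rotation bookkeeping in updateSecret boils down to two time formats: the nextRotation timestamp stored in the secret ("2006-01-02 15:04:05" layout) and the six-digit date suffix appended to rotated usernames ("060102" layout). A small standalone illustration of both, assuming a 90-day interval and the example user "foo":

package main

import (
	"fmt"
	"time"
)

func main() {
	interval := 90 // days, as in password_rotation_interval
	now := time.Now()

	// value written into secret.Data["nextRotation"]
	next := now.AddDate(0, 0, interval)
	fmt.Println("nextRotation secret field:", next.Format("2006-01-02 15:04:05"))

	// rotation users are the original name plus the creation date suffix
	fmt.Println("rotation username:", "foo"+now.Format("060102"))
}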
@ -19,6 +19,7 @@ import (
 	"github.com/zalando/postgres-operator/mocks"
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
+	"github.com/zalando/postgres-operator/pkg/spec"
 	"github.com/zalando/postgres-operator/pkg/util/config"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
 	"github.com/zalando/postgres-operator/pkg/util/patroni"
@ -26,6 +27,8 @@ import (
 )

 var patroniLogger = logrus.New().WithField("test", "patroni")
+var acidClientSet = fakeacidv1.NewSimpleClientset()
+var clientSet = fake.NewSimpleClientset()

 func newMockPod(ip string) *v1.Pod {
 	return &v1.Pod{
@ -36,9 +39,6 @@ func newMockPod(ip string) *v1.Pod {
 }

 func newFakeK8sSyncClient() (k8sutil.KubernetesClient, *fake.Clientset) {
-	acidClientSet := fakeacidv1.NewSimpleClientset()
-	clientSet := fake.NewSimpleClientset()
-
 	return k8sutil.KubernetesClient{
 		PodsGetter:        clientSet.CoreV1(),
 		PostgresqlsGetter: acidClientSet.AcidV1(),
@ -46,6 +46,12 @@ func newFakeK8sSyncClient() (k8sutil.KubernetesClient, *fake.Clientset) {
 	}, clientSet
 }

+func newFakeK8sSyncSecretsClient() (k8sutil.KubernetesClient, *fake.Clientset) {
+	return k8sutil.KubernetesClient{
+		SecretsGetter: clientSet.CoreV1(),
+	}, clientSet
+}
+
 func TestSyncStatefulSetsAnnotations(t *testing.T) {
 	testName := "test syncing statefulsets annotations"
 	client, _ := newFakeK8sSyncClient()
@ -257,3 +263,72 @@ func TestCheckAndSetGlobalPostgreSQLConfiguration(t *testing.T) {
 		}
 	}
 }
+
+func TestUpdateSecret(t *testing.T) {
+	testName := "test syncing secrets"
+	client, _ := newFakeK8sSyncSecretsClient()
+
+	clusterName := "acid-test-cluster"
+	namespace := "default"
+	username := "foo"
+	secretTemplate := config.StringTemplate("{username}.{cluster}.credentials")
+	rotationUsers := make(spec.PgUserMap)
+	retentionUsers := make([]string, 0)
+	yesterday := time.Now().AddDate(0, 0, -1)
+
+	// new cluster with pvc storage resize mode and configured labels
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				Auth: config.Auth{
+					SecretNameTemplate:            secretTemplate,
+					EnablePasswordRotation:        true,
+					PasswordRotationInterval:      1,
+					PasswordRotationUserRetention: 3,
+				},
+				Resources: config.Resources{
+					ClusterLabels:    map[string]string{"application": "spilo"},
+					ClusterNameLabel: "cluster-name",
+				},
+			},
+		}, client, acidv1.Postgresql{}, logger, eventRecorder)
+
+	cluster.Name = clusterName
+	cluster.Namespace = namespace
+	cluster.pgUsers = map[string]spec.PgUser{}
+	cluster.Spec.Users = map[string]acidv1.UserFlags{username: {}}
+	cluster.initRobotUsers()
+
+	// create a secret for user foo
+	cluster.syncSecrets()
+
+	secret, err := cluster.KubeClient.Secrets(namespace).Get(context.TODO(), secretTemplate.Format("username", username, "cluster", clusterName), metav1.GetOptions{})
+	assert.NoError(t, err)
+	generatedSecret := cluster.Secrets[secret.UID]
+
+	// now update the secret setting next rotation date (yesterday + interval)
+	cluster.updateSecret(username, generatedSecret, &rotationUsers, &retentionUsers, yesterday)
+	updatedSecret, err := cluster.KubeClient.Secrets(namespace).Get(context.TODO(), secretTemplate.Format("username", username, "cluster", clusterName), metav1.GetOptions{})
+	assert.NoError(t, err)
+
+	nextRotation := string(updatedSecret.Data["nextRotation"])
+	_, nextRotationDate := cluster.getNextRotationDate(yesterday)
+	if nextRotation != nextRotationDate {
+		t.Errorf("%s: updated secret does not contain correct rotation date: expected %s, got %s", testName, nextRotationDate, nextRotation)
+	}
+
+	// update secret again but use current time to trigger rotation
+	cluster.updateSecret(username, generatedSecret, &rotationUsers, &retentionUsers, time.Now())
+	updatedSecret, err = cluster.KubeClient.Secrets(namespace).Get(context.TODO(), secretTemplate.Format("username", username, "cluster", clusterName), metav1.GetOptions{})
+	assert.NoError(t, err)
+
+	if len(rotationUsers) != 1 && len(retentionUsers) != 1 {
+		t.Errorf("%s: unexpected number of users to rotate - expected only foo, found %d", testName, len(rotationUsers))
+	}
+
+	secretUsername := string(updatedSecret.Data["username"])
+	rotatedUsername := username + time.Now().Format("060102")
+	if secretUsername != rotatedUsername {
+		t.Errorf("%s: updated secret does not contain correct username: expected %s, got %s", testName, rotatedUsername, secretUsername)
+	}
+}
@ -310,7 +310,7 @@ func (c *Controller) initController() {

 	if configObjectName := os.Getenv("POSTGRES_OPERATOR_CONFIGURATION_OBJECT"); configObjectName != "" {
 		if c.opConfig.EnableCRDRegistration != nil && *c.opConfig.EnableCRDRegistration {
-			if err := c.createConfigurationCRD(c.opConfig.EnableCRDValidation); err != nil {
+			if err := c.createConfigurationCRD(); err != nil {
 				c.logger.Fatalf("could not register Operator Configuration CustomResourceDefinition: %v", err)
 			}
 		}
@ -328,7 +328,7 @@ func (c *Controller) initController() {
 	c.modifyConfigFromEnvironment()

 	if c.opConfig.EnableCRDRegistration != nil && *c.opConfig.EnableCRDRegistration {
-		if err := c.createPostgresCRD(c.opConfig.EnableCRDValidation); err != nil {
+		if err := c.createPostgresCRD(); err != nil {
 			c.logger.Fatalf("could not register Postgres CustomResourceDefinition: %v", err)
 		}
 	}
@ -35,6 +35,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	// general config
 	result.EnableCRDRegistration = util.CoalesceBool(fromCRD.EnableCRDRegistration, util.True())
 	result.EnableCRDValidation = util.CoalesceBool(fromCRD.EnableCRDValidation, util.True())
+	result.CRDCategories = util.CoalesceStrArr(fromCRD.CRDCategories, []string{"all"})
 	result.EnableLazySpiloUpgrade = fromCRD.EnableLazySpiloUpgrade
 	result.EnablePgVersionEnvVar = fromCRD.EnablePgVersionEnvVar
 	result.EnableSpiloWalPathCompat = fromCRD.EnableSpiloWalPathCompat
@ -54,6 +55,9 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	// user config
 	result.SuperUsername = util.Coalesce(fromCRD.PostgresUsersConfiguration.SuperUsername, "postgres")
 	result.ReplicationUsername = util.Coalesce(fromCRD.PostgresUsersConfiguration.ReplicationUsername, "standby")
+	result.EnablePasswordRotation = fromCRD.PostgresUsersConfiguration.EnablePasswordRotation
+	result.PasswordRotationInterval = util.CoalesceUInt32(fromCRD.PostgresUsersConfiguration.PasswordRotationInterval, 90)
+	result.PasswordRotationUserRetention = util.CoalesceUInt32(fromCRD.PostgresUsersConfiguration.DeepCopy().PasswordRotationUserRetention, 180)

 	// major version upgrade config
 	result.MajorVersionUpgradeMode = util.Coalesce(fromCRD.MajorVersionUpgrade.MajorVersionUpgradeMode, "off")
@ -110,6 +114,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.DeleteAnnotationDateKey = fromCRD.Kubernetes.DeleteAnnotationDateKey
 	result.DeleteAnnotationNameKey = fromCRD.Kubernetes.DeleteAnnotationNameKey
 	result.NodeReadinessLabel = fromCRD.Kubernetes.NodeReadinessLabel
+	result.NodeReadinessLabelMerge = fromCRD.Kubernetes.NodeReadinessLabelMerge
 	result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName
 	result.PodManagementPolicy = util.Coalesce(fromCRD.Kubernetes.PodManagementPolicy, "ordered_ready")
 	result.MasterPodMoveTimeout = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.MasterPodMoveTimeout), "10m")
@ -53,28 +53,35 @@ func (c *Controller) clusterWorkerID(clusterName spec.NamespacedName) uint32 {
 	return c.clusterWorkers[clusterName]
 }

-func (c *Controller) createOperatorCRD(crd *apiextv1.CustomResourceDefinition) error {
-	if _, err := c.KubeClient.CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil {
-		if k8sutil.ResourceAlreadyExists(err) {
-			c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name)
-
-			patch, err := json.Marshal(crd)
-			if err != nil {
-				return fmt.Errorf("could not marshal new customResourceDefintion: %v", err)
-			}
-			if _, err := c.KubeClient.CustomResourceDefinitions().Patch(
-				context.TODO(), crd.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil {
-				return fmt.Errorf("could not update customResourceDefinition: %v", err)
-			}
-		} else {
-			c.logger.Errorf("could not create customResourceDefinition %q: %v", crd.Name, err)
+func (c *Controller) createOperatorCRD(desiredCrd *apiextv1.CustomResourceDefinition) error {
+	crd, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), desiredCrd.Name, metav1.GetOptions{})
+	if k8sutil.ResourceNotFound(err) {
+		if _, err := c.KubeClient.CustomResourceDefinitions().Create(context.TODO(), desiredCrd, metav1.CreateOptions{}); err != nil {
+			return fmt.Errorf("could not create customResourceDefinition %q: %v", desiredCrd.Name, err)
+		}
+	}
+	if err != nil {
+		c.logger.Errorf("could not get customResourceDefinition %q: %v", desiredCrd.Name, err)
+	}
+	if crd != nil {
+		c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name)
+		// copy annotations and labels from existing CRD since we do not define them
+		desiredCrd.Annotations = crd.Annotations
+		desiredCrd.Labels = crd.Labels
+		patch, err := json.Marshal(desiredCrd)
+		if err != nil {
+			return fmt.Errorf("could not marshal new customResourceDefintion %q: %v", desiredCrd.Name, err)
+		}
+		if _, err := c.KubeClient.CustomResourceDefinitions().Patch(
+			context.TODO(), crd.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil {
+			return fmt.Errorf("could not update customResourceDefinition %q: %v", crd.Name, err)
 		}
 	} else {
 		c.logger.Infof("customResourceDefinition %q has been registered", crd.Name)
 	}

 	return wait.Poll(c.config.CRDReadyWaitInterval, c.config.CRDReadyWaitTimeout, func() (bool, error) {
-		c, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{})
+		c, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), desiredCrd.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@ -96,12 +103,12 @@ func (c *Controller) createOperatorCRD(crd *apiextv1.CustomResourceDefinition) e
 	})
 }

-func (c *Controller) createPostgresCRD(enableValidation *bool) error {
-	return c.createOperatorCRD(acidv1.PostgresCRD(enableValidation))
+func (c *Controller) createPostgresCRD() error {
+	return c.createOperatorCRD(acidv1.PostgresCRD(c.opConfig.CRDCategories))
 }

-func (c *Controller) createConfigurationCRD(enableValidation *bool) error {
-	return c.createOperatorCRD(acidv1.ConfigurationCRD(enableValidation))
+func (c *Controller) createConfigurationCRD() error {
+	return c.createOperatorCRD(acidv1.ConfigurationCRD(c.opConfig.CRDCategories))
 }

 func readDecodedRole(s string) (*spec.PgUser, error) {
@ -55,6 +55,7 @@ type PgUser struct {
 	MemberOf   []string          `yaml:"inrole"`
 	Parameters map[string]string `yaml:"db_parameters"`
 	AdminRole  string            `yaml:"admin_role"`
+	IsDbOwner  bool              `yaml:"is_db_owner"`
 	Deleted    bool              `yaml:"deleted"`
 }
@ -20,6 +20,7 @@ type CRD struct {
 	RepairPeriod          time.Duration `name:"repair_period" default:"5m"`
 	EnableCRDRegistration *bool         `name:"enable_crd_registration" default:"true"`
 	EnableCRDValidation   *bool         `name:"enable_crd_validation" default:"true"`
+	CRDCategories         []string      `name:"crd_categories" default:"all"`
 }

 // Resources describes kubernetes resource specific configuration parameters
@ -55,6 +56,7 @@ type Resources struct {
 	PodEnvironmentConfigMap spec.NamespacedName `name:"pod_environment_configmap"`
 	PodEnvironmentSecret    string              `name:"pod_environment_secret"`
 	NodeReadinessLabel      map[string]string   `name:"node_readiness_label" default:""`
+	NodeReadinessLabelMerge string              `name:"node_readiness_label_merge" default:"OR"`
 	MaxInstances            int32               `name:"max_instances" default:"-1"`
 	MinInstances            int32               `name:"min_instances" default:"-1"`
 	ShmVolume               *bool               `name:"enable_shm_volume" default:"true"`
@ -99,6 +101,9 @@ type Auth struct {
 	InfrastructureRolesDefs       string              `name:"infrastructure_roles_secrets"`
 	SuperUsername                 string              `name:"super_username" default:"postgres"`
 	ReplicationUsername           string              `name:"replication_username" default:"standby"`
+	EnablePasswordRotation        bool                `name:"enable_password_rotation" default:"false"`
+	PasswordRotationInterval      uint32              `name:"password_rotation_interval" default:"90"`
+	PasswordRotationUserRetention uint32              `name:"password_rotation_user_retention" default:"180"`
 }

 // Scalyr holds the configuration for the Scalyr Agent sidecar for log shipping:
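These three Auth options map one-to-one onto the new postgres_users settings in the OperatorConfiguration CRD (see the CRD defaults at the top of this diff). Filled into the operator's Go config struct they would look roughly like this sketch; the values shown are simply the documented defaults, not required settings:

package main

import (
	"fmt"

	"github.com/zalando/postgres-operator/pkg/util/config"
)

func main() {
	// defaults as declared in the struct tags above
	auth := config.Auth{
		EnablePasswordRotation:        false, // rotation stays opt-in
		PasswordRotationInterval:      90,    // days until a secret gets a new password/user
		PasswordRotationUserRetention: 180,   // days before rotated users are dropped again
	}
	fmt.Printf("%+v\n", auth)
}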
@ -18,6 +18,7 @@ const (
 	alterUserRenameSQL   = `ALTER ROLE "%s" RENAME TO "%s%s"`
 	alterRoleResetAllSQL = `ALTER ROLE "%s" RESET ALL`
 	alterRoleSetSQL      = `ALTER ROLE "%s" SET %s TO %s`
+	dropUserSQL          = `SET LOCAL synchronous_commit = 'local'; DROP ROLE "%s";`
 	grantToUserSQL       = `GRANT %s TO "%s"`
 	doBlockStmt          = `SET LOCAL synchronous_commit = 'local'; DO $$ BEGIN %s; END;$$;`
 	passwordTemplate     = "ENCRYPTED PASSWORD '%s'"
@ -288,3 +289,13 @@ func quoteParameterValue(name, val string) string {
 	}
 	return fmt.Sprintf(`'%s'`, strings.Trim(val, " "))
 }
+
+// DropPgUser to remove user created by the operator e.g. for password rotation
+func DropPgUser(user string, db *sql.DB) error {
+	query := fmt.Sprintf(dropUserSQL, user)
+	if _, err := db.Exec(query); err != nil { // TODO: Try several times
+		return err
+	}
+
+	return nil
+}
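DropPgUser pins synchronous_commit to local for the drop so the statement does not wait on replicas. A hedged usage sketch against a plain database/sql connection; the DSN and the rotated role name are placeholders:

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
	"github.com/zalando/postgres-operator/pkg/util/users"
)

func main() {
	db, err := sql.Open("postgres", "host=localhost user=postgres sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// drop a rotation user that exceeded the retention window
	if err := users.DropPgUser("foo220301", db); err != nil {
		log.Printf("could not drop role: %v", err)
	}
}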