Merge branch 'master' into feature/infrastructure-roles-extension

commit 1257b8840f
@@ -30,6 +30,7 @@ _testmain.go
 /docker/build/
 /github.com/
 .idea
+.vscode

 scm-source.json
@@ -18,5 +18,6 @@ install:

 script:
   - hack/verify-codegen.sh
-  - travis_wait 20 goveralls -service=travis-ci -package ./pkg/... -v
+  - travis_wait 20 go test -race -covermode atomic -coverprofile=profile.cov ./pkg/... -v
+  - goveralls -coverprofile=profile.cov -service=travis-ci -v
   - make e2e
@@ -46,7 +46,7 @@ spec:
         - name: "RESOURCES_VISIBLE"
          value: "{{ .Values.envs.resourcesVisible }}"
         - name: "TARGET_NAMESPACE"
-          value: {{ .Values.envs.targetNamespace }}
+          value: "{{ .Values.envs.targetNamespace }}"
         - name: "TEAMS"
          value: |-
            [
@@ -169,6 +169,8 @@ spec:
            type: string
          pod_environment_configmap:
            type: string
+          pod_environment_secret:
+            type: string
          pod_management_policy:
            type: string
            enum:
@@ -104,6 +104,8 @@ configKubernetes:
   pod_antiaffinity_topology_key: "kubernetes.io/hostname"
   # namespaced name of the ConfigMap with environment variables to populate on every pod
   # pod_environment_configmap: "default/my-custom-config"
+  # name of the Secret (in cluster namespace) with environment variables to populate on every pod
+  # pod_environment_secret: "my-custom-secret"

   # specify the pod management policy of stateful sets of Postgres clusters
   pod_management_policy: "ordered_ready"
@@ -95,6 +95,8 @@ configKubernetes:
   pod_antiaffinity_topology_key: "kubernetes.io/hostname"
   # namespaced name of the ConfigMap with environment variables to populate on every pod
   # pod_environment_configmap: "default/my-custom-config"
+  # name of the Secret (in cluster namespace) with environment variables to populate on every pod
+  # pod_environment_secret: "my-custom-secret"

   # specify the pod management policy of stateful sets of Postgres clusters
   pod_management_policy: "ordered_ready"
@@ -319,11 +319,18 @@ spec:


 ## Custom Pod Environment Variables

-It is possible to configure a ConfigMap which is used by the Postgres pods as
+It is possible to configure a ConfigMap as well as a Secret which are used by the Postgres pods as
 an additional provider for environment variables. One use case is to customize
-the Spilo image and configure it with environment variables. The ConfigMap with
-the additional settings is referenced in the operator's main configuration.
+the Spilo image and configure it with environment variables. Another case could be to provide custom
+cloud provider or backup settings.
+
+In general the Operator will give preference to the globally configured variables, to not have the custom
+ones interfere with core functionality. Variables with the 'WAL_' and 'LOG_' prefix can be overwritten though, to allow
+backup and logshipping to be specified differently.
+
+### Via ConfigMap
+
+The ConfigMap with the additional settings is referenced in the operator's main configuration.
 A namespace can be specified along with the name. If left out, the configured
 default namespace of your K8s client will be used and if the ConfigMap is not
 found there, the Postgres cluster's namespace is taken when different:
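Editor's note on the precedence rule introduced in the paragraph above: the sketch below is illustrative only (the `envVar` type and `mergeEnvVars` helper are invented here, not operator code). It shows how a first-occurrence-wins merge yields "globals win, except WAL_/LOG_ variables, which custom ConfigMap/Secret values may override".

```go
package main

import (
	"fmt"
	"strings"
)

// envVar is a stand-in for v1.EnvVar in this sketch.
type envVar struct{ Name, Value string }

// mergeEnvVars keeps the first occurrence of every name, so whatever is
// appended first wins. Custom WAL_/LOG_ variables are appended before the
// globals; all other custom variables only fill gaps the globals leave open.
func mergeEnvVars(global, custom []envVar) []envVar {
	merged := []envVar{}
	seen := map[string]bool{}
	// custom WAL_/LOG_ variables first, so they win the de-duplication
	for _, v := range custom {
		if strings.HasPrefix(v.Name, "WAL_") || strings.HasPrefix(v.Name, "LOG_") {
			merged = append(merged, v)
			seen[v.Name] = true
		}
	}
	// globals next: they win over any remaining custom variables
	for _, v := range global {
		if !seen[v.Name] {
			merged = append(merged, v)
			seen[v.Name] = true
		}
	}
	// finally, custom variables that do not clash with anything
	for _, v := range custom {
		if !seen[v.Name] {
			merged = append(merged, v)
			seen[v.Name] = true
		}
	}
	return merged
}

func main() {
	global := []envVar{{"WAL_S3_BUCKET", "operator-bucket"}, {"PGVERSION", "12"}}
	custom := []envVar{{"WAL_S3_BUCKET", "team-bucket"}, {"PGVERSION", "13"}, {"MY_VAR", "x"}}
	fmt.Println(mergeEnvVars(global, custom))
	// [{WAL_S3_BUCKET team-bucket} {PGVERSION 12} {MY_VAR x}]
}
```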
@@ -365,7 +372,54 @@ data:
   MY_CUSTOM_VAR: value
 ```

-This ConfigMap is then added as a source of environment variables to the
+The key-value pairs of the ConfigMap are then added as environment variables to the
+Postgres StatefulSet/pods.
+
+### Via Secret
+
+The Secret with the additional variables is referenced in the operator's main configuration.
+To protect the values of the Secret from being exposed in the pod spec, they are each referenced
+as a SecretKeyRef.
+This does not allow the Secret to be in a different namespace than the pods, though.
+
+**postgres-operator ConfigMap**
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: postgres-operator
+data:
+  # referencing secret with custom environment variables
+  pod_environment_secret: postgres-pod-secrets
+```
+
+**OperatorConfiguration**
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: OperatorConfiguration
+metadata:
+  name: postgresql-operator-configuration
+configuration:
+  kubernetes:
+    # referencing secret with custom environment variables
+    pod_environment_secret: postgres-pod-secrets
+```
+
+**referenced Secret `postgres-pod-secrets`**
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: postgres-pod-secrets
+  namespace: default
+data:
+  MY_CUSTOM_VAR: dmFsdWU=
+```
+
+The key-value pairs of the Secret are all accessible as environment variables to the
 Postgres StatefulSet/pods.

 ## Limiting the number of min and max instances in clusters
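To make the "referenced as a SecretKeyRef" statement above concrete, here is a minimal sketch of the env entry shape produced for one Secret key. It assumes the k8s.io/api module is available; `buildSecretEnvVar` is a made-up helper name, while the types mirror the ones used by the operator code later in this diff.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// buildSecretEnvVar shows the shape of the env entry generated for one key of
// the referenced Secret: the pod spec only carries a reference (secretKeyRef),
// never the value itself.
func buildSecretEnvVar(secretName, key string) v1.EnvVar {
	return v1.EnvVar{
		Name: key,
		ValueFrom: &v1.EnvVarSource{
			SecretKeyRef: &v1.SecretKeySelector{
				LocalObjectReference: v1.LocalObjectReference{Name: secretName},
				Key:                  key,
			},
		},
	}
}

func main() {
	fmt.Printf("%+v\n", buildSecretEnvVar("postgres-pod-secrets", "MY_CUSTOM_VAR"))
}
```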
@@ -160,7 +160,7 @@ You can now access the web interface by port forwarding the UI pod (mind the
 label selector) and enter `localhost:8081` in your browser:

 ```bash
-kubectl port-forward "$(kubectl get pod -l name=postgres-operator-ui --output='name')" 8081
+kubectl port-forward svc/postgres-operator-ui 8081:8081
 ```

 Available option are explained in detail in the [UI docs](operator-ui.md).
go.mod (14 lines changed)
@@ -9,12 +9,12 @@ require (
 	github.com/r3labs/diff v1.1.0
 	github.com/sirupsen/logrus v1.6.0
 	github.com/stretchr/testify v1.5.1
-	golang.org/x/mod v0.3.0 // indirect
-	golang.org/x/tools v0.0.0-20200615222825-6aa8f57aacd9 // indirect
+	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
+	golang.org/x/tools v0.0.0-20200729041821-df70183b1872 // indirect
 	gopkg.in/yaml.v2 v2.2.8
-	k8s.io/api v0.18.3
-	k8s.io/apiextensions-apiserver v0.18.3
-	k8s.io/apimachinery v0.18.3
-	k8s.io/client-go v0.18.3
-	k8s.io/code-generator v0.18.3
+	k8s.io/api v0.18.6
+	k8s.io/apiextensions-apiserver v0.18.6
+	k8s.io/apimachinery v0.18.6
+	k8s.io/client-go v0.18.6
+	k8s.io/code-generator v0.18.6
 )
go.sum (42 lines changed)
@@ -291,7 +291,7 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
 github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
 go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@@ -312,13 +312,13 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
 golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -339,8 +339,8 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
@@ -351,6 +351,7 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -368,6 +369,8 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU=
 golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -392,8 +395,8 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw
 golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200615222825-6aa8f57aacd9 h1:cwgUY+1ja2qxWb2dyaCoixaA66WGWmrijSlxaM+JM/g=
-golang.org/x/tools v0.0.0-20200615222825-6aa8f57aacd9/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729041821-df70183b1872 h1:/U95VAvB4ZsR91rpZX2MwiKpejhWr+UxJ+N2VlJuESk=
+golang.org/x/tools v0.0.0-20200729041821-df70183b1872/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
@@ -435,19 +438,20 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.18.3 h1:2AJaUQdgUZLoDZHrun21PW2Nx9+ll6cUzvn3IKhSIn0=
-k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA=
-k8s.io/apiextensions-apiserver v0.18.3 h1:h6oZO+iAgg0HjxmuNnguNdKNB9+wv3O1EBDdDWJViQ0=
-k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE=
-k8s.io/apimachinery v0.18.3 h1:pOGcbVAhxADgUYnjS08EFXs9QMl8qaH5U4fr5LGUrSk=
-k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
-k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw=
-k8s.io/client-go v0.18.3 h1:QaJzz92tsN67oorwzmoB0a9r9ZVHuD5ryjbCKP0U22k=
-k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw=
-k8s.io/code-generator v0.18.3 h1:5H57pYEbkMMXCLKD16YQH3yDPAbVLweUsB1M3m70D1c=
-k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
-k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k=
+k8s.io/api v0.18.6 h1:osqrAXbOQjkKIWDTjrqxWQ3w0GkKb1KA1XkUGHHYpeE=
+k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
+k8s.io/apiextensions-apiserver v0.18.6 h1:vDlk7cyFsDyfwn2rNAO2DbmUbvXy5yT5GE3rrqOzaMo=
+k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M=
+k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag=
+k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
+k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg=
+k8s.io/client-go v0.18.6 h1:I+oWqJbibLSGsZj8Xs8F0aWVXJVIoUHWaaJV3kUN/Zw=
+k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=
+k8s.io/code-generator v0.18.6 h1:QdfvGfs4gUCS1dru+rLbCKIFxYEV0IRfF8MXwY/ozLk=
+k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
+k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
 k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -74,6 +74,7 @@ data:
   # pod_antiaffinity_topology_key: "kubernetes.io/hostname"
   pod_deletion_wait_timeout: 10m
   # pod_environment_configmap: "default/my-custom-config"
+  # pod_environment_secret: "my-custom-secret"
   pod_label_wait_timeout: 10m
   pod_management_policy: "ordered_ready"
   pod_role_label: spilo-role
@@ -165,6 +165,8 @@ spec:
            type: string
          pod_environment_configmap:
            type: string
+          pod_environment_secret:
+            type: string
          pod_management_policy:
            type: string
            enum:
@@ -49,6 +49,7 @@ configuration:
    pdb_name_format: "postgres-{cluster}-pdb"
    pod_antiaffinity_topology_key: "kubernetes.io/hostname"
    # pod_environment_configmap: "default/my-custom-config"
+    # pod_environment_secret: "my-custom-secret"
    pod_management_policy: "ordered_ready"
    # pod_priority_class_name: ""
    pod_role_label: spilo-role
@@ -971,6 +971,9 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
 				"pod_environment_configmap": {
 					Type: "string",
 				},
+				"pod_environment_secret": {
+					Type: "string",
+				},
 				"pod_management_policy": {
 					Type: "string",
 					Enum: []apiextv1beta1.JSON{
@@ -112,8 +112,9 @@ func (p *Postgresql) UnmarshalJSON(data []byte) error {

 	if clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID); err != nil {
 		tmp2.Error = err.Error()
-		tmp2.Status.PostgresClusterStatus = ClusterStatusInvalid
-	} else if err := validateCloneClusterDescription(&tmp2.Spec.Clone); err != nil {
+		tmp2.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid}
+	} else if err := validateCloneClusterDescription(tmp2.Spec.Clone); err != nil {
+
 		tmp2.Error = err.Error()
 		tmp2.Status.PostgresClusterStatus = ClusterStatusInvalid
 	} else {
@@ -71,6 +71,7 @@ type KubernetesMetaConfiguration struct {
 	// TODO: use a proper toleration structure?
 	PodToleration map[string]string `json:"toleration,omitempty"`
 	PodEnvironmentConfigMap spec.NamespacedName `json:"pod_environment_configmap,omitempty"`
+	PodEnvironmentSecret string `json:"pod_environment_secret,omitempty"`
 	PodPriorityClassName string `json:"pod_priority_class_name,omitempty"`
 	MasterPodMoveTimeout Duration `json:"master_pod_move_timeout,omitempty"`
 	EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"`
@@ -53,7 +53,7 @@ type PostgresSpec struct {
 	NumberOfInstances int32 `json:"numberOfInstances"`
 	Users map[string]UserFlags `json:"users"`
 	MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
-	Clone CloneDescription `json:"clone"`
+	Clone *CloneDescription `json:"clone"`
 	ClusterName string `json:"-"`
 	Databases map[string]string `json:"databases,omitempty"`
 	PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"`
@@ -72,7 +72,7 @@ func extractClusterName(clusterName string, teamName string) (string, error) {

 func validateCloneClusterDescription(clone *CloneDescription) error {
 	// when cloning from the basebackup (no end timestamp) check that the cluster name is a valid service name
-	if clone.ClusterName != "" && clone.EndTimestamp == "" {
+	if clone != nil && clone.ClusterName != "" && clone.EndTimestamp == "" {
 		if !serviceNameRegex.MatchString(clone.ClusterName) {
 			return fmt.Errorf("clone cluster name must confirm to DNS-1035, regex used for validation is %q",
 				serviceNameRegexString)
@@ -163,7 +163,7 @@ var unmarshalCluster = []struct {
 			"kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
 			"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(),
 		},
-		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`),
+		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":"Invalid"}`),
 		err: nil},
 	{
 		about: "example with /status subresource",
@@ -184,7 +184,7 @@ var unmarshalCluster = []struct {
 			"kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
 			"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(),
 		},
-		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
+		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
 		err: nil},
 	{
 		about: "example with detailed input manifest and deprecated pod_priority_class_name -> podPriorityClassName",
@@ -327,7 +327,7 @@ var unmarshalCluster = []struct {
 					EndTime: mustParseTime("05:15"),
 				},
 			},
-			Clone: CloneDescription{
+			Clone: &CloneDescription{
 				ClusterName: "acid-batman",
 			},
 			ClusterName: "testcluster1",
@@ -351,7 +351,7 @@ var unmarshalCluster = []struct {
 			Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid},
 			Error:  errors.New("name must match {TEAM}-{NAME} format").Error(),
 		},
-		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null} ,"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
+		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null} ,"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
 		err: nil},
 	{
 		about: "example with clone",
@@ -366,7 +366,7 @@ var unmarshalCluster = []struct {
 		},
 		Spec: PostgresSpec{
 			TeamID: "acid",
-			Clone: CloneDescription{
+			Clone: &CloneDescription{
 				ClusterName: "team-batman",
 			},
 			ClusterName: "testcluster1",
@@ -405,7 +405,7 @@ var unmarshalCluster = []struct {
 		err: errors.New("unexpected end of JSON input")},
 	{
 		about: "expect error on JSON with field's value malformatted",
-		in: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
+		in: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
 		out: Postgresql{},
 		marshal: []byte{},
 		err: errors.New("invalid character 'q' looking for beginning of value"),
@@ -579,7 +579,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	in.Clone.DeepCopyInto(&out.Clone)
+	if in.Clone != nil {
+		in, out := &in.Clone, &out.Clone
+		*out = new(CloneDescription)
+		(*in).DeepCopyInto(*out)
+	}
 	if in.Databases != nil {
 		in, out := &in.Databases, &out.Databases
 		*out = make(map[string]string, len(*in))
@@ -124,6 +124,10 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres

 		return fmt.Sprintf("%s-%s", e.PodName, e.ResourceVersion), nil
 	})
+	password_encryption, ok := pgSpec.Spec.PostgresqlParam.Parameters["password_encryption"]
+	if !ok {
+		password_encryption = "md5"
+	}

 	cluster := &Cluster{
 		Config: cfg,
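The four added lines read the manifest's `password_encryption` Postgres parameter, fall back to `md5` when it is absent, and hand the result to the user-sync strategy in the next hunk. A standalone, hedged sketch of that lookup-with-default idiom (the function name and example values here are illustrative, not operator code):

```go
package main

import "fmt"

// passwordEncryption returns the configured password_encryption parameter,
// defaulting to "md5" when the manifest does not set it (illustrative sketch).
func passwordEncryption(parameters map[string]string) string {
	if v, ok := parameters["password_encryption"]; ok {
		return v
	}
	return "md5"
}

func main() {
	fmt.Println(passwordEncryption(map[string]string{}))                                        // md5
	fmt.Println(passwordEncryption(map[string]string{"password_encryption": "scram-sha-256"}))  // scram-sha-256
}
```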
@@ -135,7 +139,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
 			Secrets:   make(map[types.UID]*v1.Secret),
 			Services:  make(map[PostgresRole]*v1.Service),
 			Endpoints: make(map[PostgresRole]*v1.Endpoints)},
-		userSyncStrategy: users.DefaultUserSyncStrategy{},
+		userSyncStrategy: users.DefaultUserSyncStrategy{password_encryption},
 		deleteOptions:    metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy},
 		podEventsQueue:   podEventsQueue,
 		KubeClient:       kubeClient,
@@ -797,10 +801,8 @@ func (c *Cluster) Delete() {
 		c.logger.Warningf("could not delete statefulset: %v", err)
 	}

-	for _, obj := range c.Secrets {
-		if err := c.deleteSecret(obj); err != nil {
-			c.logger.Warningf("could not delete secret: %v", err)
-		}
+	if err := c.deleteSecrets(); err != nil {
+		c.logger.Warningf("could not delete secrets: %v", err)
 	}

 	if err := c.deletePodDisruptionBudget(); err != nil {
@@ -7,6 +7,7 @@ import (
 	"path"
 	"sort"
 	"strconv"
+	"strings"

 	"github.com/sirupsen/logrus"

@@ -20,7 +21,6 @@ import (

 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	"github.com/zalando/postgres-operator/pkg/spec"
-	pkgspec "github.com/zalando/postgres-operator/pkg/spec"
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/config"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
@@ -715,6 +715,30 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
 		envVars = append(envVars, v1.EnvVar{Name: "SPILO_CONFIGURATION", Value: spiloConfiguration})
 	}

+	if c.patroniUsesKubernetes() {
+		envVars = append(envVars, v1.EnvVar{Name: "DCS_ENABLE_KUBERNETES_API", Value: "true"})
+	} else {
+		envVars = append(envVars, v1.EnvVar{Name: "ETCD_HOST", Value: c.OpConfig.EtcdHost})
+	}
+
+	if c.patroniKubernetesUseConfigMaps() {
+		envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_USE_CONFIGMAPS", Value: "true"})
+	}
+
+	if cloneDescription != nil && cloneDescription.ClusterName != "" {
+		envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...)
+	}
+
+	if c.Spec.StandbyCluster != nil {
+		envVars = append(envVars, c.generateStandbyEnvironment(standbyDescription)...)
+	}
+
+	// add vars taken from pod_environment_configmap and pod_environment_secret first
+	// (to allow them to override the globals set in the operator config)
+	if len(customPodEnvVarsList) > 0 {
+		envVars = append(envVars, customPodEnvVarsList...)
+	}
+
 	if c.OpConfig.WALES3Bucket != "" {
 		envVars = append(envVars, v1.EnvVar{Name: "WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket})
 		envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
@@ -737,28 +761,6 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
 		envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_PREFIX", Value: ""})
 	}

-	if c.patroniUsesKubernetes() {
-		envVars = append(envVars, v1.EnvVar{Name: "DCS_ENABLE_KUBERNETES_API", Value: "true"})
-	} else {
-		envVars = append(envVars, v1.EnvVar{Name: "ETCD_HOST", Value: c.OpConfig.EtcdHost})
-	}
-
-	if c.patroniKubernetesUseConfigMaps() {
-		envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_USE_CONFIGMAPS", Value: "true"})
-	}
-
-	if cloneDescription.ClusterName != "" {
-		envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...)
-	}
-
-	if c.Spec.StandbyCluster != nil {
-		envVars = append(envVars, c.generateStandbyEnvironment(standbyDescription)...)
-	}
-
-	if len(customPodEnvVarsList) > 0 {
-		envVars = append(envVars, customPodEnvVarsList...)
-	}
-
 	return envVars
 }

@@ -777,13 +779,81 @@ func deduplicateEnvVars(input []v1.EnvVar, containerName string, logger *logrus.
 			result = append(result, input[i])
 		} else if names[va.Name] == 1 {
 			names[va.Name]++
-			logger.Warningf("variable %q is defined in %q more than once, the subsequent definitions are ignored",
-				va.Name, containerName)
+
+			// Some variables (those to configure the WAL_ and LOG_ shipping) may be overwritten, only log as info
+			if strings.HasPrefix(va.Name, "WAL_") || strings.HasPrefix(va.Name, "LOG_") {
+				logger.Infof("global variable %q has been overwritten by configmap/secret for container %q",
+					va.Name, containerName)
+			} else {
+				logger.Warningf("variable %q is defined in %q more than once, the subsequent definitions are ignored",
+					va.Name, containerName)
+			}
 		}
 	}
 	return result
 }

+// Return list of variables the pod received from the configured ConfigMap
+func (c *Cluster) getPodEnvironmentConfigMapVariables() ([]v1.EnvVar, error) {
+	configMapPodEnvVarsList := make([]v1.EnvVar, 0)
+
+	if c.OpConfig.PodEnvironmentConfigMap.Name == "" {
+		return configMapPodEnvVarsList, nil
+	}
+
+	cm, err := c.KubeClient.ConfigMaps(c.OpConfig.PodEnvironmentConfigMap.Namespace).Get(
+		context.TODO(),
+		c.OpConfig.PodEnvironmentConfigMap.Name,
+		metav1.GetOptions{})
+	if err != nil {
+		// if not found, try again using the cluster's namespace if it's different (old behavior)
+		if k8sutil.ResourceNotFound(err) && c.Namespace != c.OpConfig.PodEnvironmentConfigMap.Namespace {
+			cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(
+				context.TODO(),
+				c.OpConfig.PodEnvironmentConfigMap.Name,
+				metav1.GetOptions{})
+		}
+		if err != nil {
+			return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err)
+		}
+	}
+	for k, v := range cm.Data {
+		configMapPodEnvVarsList = append(configMapPodEnvVarsList, v1.EnvVar{Name: k, Value: v})
+	}
+	return configMapPodEnvVarsList, nil
+}
+
+// Return list of variables the pod received from the configured Secret
+func (c *Cluster) getPodEnvironmentSecretVariables() ([]v1.EnvVar, error) {
+	secretPodEnvVarsList := make([]v1.EnvVar, 0)
+
+	if c.OpConfig.PodEnvironmentSecret == "" {
+		return secretPodEnvVarsList, nil
+	}
+
+	secret, err := c.KubeClient.Secrets(c.OpConfig.PodEnvironmentSecret).Get(
+		context.TODO(),
+		c.OpConfig.PodEnvironmentSecret,
+		metav1.GetOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("could not read Secret PodEnvironmentSecretName: %v", err)
+	}
+
+	for k := range secret.Data {
+		secretPodEnvVarsList = append(secretPodEnvVarsList,
+			v1.EnvVar{Name: k, ValueFrom: &v1.EnvVarSource{
+				SecretKeyRef: &v1.SecretKeySelector{
+					LocalObjectReference: v1.LocalObjectReference{
+						Name: c.OpConfig.PodEnvironmentSecret,
+					},
+					Key: k,
+				},
+			}})
+	}
+
+	return secretPodEnvVarsList, nil
+}
+
 func getSidecarContainer(sidecar acidv1.Sidecar, index int, resources *v1.ResourceRequirements) *v1.Container {
 	name := sidecar.Name
 	if name == "" {
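For orientation, a hedged sketch of how the two getters above are meant to be combined: concatenate the ConfigMap- and Secret-derived variables and sort them by name, mirroring the generateStatefulSet change in the next hunk. The helper name `mergeCustomPodEnvVars` is invented here, and the snippet assumes the k8s.io/api module.

```go
package main

import (
	"fmt"
	"sort"

	v1 "k8s.io/api/core/v1"
)

// mergeCustomPodEnvVars concatenates the ConfigMap- and Secret-derived
// variables and sorts them by name before they are handed to the Spilo pod
// env var generation (illustrative helper, not operator code).
func mergeCustomPodEnvVars(configMapVars, secretVars []v1.EnvVar) []v1.EnvVar {
	custom := append(append([]v1.EnvVar{}, configMapVars...), secretVars...)
	sort.Slice(custom, func(i, j int) bool { return custom[i].Name < custom[j].Name })
	return custom
}

func main() {
	cmVars := []v1.EnvVar{{Name: "foo2", Value: "bar2"}, {Name: "foo1", Value: "bar1"}}
	secretVars := []v1.EnvVar{{Name: "minio_access_key"}} // value would arrive via secretKeyRef
	for _, v := range mergeCustomPodEnvVars(cmVars, secretVars) {
		fmt.Println(v.Name)
	}
	// Output order: foo1, foo2, minio_access_key
}
```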
@@ -943,32 +1013,23 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		initContainers = spec.InitContainers
 	}

-	customPodEnvVarsList := make([]v1.EnvVar, 0)
-
-	if c.OpConfig.PodEnvironmentConfigMap != (pkgspec.NamespacedName{}) {
-		var cm *v1.ConfigMap
-		cm, err = c.KubeClient.ConfigMaps(c.OpConfig.PodEnvironmentConfigMap.Namespace).Get(
-			context.TODO(),
-			c.OpConfig.PodEnvironmentConfigMap.Name,
-			metav1.GetOptions{})
-		if err != nil {
-			// if not found, try again using the cluster's namespace if it's different (old behavior)
-			if k8sutil.ResourceNotFound(err) && c.Namespace != c.OpConfig.PodEnvironmentConfigMap.Namespace {
-				cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(
-					context.TODO(),
-					c.OpConfig.PodEnvironmentConfigMap.Name,
-					metav1.GetOptions{})
-			}
-			if err != nil {
-				return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err)
-			}
-		}
-		for k, v := range cm.Data {
-			customPodEnvVarsList = append(customPodEnvVarsList, v1.EnvVar{Name: k, Value: v})
-		}
-		sort.Slice(customPodEnvVarsList,
-			func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name })
+	// fetch env vars from custom ConfigMap
+	configMapEnvVarsList, err := c.getPodEnvironmentConfigMapVariables()
+	if err != nil {
+		return nil, err
 	}

+	// fetch env vars from custom Secret
+	secretEnvVarsList, err := c.getPodEnvironmentSecretVariables()
+	if err != nil {
+		return nil, err
+	}
+
+	// concat all custom pod env vars and sort them
+	customPodEnvVarsList := append(configMapEnvVarsList, secretEnvVarsList...)
+	sort.Slice(customPodEnvVarsList,
+		func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name })
+
 	if spec.StandbyCluster != nil && spec.StandbyCluster.S3WalPath == "" {
 		return nil, fmt.Errorf("s3_wal_path is empty for standby cluster")
 	}
@@ -1004,7 +1065,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 	spiloEnvVars := c.generateSpiloPodEnvVars(
 		c.Postgresql.GetUID(),
 		spiloConfiguration,
-		&spec.Clone,
+		spec.Clone,
 		spec.StandbyCluster,
 		customPodEnvVarsList,
 	)
@@ -1,15 +1,18 @@
 package cluster

 import (
+	"context"
 	"errors"
 	"fmt"
 	"reflect"
+	"sort"

 	"testing"

 	"github.com/stretchr/testify/assert"

 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	"github.com/zalando/postgres-operator/pkg/spec"
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/config"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
@@ -22,6 +25,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 )

 // For testing purposes
@@ -116,17 +120,17 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {

 	expectedValuesGSBucket := []ExpectedValue{
 		ExpectedValue{
-			envIndex:       14,
+			envIndex:       15,
 			envVarConstant: "WAL_GS_BUCKET",
 			envVarValue:    "wale-gs-bucket",
 		},
 		ExpectedValue{
-			envIndex:       15,
+			envIndex:       16,
 			envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX",
 			envVarValue:    "/SomeUUID",
 		},
 		ExpectedValue{
-			envIndex:       16,
+			envIndex:       17,
 			envVarConstant: "WAL_BUCKET_SCOPE_PREFIX",
 			envVarValue:    "",
 		},
@@ -134,22 +138,22 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {

 	expectedValuesGCPCreds := []ExpectedValue{
 		ExpectedValue{
-			envIndex:       14,
+			envIndex:       15,
 			envVarConstant: "WAL_GS_BUCKET",
 			envVarValue:    "wale-gs-bucket",
 		},
 		ExpectedValue{
-			envIndex:       15,
+			envIndex:       16,
 			envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX",
 			envVarValue:    "/SomeUUID",
 		},
 		ExpectedValue{
-			envIndex:       16,
+			envIndex:       17,
 			envVarConstant: "WAL_BUCKET_SCOPE_PREFIX",
 			envVarValue:    "",
 		},
 		ExpectedValue{
-			envIndex:       17,
+			envIndex:       18,
 			envVarConstant: "GOOGLE_APPLICATION_CREDENTIALS",
 			envVarValue:    "some_path_to_credentials",
 		},
@@ -713,6 +717,218 @@ func TestSecretVolume(t *testing.T) {
 		}
 	}
 }
 
+const (
+	testPodEnvironmentConfigMapName = "pod_env_cm"
+	testPodEnvironmentSecretName    = "pod_env_sc"
+)
+
+type mockSecret struct {
+	v1core.SecretInterface
+}
+
+type mockConfigMap struct {
+	v1core.ConfigMapInterface
+}
+
+func (c *mockSecret) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) {
+	if name != testPodEnvironmentSecretName {
+		return nil, fmt.Errorf("Secret PodEnvironmentSecret not found")
+	}
+	secret := &v1.Secret{}
+	secret.Name = testPodEnvironmentSecretName
+	secret.Data = map[string][]byte{
+		"minio_access_key": []byte("alpha"),
+		"minio_secret_key": []byte("beta"),
+	}
+	return secret, nil
+}
+
+func (c *mockConfigMap) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ConfigMap, error) {
+	if name != testPodEnvironmentConfigMapName {
+		return nil, fmt.Errorf("NotFound")
+	}
+	configmap := &v1.ConfigMap{}
+	configmap.Name = testPodEnvironmentConfigMapName
+	configmap.Data = map[string]string{
+		"foo1": "bar1",
+		"foo2": "bar2",
+	}
+	return configmap, nil
+}
+
+type MockSecretGetter struct {
+}
+
+type MockConfigMapsGetter struct {
+}
+
+func (c *MockSecretGetter) Secrets(namespace string) v1core.SecretInterface {
+	return &mockSecret{}
+}
+
+func (c *MockConfigMapsGetter) ConfigMaps(namespace string) v1core.ConfigMapInterface {
+	return &mockConfigMap{}
+}
+
+func newMockKubernetesClient() k8sutil.KubernetesClient {
+	return k8sutil.KubernetesClient{
+		SecretsGetter:    &MockSecretGetter{},
+		ConfigMapsGetter: &MockConfigMapsGetter{},
+	}
+}
+
+func newMockCluster(opConfig config.Config) *Cluster {
+	cluster := &Cluster{
+		Config:     Config{OpConfig: opConfig},
+		KubeClient: newMockKubernetesClient(),
+	}
+	return cluster
+}
+
+func TestPodEnvironmentConfigMapVariables(t *testing.T) {
+	testName := "TestPodEnvironmentConfigMapVariables"
+	tests := []struct {
+		subTest  string
+		opConfig config.Config
+		envVars  []v1.EnvVar
+		err      error
+	}{
+		{
+			subTest: "no PodEnvironmentConfigMap",
+			envVars: []v1.EnvVar{},
+		},
+		{
+			subTest: "missing PodEnvironmentConfigMap",
+			opConfig: config.Config{
+				Resources: config.Resources{
+					PodEnvironmentConfigMap: spec.NamespacedName{
+						Name: "idonotexist",
+					},
+				},
+			},
+			err: fmt.Errorf("could not read PodEnvironmentConfigMap: NotFound"),
+		},
+		{
+			subTest: "simple PodEnvironmentConfigMap",
+			opConfig: config.Config{
+				Resources: config.Resources{
+					PodEnvironmentConfigMap: spec.NamespacedName{
+						Name: testPodEnvironmentConfigMapName,
+					},
+				},
+			},
+			envVars: []v1.EnvVar{
+				{
+					Name:  "foo1",
+					Value: "bar1",
+				},
+				{
+					Name:  "foo2",
+					Value: "bar2",
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		c := newMockCluster(tt.opConfig)
+		vars, err := c.getPodEnvironmentConfigMapVariables()
+		sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name })
+		if !reflect.DeepEqual(vars, tt.envVars) {
+			t.Errorf("%s %s: expected `%v` but got `%v`",
+				testName, tt.subTest, tt.envVars, vars)
+		}
+		if tt.err != nil {
+			if err.Error() != tt.err.Error() {
+				t.Errorf("%s %s: expected error `%v` but got `%v`",
+					testName, tt.subTest, tt.err, err)
+			}
+		} else {
+			if err != nil {
+				t.Errorf("%s %s: expected no error but got error: `%v`",
+					testName, tt.subTest, err)
+			}
+		}
+	}
+}
+
+// Test if the keys of an existing secret are properly referenced
+func TestPodEnvironmentSecretVariables(t *testing.T) {
+	testName := "TestPodEnvironmentSecretVariables"
+	tests := []struct {
+		subTest  string
+		opConfig config.Config
+		envVars  []v1.EnvVar
+		err      error
+	}{
+		{
+			subTest: "No PodEnvironmentSecret configured",
+			envVars: []v1.EnvVar{},
+		},
+		{
+			subTest: "Secret referenced by PodEnvironmentSecret does not exist",
+			opConfig: config.Config{
+				Resources: config.Resources{
+					PodEnvironmentSecret: "idonotexist",
+				},
+			},
+			err: fmt.Errorf("could not read Secret PodEnvironmentSecretName: Secret PodEnvironmentSecret not found"),
+		},
+		{
+			subTest: "Pod environment vars reference all keys from secret configured by PodEnvironmentSecret",
+			opConfig: config.Config{
+				Resources: config.Resources{
+					PodEnvironmentSecret: testPodEnvironmentSecretName,
+				},
+			},
+			envVars: []v1.EnvVar{
+				{
+					Name: "minio_access_key",
+					ValueFrom: &v1.EnvVarSource{
+						SecretKeyRef: &v1.SecretKeySelector{
+							LocalObjectReference: v1.LocalObjectReference{
+								Name: testPodEnvironmentSecretName,
+							},
+							Key: "minio_access_key",
+						},
+					},
+				},
+				{
+					Name: "minio_secret_key",
+					ValueFrom: &v1.EnvVarSource{
+						SecretKeyRef: &v1.SecretKeySelector{
+							LocalObjectReference: v1.LocalObjectReference{
+								Name: testPodEnvironmentSecretName,
+							},
+							Key: "minio_secret_key",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		c := newMockCluster(tt.opConfig)
+		vars, err := c.getPodEnvironmentSecretVariables()
+		sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name })
+		if !reflect.DeepEqual(vars, tt.envVars) {
+			t.Errorf("%s %s: expected `%v` but got `%v`",
+				testName, tt.subTest, tt.envVars, vars)
+		}
+		if tt.err != nil {
+			if err.Error() != tt.err.Error() {
+				t.Errorf("%s %s: expected error `%v` but got `%v`",
+					testName, tt.subTest, tt.err, err)
+			}
+		} else {
+			if err != nil {
+				t.Errorf("%s %s: expected no error but got error: `%v`",
+					testName, tt.subTest, err)
+			}
+		}
+	}
+
+}
+
 func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
 	cpuReq := podSpec.Spec.Containers[0].Resources.Requests["cpu"]
 	if cpuReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest {
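The cluster method exercised above, getPodEnvironmentSecretVariables, is not part of this diff. Below is a minimal sketch reconstructed only from the test expectations; the use of c.Namespace and c.OpConfig.PodEnvironmentSecret, and the exact error wording, are inferred and may differ from the operator's real implementation. It is meant to live in the cluster package next to the tests and reuses their imports (context, fmt, metav1, v1).

// Sketch only: reconstructed from the tests above, not taken from the operator source.
func (c *Cluster) getPodEnvironmentSecretVariables() ([]v1.EnvVar, error) {
    secretPodEnvVarsList := make([]v1.EnvVar, 0)

    // Nothing configured: no extra env vars and no error (first test case).
    if c.OpConfig.PodEnvironmentSecret == "" {
        return secretPodEnvVarsList, nil
    }

    // Look the Secret up in the cluster's own namespace; c.Namespace is an
    // assumption here, the tests do not pin the namespace down.
    secret, err := c.KubeClient.Secrets(c.Namespace).Get(
        context.TODO(), c.OpConfig.PodEnvironmentSecret, metav1.GetOptions{})
    if err != nil {
        return nil, fmt.Errorf("could not read Secret PodEnvironmentSecretName: %v", err)
    }

    // One env var per key, referenced via SecretKeyRef so the value itself
    // never appears in the pod spec (matches the third test case).
    for k := range secret.Data {
        secretPodEnvVarsList = append(secretPodEnvVarsList, v1.EnvVar{
            Name: k,
            ValueFrom: &v1.EnvVarSource{
                SecretKeyRef: &v1.SecretKeySelector{
                    LocalObjectReference: v1.LocalObjectReference{
                        Name: c.OpConfig.PodEnvironmentSecret,
                    },
                    Key: k,
                },
            },
        })
    }

    return secretPodEnvVarsList, nil
}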
@@ -725,17 +725,26 @@ func (c *Cluster) deleteEndpoint(role PostgresRole) error {
 	return nil
 }
 
-func (c *Cluster) deleteSecret(secret *v1.Secret) error {
-	c.setProcessName("deleting secret %q", util.NameFromMeta(secret.ObjectMeta))
-	c.logger.Debugf("deleting secret %q", util.NameFromMeta(secret.ObjectMeta))
-	err := c.KubeClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, c.deleteOptions)
-	if err != nil {
-		return err
+func (c *Cluster) deleteSecrets() error {
+	c.setProcessName("deleting secrets")
+	var errors []string
+	errorCount := 0
+	for uid, secret := range c.Secrets {
+		c.logger.Debugf("deleting secret %q", util.NameFromMeta(secret.ObjectMeta))
+		err := c.KubeClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, c.deleteOptions)
+		if err != nil {
+			errors = append(errors, fmt.Sprintf("could not delete secret %q: %v", util.NameFromMeta(secret.ObjectMeta), err))
+			errorCount++
+		}
+		c.logger.Infof("secret %q has been deleted", util.NameFromMeta(secret.ObjectMeta))
+		c.Secrets[uid] = nil
 	}
-	c.logger.Infof("secret %q has been deleted", util.NameFromMeta(secret.ObjectMeta))
-	delete(c.Secrets, secret.UID)
 
-	return err
+	if errorCount > 0 {
+		return fmt.Errorf("could not delete all secrets: %v", errors)
+	}
+
+	return nil
 }
 
 func (c *Cluster) createRoles() (err error) {
@@ -63,23 +63,26 @@ func noEmptySync(cluster *Cluster, err error, reason SyncReason) error {
 
 func TestConnectionPoolerSynchronization(t *testing.T) {
 	testName := "Test connection pooler synchronization"
-	var cluster = New(
-		Config{
-			OpConfig: config.Config{
-				ProtectedRoles: []string{"admin"},
-				Auth: config.Auth{
-					SuperUsername:       superUserName,
-					ReplicationUsername: replicationUserName,
-				},
-				ConnectionPooler: config.ConnectionPooler{
-					ConnectionPoolerDefaultCPURequest:    "100m",
-					ConnectionPoolerDefaultCPULimit:      "100m",
-					ConnectionPoolerDefaultMemoryRequest: "100Mi",
-					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
-					NumberOfInstances:                    int32ToPointer(1),
-				},
-			},
-		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+	newCluster := func() *Cluster {
+		return New(
+			Config{
+				OpConfig: config.Config{
+					ProtectedRoles: []string{"admin"},
+					Auth: config.Auth{
+						SuperUsername:       superUserName,
+						ReplicationUsername: replicationUserName,
+					},
+					ConnectionPooler: config.ConnectionPooler{
+						ConnectionPoolerDefaultCPURequest:    "100m",
+						ConnectionPoolerDefaultCPULimit:      "100m",
+						ConnectionPoolerDefaultMemoryRequest: "100Mi",
+						ConnectionPoolerDefaultMemoryLimit:   "100Mi",
+						NumberOfInstances:                    int32ToPointer(1),
+					},
+				},
+			}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+	}
+	cluster := newCluster()
 
 	cluster.Statefulset = &appsv1.StatefulSet{
 		ObjectMeta: metav1.ObjectMeta{

@@ -87,20 +90,20 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 		},
 	}
 
-	clusterMissingObjects := *cluster
+	clusterMissingObjects := newCluster()
 	clusterMissingObjects.KubeClient = k8sutil.ClientMissingObjects()
 
-	clusterMock := *cluster
+	clusterMock := newCluster()
 	clusterMock.KubeClient = k8sutil.NewMockKubernetesClient()
 
-	clusterDirtyMock := *cluster
+	clusterDirtyMock := newCluster()
 	clusterDirtyMock.KubeClient = k8sutil.NewMockKubernetesClient()
 	clusterDirtyMock.ConnectionPooler = &ConnectionPoolerObjects{
 		Deployment: &appsv1.Deployment{},
 		Service:    &v1.Service{},
 	}
 
-	clusterNewDefaultsMock := *cluster
+	clusterNewDefaultsMock := newCluster()
 	clusterNewDefaultsMock.KubeClient = k8sutil.NewMockKubernetesClient()
 
 	tests := []struct {

@@ -124,7 +127,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
-			cluster:          &clusterMissingObjects,
+			cluster:          clusterMissingObjects,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            objectsAreSaved,

@@ -139,7 +142,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 					EnableConnectionPooler: boolToPointer(true),
 				},
 			},
-			cluster:          &clusterMissingObjects,
+			cluster:          clusterMissingObjects,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            objectsAreSaved,

@@ -154,7 +157,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
-			cluster:          &clusterMissingObjects,
+			cluster:          clusterMissingObjects,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            objectsAreSaved,

@@ -169,7 +172,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 			newSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{},
 			},
-			cluster:          &clusterMock,
+			cluster:          clusterMock,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            objectsAreDeleted,

@@ -182,7 +185,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 			newSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{},
 			},
-			cluster:          &clusterDirtyMock,
+			cluster:          clusterDirtyMock,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            objectsAreDeleted,

@@ -203,7 +206,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 					},
 				},
 			},
-			cluster:          &clusterMock,
+			cluster:          clusterMock,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            deploymentUpdated,

@@ -220,7 +223,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
-			cluster:          &clusterNewDefaultsMock,
+			cluster:          clusterNewDefaultsMock,
 			defaultImage:     "pooler:2.0",
 			defaultInstances: 2,
 			check:            deploymentUpdated,

@@ -239,7 +242,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
-			cluster:          &clusterMock,
+			cluster:          clusterMock,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            noEmptySync,
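The hunks above replace shallow copies of one shared test cluster (clusterMock := *cluster) with a newCluster() factory, presumably so that every test case starts from independent state. A standalone Go illustration of the underlying issue, not operator code: copying a struct value copies its pointer fields by reference, so the copies keep sharing whatever those pointers reference.

// Illustration only (hypothetical types): why a struct copy is not an
// independent fixture while a factory call is.
package main

import "fmt"

type fixture struct {
    deployment *string // stands in for a Cluster's pointer-typed fields
}

func newFixture() *fixture {
    d := "fresh"
    return &fixture{deployment: &d}
}

func main() {
    base := newFixture()

    copied := *base // shallow copy: both values share the same *deployment
    *copied.deployment = "mutated by one test case"
    fmt.Println(*base.deployment) // prints "mutated by one test case"

    independent := newFixture()          // factory: each case gets its own state
    fmt.Println(*independent.deployment) // prints "fresh"
}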
@@ -58,6 +58,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.PodServiceAccountDefinition = fromCRD.Kubernetes.PodServiceAccountDefinition
 	result.PodServiceAccountRoleBindingDefinition = fromCRD.Kubernetes.PodServiceAccountRoleBindingDefinition
 	result.PodEnvironmentConfigMap = fromCRD.Kubernetes.PodEnvironmentConfigMap
+	result.PodEnvironmentSecret = fromCRD.Kubernetes.PodEnvironmentSecret
 	result.PodTerminateGracePeriod = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod), "5m")
 	result.SpiloPrivileged = fromCRD.Kubernetes.SpiloPrivileged
 	result.SpiloFSGroup = fromCRD.Kubernetes.SpiloFSGroup

@@ -45,6 +45,7 @@ type Resources struct {
 	MinCPULimit             string              `name:"min_cpu_limit" default:"250m"`
 	MinMemoryLimit          string              `name:"min_memory_limit" default:"250Mi"`
 	PodEnvironmentConfigMap spec.NamespacedName `name:"pod_environment_configmap"`
+	PodEnvironmentSecret    string              `name:"pod_environment_secret"`
 	NodeReadinessLabel      map[string]string   `name:"node_readiness_label" default:""`
 	MaxInstances            int32               `name:"max_instances" default:"-1"`
 	MinInstances            int32               `name:"min_instances" default:"-1"`
@@ -28,6 +28,7 @@ const (
 // an existing roles of another role membership, nor it removes the already assigned flag
 // (except for the NOLOGIN). TODO: process other NOflags, i.e. NOSUPERUSER correctly.
 type DefaultUserSyncStrategy struct {
+	PasswordEncryption string
 }
 
 // ProduceSyncRequests figures out the types of changes that need to happen with the given users.

@@ -45,7 +46,7 @@ func (strategy DefaultUserSyncStrategy) ProduceSyncRequests(dbUsers spec.PgUserM
 			}
 		} else {
 			r := spec.PgSyncUserRequest{}
-			newMD5Password := util.PGUserPassword(newUser)
+			newMD5Password := util.NewEncryptor(strategy.PasswordEncryption).PGUserPassword(newUser)
 
 			if dbUser.Password != newMD5Password {
 				r.User.Password = newMD5Password

@@ -140,7 +141,7 @@ func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.D
 	if user.Password == "" {
 		userPassword = "PASSWORD NULL"
 	} else {
-		userPassword = fmt.Sprintf(passwordTemplate, util.PGUserPassword(user))
+		userPassword = fmt.Sprintf(passwordTemplate, util.NewEncryptor(strategy.PasswordEncryption).PGUserPassword(user))
 	}
 	query := fmt.Sprintf(createUserSQL, user.Name, strings.Join(userFlags, " "), userPassword)
 

@@ -155,7 +156,7 @@ func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB
 	var resultStmt []string
 
 	if user.Password != "" || len(user.Flags) > 0 {
-		alterStmt := produceAlterStmt(user)
+		alterStmt := produceAlterStmt(user, strategy.PasswordEncryption)
 		resultStmt = append(resultStmt, alterStmt)
 	}
 	if len(user.MemberOf) > 0 {

@@ -174,14 +175,14 @@ func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB
 	return nil
 }
 
-func produceAlterStmt(user spec.PgUser) string {
+func produceAlterStmt(user spec.PgUser, encryption string) string {
 	// ALTER ROLE ... LOGIN ENCRYPTED PASSWORD ..
 	result := make([]string, 0)
 	password := user.Password
 	flags := user.Flags
 
 	if password != "" {
-		result = append(result, fmt.Sprintf(passwordTemplate, util.PGUserPassword(user)))
+		result = append(result, fmt.Sprintf(passwordTemplate, util.NewEncryptor(encryption).PGUserPassword(user)))
 	}
 	if len(flags) != 0 {
 		result = append(result, strings.Join(flags, " "))
@@ -1,8 +1,11 @@
 package util
 
 import (
+	"crypto/hmac"
 	"crypto/md5" // #nosec we need it to for PostgreSQL md5 passwords
 	cryptoRand "crypto/rand"
+	"crypto/sha256"
+	"encoding/base64"
 	"encoding/hex"
 	"fmt"
 	"math/big"

@@ -16,10 +19,14 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/zalando/postgres-operator/pkg/spec"
+	"golang.org/x/crypto/pbkdf2"
 )
 
 const (
 	md5prefix = "md5"
+	scramsha256prefix = "SCRAM-SHA-256"
+	saltlength = 16
+	iterations = 4096
 )
 
 var passwordChars = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")

@@ -61,16 +68,62 @@ func NameFromMeta(meta metav1.ObjectMeta) spec.NamespacedName {
 	}
 }
 
-// PGUserPassword is used to generate md5 password hash for a given user. It does nothing for already hashed passwords.
-func PGUserPassword(user spec.PgUser) string {
-	if (len(user.Password) == md5.Size*2+len(md5prefix) && user.Password[:3] == md5prefix) || user.Password == "" {
+type Hasher func(user spec.PgUser) string
+type Random func(n int) string
+
+type Encryptor struct {
+	encrypt Hasher
+	random  Random
+}
+
+func NewEncryptor(encryption string) *Encryptor {
+	e := Encryptor{random: RandomPassword}
+	m := map[string]Hasher{
+		"md5":           e.PGUserPasswordMD5,
+		"scram-sha-256": e.PGUserPasswordScramSHA256,
+	}
+	hasher, ok := m[encryption]
+	if !ok {
+		hasher = e.PGUserPasswordMD5
+	}
+	e.encrypt = hasher
+	return &e
+}
+
+func (e *Encryptor) PGUserPassword(user spec.PgUser) string {
+	if (len(user.Password) == md5.Size*2+len(md5prefix) && user.Password[:3] == md5prefix) ||
+		(len(user.Password) > len(scramsha256prefix) && user.Password[:len(scramsha256prefix)] == scramsha256prefix) || user.Password == "" {
 		// Avoid processing already encrypted or empty passwords
 		return user.Password
 	}
+	return e.encrypt(user)
+}
+
+func (e *Encryptor) PGUserPasswordMD5(user spec.PgUser) string {
 	s := md5.Sum([]byte(user.Password + user.Name)) // #nosec, using md5 since PostgreSQL uses it for hashing passwords.
 	return md5prefix + hex.EncodeToString(s[:])
 }
+
+func (e *Encryptor) PGUserPasswordScramSHA256(user spec.PgUser) string {
+	salt := []byte(e.random(saltlength))
+	key := pbkdf2.Key([]byte(user.Password), salt, iterations, 32, sha256.New)
+	mac := hmac.New(sha256.New, key)
+	mac.Write([]byte("Server Key"))
+	serverKey := mac.Sum(nil)
+	mac = hmac.New(sha256.New, key)
+	mac.Write([]byte("Client Key"))
+	clientKey := mac.Sum(nil)
+	storedKey := sha256.Sum256(clientKey)
+	pass := fmt.Sprintf("%s$%v:%s$%s:%s",
+		scramsha256prefix,
+		iterations,
+		base64.StdEncoding.EncodeToString(salt),
+		base64.StdEncoding.EncodeToString(storedKey[:]),
+		base64.StdEncoding.EncodeToString(serverKey),
+	)
+	return pass
+}
 
 // Diff returns diffs between 2 objects
 func Diff(a, b interface{}) []string {
 	return pretty.Diff(a, b)
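A short usage sketch for the Encryptor introduced above. It assumes the conventional import path github.com/zalando/postgres-operator/pkg/util for this package and uses an illustrative spec.PgUser value; per NewEncryptor, unknown encryption names fall back to md5, and PGUserPassword leaves already hashed passwords untouched.

// Usage sketch only; the user values are illustrative, not operator data.
package main

import (
    "fmt"

    "github.com/zalando/postgres-operator/pkg/spec"
    "github.com/zalando/postgres-operator/pkg/util" // assumed import path
)

func main() {
    user := spec.PgUser{Name: "app_user", Password: "secret"}

    // md5 output: "md5" + hex digest of password+name.
    fmt.Println(util.NewEncryptor("md5").PGUserPassword(user))

    // scram-sha-256 output follows SCRAM-SHA-256$<iterations>:<salt>$<StoredKey>:<ServerKey>;
    // the salt comes from RandomPassword, so the verifier differs on every run.
    fmt.Println(util.NewEncryptor("scram-sha-256").PGUserPassword(user))

    // Already hashed passwords are returned unchanged.
    hashed := spec.PgUser{Name: "app_user", Password: "md587f77988ccb5aa917c93201ba314fcd4"}
    fmt.Println(util.NewEncryptor("scram-sha-256").PGUserPassword(hashed))
}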
@@ -12,20 +12,27 @@
 )
 
 var pgUsers = []struct {
 	in spec.PgUser
-	out string
+	outmd5 string
+	outscramsha256 string
 }{{spec.PgUser{
 	Name:     "test",
 	Password: "password",
 	Flags:    []string{},
 	MemberOf: []string{}},
-	"md587f77988ccb5aa917c93201ba314fcd4"},
+	"md587f77988ccb5aa917c93201ba314fcd4", "SCRAM-SHA-256$4096:c2FsdA==$lF4cRm/Jky763CN4HtxdHnjV4Q8AWTNlKvGmEFFU8IQ=:ub8OgRsftnk2ccDMOt7ffHXNcikRkQkq1lh4xaAqrSw="},
 	{spec.PgUser{
 		Name:     "test",
 		Password: "md592f413f3974bdf3799bb6fecb5f9f2c6",
 		Flags:    []string{},
 		MemberOf: []string{}},
-		"md592f413f3974bdf3799bb6fecb5f9f2c6"}}
+		"md592f413f3974bdf3799bb6fecb5f9f2c6", "md592f413f3974bdf3799bb6fecb5f9f2c6"},
+	{spec.PgUser{
+		Name:     "test",
+		Password: "SCRAM-SHA-256$4096:S1ByZWhvYVV5VDlJNGZoVw==$ozLevu5k0pAQYRrSY+vZhetO6+/oB+qZvuutOdXR94U=:yADwhy0LGloXzh5RaVwLMFyUokwI17VkHVfKVuHu0Zs=",
+		Flags:    []string{},
+		MemberOf: []string{}},
+		"SCRAM-SHA-256$4096:S1ByZWhvYVV5VDlJNGZoVw==$ozLevu5k0pAQYRrSY+vZhetO6+/oB+qZvuutOdXR94U=:yADwhy0LGloXzh5RaVwLMFyUokwI17VkHVfKVuHu0Zs=", "SCRAM-SHA-256$4096:S1ByZWhvYVV5VDlJNGZoVw==$ozLevu5k0pAQYRrSY+vZhetO6+/oB+qZvuutOdXR94U=:yADwhy0LGloXzh5RaVwLMFyUokwI17VkHVfKVuHu0Zs="}}
 
 var prettyDiffTest = []struct {
 	inA interface{}

@@ -107,9 +114,16 @@ func TestNameFromMeta(t *testing.T) {
 
 func TestPGUserPassword(t *testing.T) {
 	for _, tt := range pgUsers {
-		pwd := PGUserPassword(tt.in)
-		if pwd != tt.out {
-			t.Errorf("PgUserPassword expected: %q, got: %q", tt.out, pwd)
+		e := NewEncryptor("md5")
+		pwd := e.PGUserPassword(tt.in)
+		if pwd != tt.outmd5 {
+			t.Errorf("PgUserPassword expected: %q, got: %q", tt.outmd5, pwd)
+		}
+		e = NewEncryptor("scram-sha-256")
+		e.random = func(n int) string { return "salt" }
+		pwd = e.PGUserPassword(tt.in)
+		if pwd != tt.outscramsha256 {
+			t.Errorf("PgUserPassword expected: %q, got: %q", tt.outscramsha256, pwd)
 		}
 	}
 }