commit 5f6f0cfd2a

    resolve conflicts
````diff
@@ -7,6 +7,8 @@
 _obj
 _test
 _manifests
+_tmp
+github.com
 
 # Architecture specific extensions/prefixes
 *.[568vq]
@@ -26,6 +28,7 @@ _testmain.go
 /vendor/
 /build/
 /docker/build/
+/github.com/
 .idea
 
 scm-source.json
````
Makefile (2 changes)

````diff
@@ -97,4 +97,4 @@ test:
 	GO111MODULE=on go test ./...
 
 e2e: docker # build operator image to be tested
-	cd e2e; make tools test clean
+	cd e2e; make tools e2etest clean
````
````diff
@@ -11,6 +11,9 @@ spec:
   ports:
     - port: {{ .Values.service.port }}
       targetPort: 8081
+      {{- if and (eq .Values.service.type "NodePort") .Values.service.nodePort }}
+      nodePort: {{ .Values.service.nodePort }}
+      {{- end }}
       protocol: TCP
   selector:
     app.kubernetes.io/instance: {{ .Release.Name }}
````
````diff
@@ -42,6 +42,9 @@ envs:
 service:
   type: "ClusterIP"
   port: "8080"
+  # If the type of the service is NodePort, a port can be specified using the nodePort field
+  # If the nodePort field is not specified, or if it has no value, then a random port is used
+  # nodePort: 32521
 
 # configure UI ingress. If needed: "enabled: true"
 ingress:
````
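The conditional `nodePort` added to the service template above is driven entirely by these two chart values. As a minimal sketch (reusing the port number from the commented default; all other values are omitted), exposing the UI on a fixed node port would look like:

```yaml
# UI chart values - only the service section is shown
service:
  type: "NodePort"
  nodePort: 32521
```

If `type` is `"NodePort"` but `nodePort` is left unset, Kubernetes assigns a random port from the node-port range, as the comments above note.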
````diff
@@ -62,6 +62,8 @@ spec:
           type: string
         enable_crd_validation:
           type: boolean
+        enable_lazy_spilo_upgrade:
+          type: boolean
         enable_shm_volume:
           type: boolean
         enable_unused_pvc_deletion:
@@ -86,6 +88,12 @@ spec:
           type: object
           additionalProperties:
             type: string
+        sidecars:
+          type: array
+          nullable: true
+          items:
+            type: object
+            additionalProperties: true
         workers:
           type: integer
           minimum: 1
@@ -301,7 +309,7 @@ spec:
           type: integer
         ring_log_lines:
           type: integer
-        scalyr:
+        scalyr: # deprecated
           type: object
           properties:
             scalyr_api_key:
````
````diff
@@ -273,6 +273,26 @@ spec:
           type: object
           additionalProperties:
             type: string
+        preparedDatabases:
+          type: object
+          additionalProperties:
+            type: object
+            properties:
+              defaultUsers:
+                type: boolean
+              extensions:
+                type: object
+                additionalProperties:
+                  type: string
+              schemas:
+                type: object
+                additionalProperties:
+                  type: object
+                  properties:
+                    defaultUsers:
+                      type: boolean
+                    defaultRoles:
+                      type: boolean
         replicaLoadBalancer: # deprecated
           type: boolean
         resources:
@@ -364,6 +384,21 @@ spec:
           type: string
         teamId:
           type: string
+        tls:
+          type: object
+          required:
+            - secretName
+          properties:
+            secretName:
+              type: string
+            certificateFile:
+              type: string
+            privateKeyFile:
+              type: string
+            caFile:
+              type: string
+            caSecretName:
+              type: string
         tolerations:
           type: array
           items:
````
````diff
@@ -42,6 +42,18 @@ rules:
   - configmaps
   verbs:
   - get
+# to send events to the CRs
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - get
+  - list
+  - patch
+  - update
+  - watch
 # to manage endpoints which are also used by Patroni
 - apiGroups:
   - ""
````
````diff
@@ -32,8 +32,6 @@ configuration:
 {{ toYaml .Values.configTeamsApi | indent 4 }}
   logging_rest_api:
 {{ toYaml .Values.configLoggingRestApi | indent 4 }}
-  scalyr:
-{{ toYaml .Values.configScalyr | indent 4 }}
   connection_pooler:
 {{ toYaml .Values.configConnectionPooler | indent 4 }}
 {{- end }}
````
````diff
@@ -19,6 +19,8 @@ configTarget: "OperatorConfigurationCRD"
 configGeneral:
   # choose if deployment creates/updates CRDs with OpenAPIV3Validation
   enable_crd_validation: true
+  # update only the statefulsets without immediately doing the rolling update
+  enable_lazy_spilo_upgrade: false
   # start any new database pod without limitations on shm memory
   enable_shm_volume: true
   # delete PVCs of shutdown pods
@@ -28,7 +30,7 @@ configGeneral:
   # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
   # kubernetes_use_configmaps: false
   # Spilo docker image
-  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
+  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p115
   # max number of instances in Postgres cluster. -1 = no limit
   min_instances: -1
   # min number of instances in Postgres cluster. -1 = no limit
@@ -254,23 +256,6 @@ configTeamsApi:
   # URL of the Teams API service
   # teams_api_url: http://fake-teams-api.default.svc.cluster.local
 
-# Scalyr is a log management tool that Zalando uses as a sidecar
-configScalyr:
-  # API key for the Scalyr sidecar
-  # scalyr_api_key: ""
-
-  # Docker image for the Scalyr sidecar
-  # scalyr_image: ""
-
-  # CPU limit value for the Scalyr sidecar
-  scalyr_cpu_limit: "1"
-  # CPU rquest value for the Scalyr sidecar
-  scalyr_cpu_request: 100m
-  # Memory limit value for the Scalyr sidecar
-  scalyr_memory_limit: 500Mi
-  # Memory request value for the Scalyr sidecar
-  scalyr_memory_request: 50Mi
-
 configConnectionPooler:
   # db schema to install lookup function into
   connection_pooler_schema: "pooler"
````
````diff
@@ -19,6 +19,8 @@ configTarget: "ConfigMap"
 configGeneral:
   # choose if deployment creates/updates CRDs with OpenAPIV3Validation
   enable_crd_validation: "true"
+  # update only the statefulsets without immediately doing the rolling update
+  enable_lazy_spilo_upgrade: "false"
   # start any new database pod without limitations on shm memory
   enable_shm_volume: "true"
   # delete PVCs of shutdown pods
@@ -28,7 +30,7 @@ configGeneral:
   # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
   # kubernetes_use_configmaps: "false"
   # Spilo docker image
-  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
+  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p115
   # max number of instances in Postgres cluster. -1 = no limit
   min_instances: "-1"
   # min number of instances in Postgres cluster. -1 = no limit
````
````diff
@@ -458,6 +458,17 @@ from numerous escape characters in the latter log entry, view it in CLI with
 `PodTemplate` used by the operator is yet to be updated with the default values
 used internally in K8s.
 
+The operator also supports lazy updates of the Spilo image. In this mode the
+pod template of a PG cluster's stateful set is updated immediately with the
+new image, but no rolling update follows. This feature saves you a switchover,
+and hence downtime, when you know pods will be restarted later anyway, for
+instance due to node rotation. To force a rolling update, disable this mode by
+setting `enable_lazy_spilo_upgrade` to `false` in the operator configuration
+and restart the operator pod. With the standard eager rolling updates, the
+operator checks during Sync that all pods run the image specified in their
+respective statefulsets and triggers a rolling upgrade for PG clusters that
+violate this condition.
+
 ## Logical backups
 
 The operator can manage K8s cron jobs to run logical backups of Postgres
````
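The chart values shown earlier in this commit expose the corresponding switch. As a sketch, enabling the lazy mode in the ConfigMap-based configuration is a one-line change (keys as in the values.yaml above; the CRD-based configuration uses a plain boolean instead of a string):

```yaml
configGeneral:
  # update only the statefulsets without immediately doing the rolling update
  enable_lazy_spilo_upgrade: "true"
```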
````diff
@@ -507,6 +518,33 @@ A secret can be pre-provisioned in different ways:
 * Automatically provisioned via a custom K8s controller like
   [kube-aws-iam-controller](https://github.com/mikkeloscar/kube-aws-iam-controller)
 
+## Sidecars for Postgres clusters
+
+A list of sidecars is added to each cluster created by the operator. The default
+is empty.
+
+```yaml
+kind: OperatorConfiguration
+configuration:
+  sidecars:
+  - image: image:123
+    name: global-sidecar
+    ports:
+    - containerPort: 80
+    volumeMounts:
+    - mountPath: /custom-pgdata-mountpoint
+      name: pgdata
+  - ...
+```
+
+In addition to any environment variables you specify, the following environment
+variables are always passed to sidecars:
+
+- `POD_NAME` - field reference to `metadata.name`
+- `POD_NAMESPACE` - field reference to `metadata.namespace`
+- `POSTGRES_USER` - the superuser that can be used to connect to the database
+- `POSTGRES_PASSWORD` - the password for the superuser
+
 ## Setting up the Postgres Operator UI
 
 Since the v1.2 release the Postgres Operator is shipped with a browser-based
````
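Because `POSTGRES_USER` and `POSTGRES_PASSWORD` are injected into every sidecar, a globally defined sidecar can reach the local database without extra wiring. A hypothetical sketch (the exporter name, image, and port are illustrative, not part of this commit):

```yaml
kind: OperatorConfiguration
configuration:
  sidecars:
  - name: metrics-exporter   # hypothetical sidecar
    image: registry.example.com/postgres-exporter:latest
    ports:
    - containerPort: 9187
    # no credentials are configured here: POSTGRES_USER and
    # POSTGRES_PASSWORD are injected by the operator at runtime
```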
````diff
@@ -435,5 +435,12 @@ Those parameters are grouped under the `tls` top-level key.
   client connects with `sslmode=verify-ca` or `sslmode=verify-full`.
   Default is empty.
 
+* **caSecretName**
+  By setting the `caSecretName` value, the CA certificate file defined by
+  `caFile` will be fetched from this secret instead of from `secretName` above.
+  This secret has to hold a file with that name in its root.
+
 Optionally one can provide full path for any of them. By default it is
 relative to the "/tls/", which is mount path of the tls secret.
+If `caSecretName` is defined, the ca.crt path is relative to "/tlsca/",
+otherwise to the same "/tls/".
````
````diff
@@ -45,7 +45,7 @@ The following environment variables are accepted by the operator:
   all namespaces. Empty value defaults to the operator namespace. Overrides the
   `watched_namespace` operator parameter.
 
-* **SCALYR_API_KEY**
+* **SCALYR_API_KEY** (*deprecated*)
   the value of the Scalyr API key to supply to the pods. Overrides the
   `scalyr_api_key` operator parameter.
 
````
````diff
@@ -75,6 +75,10 @@ Those are top-level keys, containing both leaf keys and groups.
   [OpenAPI v3 schema validation](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#validation)
   The default is `true`.
 
+* **enable_lazy_spilo_upgrade**
+  Instructs the operator to update only the statefulsets with the new image,
+  without immediately doing the rolling update. The assumption is that pods
+  will be restarted later with the new image, for example due to node rotation.
+  The default is `false`.
+
 * **etcd_host**
   Etcd connection string for Patroni defined as `host:port`. Not required when
   Patroni native Kubernetes support is used. The default is empty (use
@@ -93,9 +97,18 @@ Those are top-level keys, containing both leaf keys and groups.
   repository](https://github.com/zalando/spilo).
 
 * **sidecar_docker_images**
-  a map of sidecar names to Docker images to run with Spilo. In case of the name
-  conflict with the definition in the cluster manifest the cluster-specific one
-  is preferred.
+  *deprecated*: use **sidecars** instead. A map of sidecar names to Docker
+  images to run with Spilo. In case of a name conflict with the definition in
+  the cluster manifest, the cluster-specific one is preferred.
+
+* **sidecars**
+  a list of sidecars to run with Spilo, for any cluster (i.e. globally defined
+  sidecars). Each item in the list is of type
+  [Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core).
+  Globally defined sidecars can be overridden by specifying a sidecar in the
+  Postgres manifest with the same name.
+  Note: this field is not part of the schema validation. If the container
+  specification is invalid, the operator fails to create the statefulset.
 
 * **enable_shm_volume**
   Instruct operator to start any new database pod without limitations on shm
@@ -133,8 +146,9 @@ Those are top-level keys, containing both leaf keys and groups.
   at the cost of overprovisioning memory and potential scheduling problems for
   containers with high memory limits due to the lack of memory on Kubernetes
   cluster nodes. This affects all containers created by the operator (Postgres,
-  Scalyr sidecar, and other sidecars); to set resources for the operator's own
-  container, change the [operator deployment manually](../../manifests/postgres-operator.yaml#L20).
+  Scalyr sidecar, and other sidecars except **sidecars** defined in the operator
+  configuration); to set resources for the operator's own container, change the
+  [operator deployment manually](../../manifests/postgres-operator.yaml#L20).
   The default is `false`.
 
 * **enable_unused_pvc_deletion**
@@ -210,12 +224,13 @@ configuration they are grouped under the `kubernetes` key.
   Default is true.
 
 * **enable_init_containers**
-  global option to allow for creating init containers to run actions before
-  Spilo is started. Default is true.
+  global option to allow for creating init containers in the cluster manifest
+  to run actions before Spilo is started. Default is true.
 
 * **enable_sidecars**
-  global option to allow for creating sidecar containers to run alongside Spilo
-  on the same pod. Default is true.
+  global option to allow for creating sidecar containers in the cluster manifest
+  to run alongside Spilo on the same pod. Globally defined sidecars are always
+  enabled. Default is true.
 
 * **secret_name_template**
   a template for the name of the database user secrets generated by the
````
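Both switches gate lists that live in the cluster manifest. As an illustrative sketch of the cluster side (container names and images are examples, not part of this commit):

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  initContainers:            # only honored if enable_init_containers is true
  - name: date
    image: busybox
    command: [ "/bin/date" ]
  sidecars:                  # only honored if enable_sidecars is true
  - name: log-shipper
    image: registry.example.com/log-shipper:latest
```

Globally defined sidecars from the operator configuration are not affected by `enable_sidecars`.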
````diff
@@ -580,11 +595,12 @@ configuration they are grouped under the `logging_rest_api` key.
 * **cluster_history_entries**
   number of entries in the cluster history ring buffer. The default is `1000`.
 
-## Scalyr options
+## Scalyr options (*deprecated*)
 
 Those parameters define the resource requests/limits and properties of the
 scalyr sidecar. In the CRD-based configuration they are grouped under the
-`scalyr` key.
+`scalyr` key. Note that this section is deprecated. Instead, define Scalyr as
+a global sidecar under the `sidecars` key in the configuration.
 
 * **scalyr_api_key**
   API key for the Scalyr sidecar. The default is empty.
````
docs/user.md (219 changes)

````diff
@@ -53,8 +53,19 @@ them.
 
 ## Watch pods being created
 
+Check if the database pods are coming up. Use the label `application=spilo` to
+filter, and list the label `spilo-role` to see when the master is promoted and
+replicas get their labels.
+
 ```bash
-kubectl get pods -w --show-labels
+kubectl get pods -l application=spilo -L spilo-role -w
+```
+
+The operator also emits K8s events to the Postgresql CRD, which can be inspected
+in the operator logs or with:
+
+```bash
+kubectl describe postgresql acid-minimal-cluster
 ```
 
 ## Connect to PostgreSQL
@@ -94,7 +105,10 @@ created on every cluster managed by the operator.
 * `teams API roles`: automatically create users for every member of the team
   owning the database cluster.
 
-In the next sections, we will cover those use cases in more details.
+In the next sections, we will cover those use cases in more detail. Note that
+the Postgres Operator can also create databases with pre-defined owner, reader
+and writer roles, which saves you the manual setup. Read more in the next
+chapter.
 
 ### Manifest roles
````
````diff
@@ -216,6 +230,166 @@ to choose superusers, group roles, [PAM configuration](https://github.com/CyberD
 etc. An OAuth2 token can be passed to the Teams API via a secret. The name for
 this secret is configurable with the `oauth_token_secret_name` parameter.
 
+## Prepared databases with roles and default privileges
+
+The `users` section in the manifests only allows for creating database roles
+with global privileges. Fine-grained data access control or role membership
+cannot be defined there and must be set up by the user in the database. The
+Postgres Operator, however, offers a separate section to specify
+`preparedDatabases` that will be created with pre-defined owner, reader and
+writer roles for each individual database and, optionally, for each database
+schema, too. `preparedDatabases` also enable users to specify PostgreSQL
+extensions that shall be created in a given database schema.
+
+### Default database and schema
+
+A prepared database is already created by adding an empty `preparedDatabases`
+section to the manifest. The database will then be named like the Postgres
+cluster manifest (`-` are replaced with `_`) and will also contain a schema
+called `data`.
+
+```yaml
+spec:
+  preparedDatabases: {}
+```
+
+### Default NOLOGIN roles
+
+Given an example with a specified database and schema:
+
+```yaml
+spec:
+  preparedDatabases:
+    foo:
+      schemas:
+        bar: {}
+```
+
+The Postgres Operator will create the following NOLOGIN roles:
+
+| Role name      | Member of      | Admin         |
+| -------------- | -------------- | ------------- |
+| foo_owner      |                | admin         |
+| foo_reader     |                | foo_owner     |
+| foo_writer     | foo_reader     | foo_owner     |
+| foo_bar_owner  |                | foo_owner     |
+| foo_bar_reader |                | foo_bar_owner |
+| foo_bar_writer | foo_bar_reader | foo_bar_owner |
+
+The `<dbname>_owner` role is the database owner and should be used when creating
+new database objects. All members of the `admin` role, e.g. teams API roles, can
+become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/12/sql-alterdefaultprivileges.html)
+are configured for the owner role so that the `<dbname>_reader` role
+automatically gets read access (SELECT) to new tables and sequences and the
+`<dbname>_writer` receives write access (INSERT, UPDATE, DELETE on tables,
+USAGE and UPDATE on sequences). Both get USAGE on types and EXECUTE on
+functions.
+
+The same principle applies for database schemas, which are owned by the
+`<dbname>_<schema>_owner` role. `<dbname>_<schema>_reader` is read-only,
+`<dbname>_<schema>_writer` has write access and inherits read access from the
+reader role. Note that the `<dbname>_*` roles have access, including default
+privileges, on all schemas, too. If you don't need the dedicated schema roles,
+i.e. you only use one schema, you can disable the creation like this:
+
+```yaml
+spec:
+  preparedDatabases:
+    foo:
+      schemas:
+        bar:
+          defaultRoles: false
+```
+
+Then, the schemas are owned by the database owner, too.
+
+### Default LOGIN roles
+
+The roles described in the previous paragraph can be granted to LOGIN roles from
+the `users` section in the manifest. Optionally, the Postgres Operator can also
+create default LOGIN roles for the database and each schema individually. These
+roles will get the `_user` suffix and they inherit all rights from their NOLOGIN
+counterparts.
+
+| Role name           | Member of      | Admin         |
+| ------------------- | -------------- | ------------- |
+| foo_owner_user      | foo_owner      | admin         |
+| foo_reader_user     | foo_reader     | foo_owner     |
+| foo_writer_user     | foo_writer     | foo_owner     |
+| foo_bar_owner_user  | foo_bar_owner  | foo_owner     |
+| foo_bar_reader_user | foo_bar_reader | foo_bar_owner |
+| foo_bar_writer_user | foo_bar_writer | foo_bar_owner |
+
+These default users are enabled in the manifest with the `defaultUsers` flag:
+
+```yaml
+spec:
+  preparedDatabases:
+    foo:
+      defaultUsers: true
+      schemas:
+        bar:
+          defaultUsers: true
+```
+
+### Database extensions
+
+Prepared databases also allow for creating Postgres extensions. They will be
+created by the database owner in the specified schema.
+
+```yaml
+spec:
+  preparedDatabases:
+    foo:
+      extensions:
+        pg_partman: public
+        postgis: data
+```
+
+Some extensions require SUPERUSER rights on creation unless they are
+whitelisted by the [pgextwlist](https://github.com/dimitri/pgextwlist)
+extension, which is shipped with the Spilo image. To see which extensions are
+on the list, check the `extwlist.extensions` parameter in the postgresql.conf
+file.
+
+```sql
+SHOW extwlist.extensions;
+```
+
+Make sure that `pgextwlist` is also listed under `shared_preload_libraries` in
+the PostgreSQL configuration. Then the database owner should be able to create
+the extension specified in the manifest.
+
+### From `databases` to `preparedDatabases`
+
+If you wish to create the role setup described above for databases listed under
+the `databases` key, you have to make sure that the owner role follows the
+`<dbname>_owner` naming convention of `preparedDatabases`. As roles are synced
+first, this can be done with one edit:
+
+```yaml
+# before
+spec:
+  databases:
+    foo: db_owner
+
+# after
+spec:
+  databases:
+    foo: foo_owner
+  preparedDatabases:
+    foo:
+      schemas:
+        my_existing_schema: {}
+```
+
+Adding existing database schemas to the manifest to create roles for them as
+well is up to the user and not done by the operator. Remember that if you don't
+specify any schema, a new database schema called `data` will be created. Once
+everything is synced (roles, schemas, extensions), you are free to remove the
+database from the `databases` section. Note that the operator does not delete
+database objects or revoke privileges when they are removed from the manifest.
+
 ## Resource definition
 
 The compute resources to be used for the Postgres containers in the pods can be
````
````diff
@@ -442,6 +616,8 @@ The PostgreSQL volume is shared with sidecars and is mounted at
 specified but globally disabled in the configuration. The `enable_sidecars`
 option must be set to `true`.
 
+If you want to add a sidecar to every cluster managed by the operator, you can
+specify it in the [operator configuration](administrator.md#sidecars-for-postgres-clusters) instead.
+
 ## InitContainers Support
 
 Each cluster can specify arbitrary init containers to run. These containers can
@@ -571,21 +747,21 @@ spin up more instances).
 
 ## Custom TLS certificates
 
-By default, the spilo image generates its own TLS certificate during startup.
+By default, the Spilo image generates its own TLS certificate during startup.
 However, this certificate cannot be verified and thus doesn't protect from
 active MITM attacks. In this section we show how to specify a custom TLS
 certificate which is mounted in the database pods via a K8s Secret.
 
 Before applying these changes, in k8s the operator must also be configured with
 the `spilo_fsgroup` set to the GID matching the postgres user group. If you
-don't know the value, use `103` which is the GID from the default spilo image
+don't know the value, use `103` which is the GID from the default Spilo image
 (`spilo_fsgroup=103` in the cluster request spec).
 
 OpenShift allocates the users and groups dynamically (based on scc), and their
 range is different in every namespace. Due to this dynamic behaviour, it's not
 trivial to know at deploy time the uid/gid of the user in the cluster.
-Therefore, instead of using a global `spilo_fsgroup` setting, use the `spiloFSGroup` field
-per Postgres cluster.```
+Therefore, instead of using a global `spilo_fsgroup` setting, use the
+`spiloFSGroup` field per Postgres cluster.
 
 Upload the cert as a kubernetes secret:
 ```sh
@@ -594,7 +770,7 @@ kubectl create secret tls pg-tls \
   --cert pg-tls.crt
 ```
 
-Or with a CA:
+When doing client auth, the CA can optionally come from the same secret:
 ```sh
 kubectl create secret generic pg-tls \
   --from-file=tls.crt=server.crt \
@@ -602,9 +778,6 @@ kubectl create secret generic pg-tls \
   --from-file=ca.crt=ca.crt
 ```
 
-Alternatively it is also possible to use
-[cert-manager](https://cert-manager.io/docs/) to generate these secrets.
-
 Then configure the postgres resource with the TLS secret:
 
 ```yaml
@@ -619,5 +792,29 @@ spec:
     caFile: "ca.crt" # add this if the secret is configured with a CA
 ```
 
-Certificate rotation is handled in the spilo image which checks every 5
+Optionally, the CA can be provided by a different secret:
+```sh
+kubectl create secret generic pg-tls-ca \
+  --from-file=ca.crt=ca.crt
+```
+
+Then configure the postgres resource with the TLS secret:
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: postgresql
+
+metadata:
+  name: acid-test-cluster
+spec:
+  tls:
+    secretName: "pg-tls"      # this should hold tls.key and tls.crt
+    caSecretName: "pg-tls-ca" # this should hold ca.crt
+    caFile: "ca.crt"          # add this if the secret is configured with a CA
+```
+
+Alternatively, it is also possible to use
+[cert-manager](https://cert-manager.io/docs/) to generate these secrets.
+
+Certificate rotation is handled in the Spilo image which checks every 5
 minutes if the certificates have changed and reloads postgres accordingly.
````
````diff
@@ -44,5 +44,5 @@ tools: docker
 	# install pinned version of 'kind'
 	GO111MODULE=on go get sigs.k8s.io/kind@v0.5.1
 
-test:
+e2etest:
 	./run.sh
````
````diff
@@ -143,15 +143,6 @@ class EndToEndTestCase(unittest.TestCase):
                 })
             k8s.wait_for_pods_to_stop(pod_selector)
 
-            k8s.api.custom_objects_api.patch_namespaced_custom_object(
-                'acid.zalan.do', 'v1', 'default',
-                'postgresqls', 'acid-minimal-cluster',
-                {
-                    'spec': {
-                        'enableConnectionPooler': True,
-                    }
-                })
-            k8s.wait_for_pod_start(pod_selector)
         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
             raise
@@ -205,6 +196,66 @@ class EndToEndTestCase(unittest.TestCase):
         self.assertEqual(repl_svc_type, 'ClusterIP',
                          "Expected ClusterIP service type for replica, found {}".format(repl_svc_type))
 
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_lazy_spilo_upgrade(self):
+        '''
+        Test lazy upgrade for the Spilo image: the operator changes a stateful
+        set but lets pods run with the old image until they are recreated for
+        reasons other than the operator's activity. That works because the
+        operator configures stateful sets to use the "onDelete" pod update
+        policy.
+
+        The test covers:
+        1) enabling lazy upgrade in an existing operator deployment
+        2) forcing the normal rolling upgrade by changing the operator
+           configmap and restarting its pod
+        '''
+
+        k8s = self.k8s
+
+        # update the docker image in the config and enable the lazy upgrade
+        conf_image = "registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p114"
+        patch_lazy_spilo_upgrade = {
+            "data": {
+                "docker_image": conf_image,
+                "enable_lazy_spilo_upgrade": "true"
+            }
+        }
+        k8s.update_config(patch_lazy_spilo_upgrade)
+
+        pod0 = 'acid-minimal-cluster-0'
+        pod1 = 'acid-minimal-cluster-1'
+
+        # restart the pod to get a container with the new image
+        k8s.api.core_v1.delete_namespaced_pod(pod0, 'default')
+        time.sleep(60)
+
+        # lazy update works if the restarted pod and older pods run different Spilo versions
+        new_image = k8s.get_effective_pod_image(pod0)
+        old_image = k8s.get_effective_pod_image(pod1)
+        self.assertNotEqual(new_image, old_image,
+                            "Lazy update failed: pods have the same image {}".format(new_image))
+
+        # sanity check
+        assert_msg = "Image {} of a new pod differs from {} in operator conf".format(new_image, conf_image)
+        self.assertEqual(new_image, conf_image, assert_msg)
+
+        # clean up
+        unpatch_lazy_spilo_upgrade = {
+            "data": {
+                "enable_lazy_spilo_upgrade": "false",
+            }
+        }
+        k8s.update_config(unpatch_lazy_spilo_upgrade)
+
+        # at this point the operator will complete the normal rolling upgrade,
+        # so we additionally test if disabling the lazy upgrade - forcing the
+        # normal rolling upgrade - works
+
+        # XXX there is no easy way to wait until the end of Sync()
+        time.sleep(60)
+
+        image0 = k8s.get_effective_pod_image(pod0)
+        image1 = k8s.get_effective_pod_image(pod1)
+
+        assert_msg = "Disabling lazy upgrade failed: pods still have different images {} and {}".format(image0, image1)
+        self.assertEqual(image0, image1, assert_msg)
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_logical_backup_cron_job(self):
         '''
@@ -674,7 +725,7 @@ class K8s:
 
     def wait_for_operator_pod_start(self):
         self.wait_for_pod_start("name=postgres-operator")
-        # HACK operator must register CRD / add existing PG clusters after pod start up
+        # HACK operator must register CRD and/or Sync existing PG clusters after start up
        # for local execution ~ 10 seconds suffices
         time.sleep(60)
 
@@ -794,14 +845,16 @@ class K8s:
     def wait_for_logical_backup_job_creation(self):
         self.wait_for_logical_backup_job(expected_num_of_jobs=1)
 
-    def update_config(self, config_map_patch):
-        self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch)
-
+    def delete_operator_pod(self):
         operator_pod = self.api.core_v1.list_namespaced_pod(
             'default', label_selector="name=postgres-operator").items[0].metadata.name
         self.api.core_v1.delete_namespaced_pod(operator_pod, "default")  # restart reloads the conf and issues Sync()
         self.wait_for_operator_pod_start()
 
+    def update_config(self, config_map_patch):
+        self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch)
+        self.delete_operator_pod()
+
     def create_with_kubectl(self, path):
         return subprocess.run(
             ["kubectl", "create", "-f", path],
@@ -825,6 +878,14 @@ class K8s:
     def get_volume_name(self, pvc_name):
         pvc = self.api.core_v1.read_namespaced_persistent_volume_claim(pvc_name, "default")
         return pvc.spec.volume_name
 
+    def get_effective_pod_image(self, pod_name, namespace='default'):
+        '''
+        Get the Spilo image the pod currently uses. In case of lazy rolling
+        updates it may differ from the one specified in the stateful set.
+        '''
+        pod = self.api.core_v1.list_namespaced_pod(
+            namespace, label_selector="statefulset.kubernetes.io/pod-name=" + pod_name)
+        return pod.items[0].spec.containers[0].image
 
 if __name__ == '__main__':
````
go.mod (16 changes)

````diff
@@ -4,16 +4,20 @@ go 1.14
 
 require (
 	github.com/aws/aws-sdk-go v1.29.33
+	github.com/emicklei/go-restful v2.9.6+incompatible // indirect
+	github.com/evanphx/json-patch v4.5.0+incompatible // indirect
+	github.com/googleapis/gnostic v0.3.0 // indirect
 	github.com/lib/pq v1.3.0
 	github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
 	github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a
 	github.com/sirupsen/logrus v1.5.0
 	github.com/stretchr/testify v1.4.0
-	golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4 // indirect
+	golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b // indirect
 	gopkg.in/yaml.v2 v2.2.8
-	k8s.io/api v0.18.0
-	k8s.io/apiextensions-apiserver v0.18.0
-	k8s.io/apimachinery v0.18.0
-	k8s.io/client-go v0.18.0
-	k8s.io/code-generator v0.18.0
+	k8s.io/api v0.18.2
+	k8s.io/apiextensions-apiserver v0.18.2
+	k8s.io/apimachinery v0.18.2
+	k8s.io/client-go v11.0.0+incompatible
+	k8s.io/code-generator v0.18.2
+	sigs.k8s.io/kind v0.5.1 // indirect
 )
````
go.sum (66 changes)

````diff
@@ -46,6 +46,7 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc
 github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -62,10 +63,14 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.6+incompatible h1:tfrHha8zJ01ywiOEC1miGY8st1/igzWB8OmvPgoYX7w=
+github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
+github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -145,6 +150,7 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
 github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
@@ -155,10 +161,13 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
 github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
 github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
 github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
 github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI=
 github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0=
+github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
 github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@@ -174,10 +183,12 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -204,6 +215,7 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN
 github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190620125010-da37f6c1e481/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
 github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
@@ -216,6 +228,7 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -228,9 +241,11 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+
 github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
 github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
 github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
@@ -238,7 +253,9 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
````
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||||
|
|
@ -257,6 +274,7 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
|
||||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
|
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
|
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
|
||||||
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
|
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
|
||||||
|
|
@ -264,7 +282,9 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k
|
||||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||||
|
github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||||
|
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
|
|
@ -277,6 +297,7 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||||
|
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
|
|
@ -289,7 +310,7 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
|
||||||
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
|
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||||
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||||
|
|
@ -361,6 +382,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4=
|
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4=
|
||||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190621203818-d432491b9138/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU=
|
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU=
|
||||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
|
@ -388,8 +410,8 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw
|
||||||
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4 h1:kDtqNkeBrZb8B+atrj50B5XLHpzXXqcCdZPP/ApQ5NY=
|
golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b h1:zSzQJAznWxAh9fZxiPy2FZo+ZZEYoYFYYDYdOrU7AaM=
|
||||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||||
|
|
@ -433,30 +455,44 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
|
||||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
k8s.io/api v0.18.0 h1:lwYk8Vt7rsVTwjRU6pzEsa9YNhThbmbocQlKvNBB4EQ=
|
k8s.io/api v0.0.0-20190313235455-40a48860b5ab/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
|
||||||
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
|
k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
|
||||||
k8s.io/apiextensions-apiserver v0.18.0 h1:HN4/P8vpGZFvB5SOMuPPH2Wt9Y/ryX+KRvIyAkchu1Q=
|
k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8=
|
||||||
k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
|
k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
|
||||||
k8s.io/apimachinery v0.18.0 h1:fuPfYpk3cs1Okp/515pAf0dNhL66+8zk8RLbSX+EgAE=
|
k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8=
|
||||||
k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
|
k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
|
||||||
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
|
k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
|
||||||
k8s.io/client-go v0.18.0 h1:yqKw4cTUQraZK3fcVCMeSa+lqKwcjZ5wtcOIPnxQno4=
|
k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
|
||||||
k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
|
k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA=
|
||||||
k8s.io/code-generator v0.18.0 h1:0xIRWzym+qMgVpGmLESDeMfz/orwgxwxFFAo1xfGNtQ=
|
k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
|
||||||
k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
|
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
|
||||||
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
|
k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE=
|
||||||
|
k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
|
||||||
|
k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o=
|
||||||
|
k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
|
||||||
|
k8s.io/code-generator v0.18.2 h1:C1Nn2JiMf244CvBDKVPX0W2mZFJkVBg54T8OV7/Imso=
|
||||||
|
k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
|
||||||
|
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
|
||||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||||
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
|
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
|
||||||
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||||
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||||
|
k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||||
|
k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4=
|
||||||
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
|
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
|
||||||
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
|
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
|
||||||
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
|
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
|
||||||
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
|
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
|
||||||
|
sigs.k8s.io/kind v0.5.1 h1:BYnHEJ9DC+0Yjlyyehqd3xnKtEmFdLKU8QxqOqvQzdw=
|
||||||
|
sigs.k8s.io/kind v0.5.1/go.mod h1:L+Kcoo83/D1+ryU5P2VFbvYm0oqbkJn9zTZq0KNxW68=
|
||||||
|
sigs.k8s.io/kustomize/v3 v3.1.1-0.20190821175718-4b67a6de1296 h1:iQaIG5Dq+3qSiaFrJ/l/0MjjxKmdwyVNpKRYJwUe/+0=
|
||||||
|
sigs.k8s.io/kustomize/v3 v3.1.1-0.20190821175718-4b67a6de1296/go.mod h1:ztX4zYc/QIww3gSripwF7TBOarBTm5BvyAMem0kCzOE=
|
||||||
|
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ=
|
||||||
|
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
|
||||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
|
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
|
||||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||||
@@ -7,34 +7,8 @@ metadata:
 # annotations:
 #   "acid.zalan.do/controller": "second-operator"
 spec:
-  dockerImage: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
+  dockerImage: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p115
   teamId: "acid"
-  volume:
-    size: 1Gi
-#    storageClass: my-sc
-  additionalVolumes:
-    - name: data
-      mountPath: /home/postgres/pgdata/partitions
-      targetContainers:
-        - postgres
-      volumeSource:
-        PersistentVolumeClaim:
-          claimName: pvc-postgresql-data-partitions
-          readyOnly: false
-    - name: conf
-      mountPath: /etc/telegraf
-      subPath: telegraf.conf
-      targetContainers:
-        - telegraf-sidecar
-      volumeSource:
-        configMap:
-          name: my-config-map
-    - name: empty
-      mountPath: /opt/empty
-      targetContainers:
-        - all
-      volumeSource:
-        emptyDir: {}
   numberOfInstances: 2
   users:  # Application/Robot users
     zalando:
@@ -47,12 +21,49 @@ spec:
   - 127.0.0.1/32
   databases:
     foo: zalando
+  preparedDatabases:
+    bar:
+      defaultUsers: true
+      extensions:
+        pg_partman: public
+        pgcrypto: public
+      schemas:
+        data: {}
+        history:
+          defaultRoles: true
+          defaultUsers: false
   postgresql:
     version: "12"
     parameters:  # Expert section
       shared_buffers: "32MB"
      max_connections: "10"
       log_statement: "all"
+  volume:
+    size: 1Gi
+#    storageClass: my-sc
+  additionalVolumes:
+    - name: empty
+      mountPath: /opt/empty
+      targetContainers:
+        - all
+      volumeSource:
+        emptyDir: {}
+#    - name: data
+#      mountPath: /home/postgres/pgdata/partitions
+#      targetContainers:
+#        - postgres
+#      volumeSource:
+#        PersistentVolumeClaim:
+#          claimName: pvc-postgresql-data-partitions
+#          readyOnly: false
+#    - name: conf
+#      mountPath: /etc/telegraf
+#      subPath: telegraf.conf
+#      targetContainers:
+#        - telegraf-sidecar
+#      volumeSource:
+#        configMap:
+#          name: my-config-map
 
   enableShmVolume: true
 # spiloFSGroup: 103
@@ -148,8 +159,10 @@ spec:
     certificateFile: "tls.crt"
     privateKeyFile: "tls.key"
     caFile: ""  # optionally configure Postgres with a CA certificate
+    caSecretName: ""  # optionally the ca.crt can come from this secret instead.
   # file names can be also defined with absolute path, and will no longer be relative
-  # to the "/tls/" path where the secret is being mounted by default.
+  # to the "/tls/" path where the secret is being mounted by default, and "/tlsca/"
+  # where the caSecret is mounted by default.
   # When TLS is enabled, also set spiloFSGroup parameter above to the relevant value.
   # if unknown, set it to 103 which is the usual value in the default spilo images.
   # In Openshift, there is no need to set spiloFSGroup/spilo_fsgroup.
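For illustration, a minimal sketch of how the additionalVolumes entry above maps onto the AdditionalVolume type this commit introduces (the struct fields are taken from the diff further below; using sigs.k8s.io/yaml for decoding is an assumption, not how the operator itself parses the manifest):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/yaml"
)

// AdditionalVolume mirrors the CR field exercised by the manifest above;
// note the json tag "volumeSource", matching the rename later in this commit.
type AdditionalVolume struct {
	Name             string          `json:"name"`
	MountPath        string          `json:"mountPath"`
	SubPath          string          `json:"subPath"`
	TargetContainers []string        `json:"targetContainers"`
	VolumeSource     v1.VolumeSource `json:"volumeSource"`
}

func main() {
	manifest := []byte(`
name: empty
mountPath: /opt/empty
targetContainers:
  - all
volumeSource:
  emptyDir: {}
`)
	var vol AdditionalVolume
	// sigs.k8s.io/yaml converts YAML to JSON first, so the json tags apply.
	if err := yaml.Unmarshal(manifest, &vol); err != nil {
		panic(err)
	}
	fmt.Printf("%s mounted at %s (emptyDir: %v)\n", vol.Name, vol.MountPath, vol.VolumeSource.EmptyDir != nil)
}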
@@ -15,7 +15,7 @@ data:
   # connection_pooler_default_cpu_request: "500m"
   # connection_pooler_default_memory_limit: 100Mi
   # connection_pooler_default_memory_request: 100Mi
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-6"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-7"
   # connection_pooler_max_db_connections: 60
   # connection_pooler_mode: "transaction"
   # connection_pooler_number_of_instances: 2
@@ -29,11 +29,12 @@ data:
   # default_cpu_request: 100m
   # default_memory_limit: 500Mi
   # default_memory_request: 100Mi
-  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
+  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p115
   # enable_admin_role_for_users: "true"
   # enable_crd_validation: "true"
   # enable_database_access: "true"
   # enable_init_containers: "true"
+  # enable_lazy_spilo_upgrade: "false"
   enable_master_load_balancer: "false"
   # enable_pod_antiaffinity: "false"
   # enable_pod_disruption_budget: "true"
@@ -15,5 +15,7 @@ spec:
     foo_user: []  # role for application foo
   databases:
     foo: zalando  # dbname: owner
+  preparedDatabases:
+    bar: {}
   postgresql:
     version: "12"
@@ -43,6 +43,18 @@ rules:
   - configmaps
   verbs:
   - get
+# to send events to the CRs
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - get
+  - list
+  - patch
+  - update
+  - watch
 # to manage endpoints which are also used by Patroni
 - apiGroups:
   - ""
@@ -38,6 +38,8 @@ spec:
               type: string
             enable_crd_validation:
               type: boolean
+            enable_lazy_spilo_upgrade:
+              type: boolean
             enable_shm_volume:
               type: boolean
             enable_unused_pvc_deletion:
@@ -62,6 +64,12 @@ spec:
               type: object
               additionalProperties:
                 type: string
+            sidecars:
+              type: array
+              nullable: true
+              items:
+                type: object
+                additionalProperties: true
             workers:
               type: integer
               minimum: 1
@@ -277,7 +285,7 @@ spec:
                   type: integer
                 ring_log_lines:
                   type: integer
-            scalyr:
+            scalyr:  # deprecated
               type: object
               properties:
                 scalyr_api_key:
@@ -3,19 +3,22 @@ kind: OperatorConfiguration
 metadata:
   name: postgresql-operator-default-configuration
 configuration:
+  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p115
   # enable_crd_validation: true
+  # enable_lazy_spilo_upgrade: false
+  # enable_shm_volume: true
   etcd_host: ""
   # kubernetes_use_configmaps: false
-  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
-  # enable_shm_volume: true
-  # enable_unused_pvc_deletion: false
   max_instances: -1
   min_instances: -1
   resync_period: 30m
   repair_period: 5m
   # set_memory_request_to_limit: false
-  # sidecar_docker_images:
-  #   example: "exampleimage:exampletag"
+  # sidecars:
+  # - image: image:123
+  #   name: global-sidecar-1
+  #   ports:
+  #   - containerPort: 80
   workers: 4
   users:
     replication_username: standby
@@ -115,20 +118,12 @@ configuration:
     api_port: 8080
     cluster_history_entries: 1000
     ring_log_lines: 100
-  scalyr:
-    # scalyr_api_key: ""
-    scalyr_cpu_limit: "1"
-    scalyr_cpu_request: 100m
-    # scalyr_image: ""
-    scalyr_memory_limit: 500Mi
-    scalyr_memory_request: 50Mi
-    # scalyr_server_url: ""
   connection_pooler:
     connection_pooler_default_cpu_limit: "1"
     connection_pooler_default_cpu_request: "500m"
     connection_pooler_default_memory_limit: 100Mi
     connection_pooler_default_memory_request: 100Mi
-    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-6"
+    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-7"
     # connection_pooler_max_db_connections: 60
     connection_pooler_mode: "transaction"
     connection_pooler_number_of_instances: 2
@@ -237,6 +237,26 @@ spec:
               type: object
               additionalProperties:
                 type: string
+            preparedDatabases:
+              type: object
+              additionalProperties:
+                type: object
+                properties:
+                  defaultUsers:
+                    type: boolean
+                  extensions:
+                    type: object
+                    additionalProperties:
+                      type: string
+                  schemas:
+                    type: object
+                    additionalProperties:
+                      type: object
+                      properties:
+                        defaultUsers:
+                          type: boolean
+                        defaultRoles:
+                          type: boolean
             replicaLoadBalancer:  # deprecated
               type: boolean
             resources:
@@ -341,6 +361,8 @@ spec:
                   type: string
                 caFile:
                   type: string
+                caSecretName:
+                  type: string
             tolerations:
               type: array
               items:
@@ -421,6 +421,43 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
					},
				},
			},
+			"preparedDatabases": {
+				Type: "object",
+				AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{
+					Schema: &apiextv1beta1.JSONSchemaProps{
+						Type: "object",
+						Properties: map[string]apiextv1beta1.JSONSchemaProps{
+							"defaultUsers": {
+								Type: "boolean",
+							},
+							"extensions": {
+								Type: "object",
+								AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{
+									Schema: &apiextv1beta1.JSONSchemaProps{
+										Type: "string",
+									},
+								},
+							},
+							"schemas": {
+								Type: "object",
+								AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{
+									Schema: &apiextv1beta1.JSONSchemaProps{
+										Type: "object",
+										Properties: map[string]apiextv1beta1.JSONSchemaProps{
+											"defaultUsers": {
+												Type: "boolean",
+											},
+											"defaultRoles": {
+												Type: "boolean",
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
			"replicaLoadBalancer": {
				Type:        "boolean",
				Description: "Deprecated",
@@ -513,6 +550,9 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
					"caFile": {
						Type: "string",
					},
+					"caSecretName": {
+						Type: "string",
+					},
				},
			},
			"tolerations": {
@@ -758,6 +798,9 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
				"enable_crd_validation": {
					Type: "boolean",
				},
+				"enable_lazy_spilo_upgrade": {
+					Type: "boolean",
+				},
				"enable_shm_volume": {
					Type: "boolean",
				},
@@ -794,6 +837,17 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
						},
					},
				},
+				"sidecars": {
+					Type: "array",
+					Items: &apiextv1beta1.JSONSchemaPropsOrArray{
+						Schema: &apiextv1beta1.JSONSchemaProps{
+							Type: "object",
+							AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{
+								Allows: true,
+							},
+						},
+					},
+				},
				"workers": {
					Type:    "integer",
					Minimum: &min1,
@@ -8,6 +8,7 @@ import (
	"time"

	"github.com/zalando/postgres-operator/pkg/spec"
+	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

@@ -182,6 +183,7 @@ type OperatorLogicalBackupConfiguration struct {
 // OperatorConfigurationData defines the operation config
 type OperatorConfigurationData struct {
	EnableCRDValidation    *bool  `json:"enable_crd_validation,omitempty"`
+	EnableLazySpiloUpgrade bool   `json:"enable_lazy_spilo_upgrade,omitempty"`
	EtcdHost               string `json:"etcd_host,omitempty"`
	KubernetesUseConfigMaps bool  `json:"kubernetes_use_configmaps,omitempty"`
	DockerImage            string `json:"docker_image,omitempty"`
@@ -192,7 +194,9 @@ type OperatorConfigurationData struct {
	RepairPeriod            Duration `json:"repair_period,omitempty"`
	SetMemoryRequestToLimit bool     `json:"set_memory_request_to_limit,omitempty"`
	ShmVolume               *bool    `json:"enable_shm_volume,omitempty"`
-	Sidecars map[string]string `json:"sidecar_docker_images,omitempty"`
+	// deprecated in favour of SidecarContainers
+	SidecarImages     map[string]string `json:"sidecar_docker_images,omitempty"`
+	SidecarContainers []v1.Container    `json:"sidecars,omitempty"`
	PostgresUsersConfiguration PostgresUsersConfiguration   `json:"users"`
	Kubernetes                 KubernetesMetaConfiguration  `json:"kubernetes"`
	PostgresPodResources       PostgresPodResourcesDefaults `json:"postgres_pod_resources"`
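The struct change above keeps the deprecated sidecar_docker_images map alongside the new sidecars list. A hedged sketch of how the two could be reconciled into a single container list — the helper name and the merge policy (new-style containers win on name clashes) are assumptions for illustration, not the operator's actual logic:

package main

import (
	"fmt"
	"sort"

	v1 "k8s.io/api/core/v1"
)

// mergeSidecars folds the deprecated image map into the new container list.
// Containers defined via the new `sidecars` field win on name clashes.
func mergeSidecars(images map[string]string, containers []v1.Container) []v1.Container {
	seen := map[string]bool{}
	for _, c := range containers {
		seen[c.Name] = true
	}
	names := make([]string, 0, len(images))
	for name := range images {
		names = append(names, name)
	}
	sort.Strings(names) // deterministic order despite map iteration
	for _, name := range names {
		if !seen[name] {
			containers = append(containers, v1.Container{Name: name, Image: images[name]})
		}
	}
	return containers
}

func main() {
	merged := mergeSidecars(
		map[string]string{"metrics": "example/exporter:1.0"}, // hypothetical image
		[]v1.Container{{Name: "global-sidecar-1", Image: "image:123"}},
	)
	fmt.Println(len(merged)) // 2
}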
@@ -56,6 +56,7 @@ type PostgresSpec struct {
	Clone             CloneDescription            `json:"clone"`
	ClusterName       string                      `json:"-"`
	Databases         map[string]string           `json:"databases,omitempty"`
+	PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"`
	Tolerations       []v1.Toleration             `json:"tolerations,omitempty"`
	Sidecars          []Sidecar                   `json:"sidecars,omitempty"`
	InitContainers    []v1.Container              `json:"initContainers,omitempty"`
@@ -84,6 +85,19 @@ type PostgresqlList struct {
	Items []Postgresql `json:"items"`
 }

+// PreparedDatabase describes elements to be bootstrapped
+type PreparedDatabase struct {
+	PreparedSchemas map[string]PreparedSchema `json:"schemas,omitempty"`
+	DefaultUsers    bool                      `json:"defaultUsers,omitempty" defaults:"false"`
+	Extensions      map[string]string         `json:"extensions,omitempty"`
+}
+
+// PreparedSchema describes elements to be bootstrapped per schema
+type PreparedSchema struct {
+	DefaultRoles *bool `json:"defaultRoles,omitempty" defaults:"true"`
+	DefaultUsers bool  `json:"defaultUsers,omitempty" defaults:"false"`
+}
+
 // MaintenanceWindow describes the time window when the operator is allowed to do maintenance on a cluster.
 type MaintenanceWindow struct {
	Everyday bool
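PreparedSchema uses a *bool for defaultRoles so that an omitted field can default to true, while the plain bool defaultUsers defaults to false. A small self-contained sketch of resolving that tri-state; it assumes only the semantics implied by the defaults tags above:

package main

import "fmt"

// PreparedSchema mirrors the new CRD type: a nil DefaultRoles means "true".
type PreparedSchema struct {
	DefaultRoles *bool
	DefaultUsers bool
}

// rolesEnabled resolves the pointer against its documented default.
func rolesEnabled(s PreparedSchema) bool {
	if s.DefaultRoles == nil {
		return true // field omitted in the manifest
	}
	return *s.DefaultRoles
}

func main() {
	off := false
	fmt.Println(rolesEnabled(PreparedSchema{}))                   // true
	fmt.Println(rolesEnabled(PreparedSchema{DefaultRoles: &off})) // false
}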
@@ -104,7 +118,7 @@ type AdditionalVolume struct {
	MountPath        string          `json:"mountPath"`
	SubPath          string          `json:"subPath"`
	TargetContainers []string        `json:"targetContainers"`
-	VolumeSource     v1.VolumeSource `json:"volume"`
+	VolumeSource     v1.VolumeSource `json:"volumeSource"`
 }

 // PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values.
@@ -148,6 +162,7 @@ type TLSDescription struct {
	CertificateFile string `json:"certificateFile,omitempty"`
	PrivateKeyFile  string `json:"privateKeyFile,omitempty"`
	CAFile          string `json:"caFile,omitempty"`
+	CASecretName    string `json:"caSecretName,omitempty"`
 }

 // CloneDescription describes which cluster the new should clone and up to which point in time
@@ -47,6 +47,28 @@ func (in *AWSGCPConfiguration) DeepCopy() *AWSGCPConfiguration {
	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdditionalVolume) DeepCopyInto(out *AdditionalVolume) {
+	*out = *in
+	if in.TargetContainers != nil {
+		in, out := &in.TargetContainers, &out.TargetContainers
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	in.VolumeSource.DeepCopyInto(&out.VolumeSource)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalVolume.
+func (in *AdditionalVolume) DeepCopy() *AdditionalVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(AdditionalVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *CloneDescription) DeepCopyInto(out *CloneDescription) {
	*out = *in
@@ -290,13 +312,20 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData
		*out = new(bool)
		**out = **in
	}
-	if in.Sidecars != nil {
-		in, out := &in.Sidecars, &out.Sidecars
+	if in.SidecarImages != nil {
+		in, out := &in.SidecarImages, &out.SidecarImages
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
+	if in.SidecarContainers != nil {
+		in, out := &in.SidecarContainers, &out.SidecarContainers
+		*out = make([]corev1.Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
	out.PostgresUsersConfiguration = in.PostgresUsersConfiguration
	in.Kubernetes.DeepCopyInto(&out.Kubernetes)
	out.PostgresPodResources = in.PostgresPodResources
@@ -541,6 +570,13 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
			(*out)[key] = val
		}
	}
+	if in.PreparedDatabases != nil {
+		in, out := &in.PreparedDatabases, &out.PreparedDatabases
+		*out = make(map[string]PreparedDatabase, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
	if in.Tolerations != nil {
		in, out := &in.Tolerations, &out.Tolerations
		*out = make([]corev1.Toleration, len(*in))
@@ -591,6 +627,13 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
		*out = new(TLSDescription)
		**out = **in
	}
+	if in.AdditionalVolumes != nil {
+		in, out := &in.AdditionalVolumes, &out.AdditionalVolumes
+		*out = make([]AdditionalVolume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
	if in.InitContainersOld != nil {
		in, out := &in.InitContainersOld, &out.InitContainersOld
		*out = make([]corev1.Container, len(*in))
@@ -727,6 +770,57 @@ func (in *PostgresqlParam) DeepCopy() *PostgresqlParam {
	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PreparedDatabase) DeepCopyInto(out *PreparedDatabase) {
+	*out = *in
+	if in.PreparedSchemas != nil {
+		in, out := &in.PreparedSchemas, &out.PreparedSchemas
+		*out = make(map[string]PreparedSchema, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreparedDatabase.
+func (in *PreparedDatabase) DeepCopy() *PreparedDatabase {
+	if in == nil {
+		return nil
+	}
+	out := new(PreparedDatabase)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PreparedSchema) DeepCopyInto(out *PreparedSchema) {
+	*out = *in
+	if in.DefaultRoles != nil {
+		in, out := &in.DefaultRoles, &out.DefaultRoles
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreparedSchema.
+func (in *PreparedSchema) DeepCopy() *PreparedSchema {
+	if in == nil {
+		return nil
+	}
+	out := new(PreparedSchema)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) {
	*out = *in
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
"reflect"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
|
@ -21,8 +22,11 @@ import (
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"k8s.io/client-go/rest"
|
"k8s.io/client-go/rest"
|
||||||
"k8s.io/client-go/tools/cache"
|
"k8s.io/client-go/tools/cache"
|
||||||
|
"k8s.io/client-go/tools/record"
|
||||||
|
"k8s.io/client-go/tools/reference"
|
||||||
|
|
||||||
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||||
|
"github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
|
||||||
"github.com/zalando/postgres-operator/pkg/spec"
|
"github.com/zalando/postgres-operator/pkg/spec"
|
||||||
"github.com/zalando/postgres-operator/pkg/util"
|
"github.com/zalando/postgres-operator/pkg/util"
|
||||||
"github.com/zalando/postgres-operator/pkg/util/config"
|
"github.com/zalando/postgres-operator/pkg/util/config"
|
||||||
|
|
@ -81,6 +85,7 @@ type Cluster struct {
|
||||||
acidv1.Postgresql
|
acidv1.Postgresql
|
||||||
Config
|
Config
|
||||||
logger *logrus.Entry
|
logger *logrus.Entry
|
||||||
|
eventRecorder record.EventRecorder
|
||||||
patroni patroni.Interface
|
patroni patroni.Interface
|
||||||
pgUsers map[string]spec.PgUser
|
pgUsers map[string]spec.PgUser
|
||||||
systemUsers map[string]spec.PgUser
|
systemUsers map[string]spec.PgUser
|
||||||
|
|
@ -109,7 +114,7 @@ type compareStatefulsetResult struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new cluster. This function should be called from a controller.
|
// New creates a new cluster. This function should be called from a controller.
|
||||||
func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgresql, logger *logrus.Entry) *Cluster {
|
func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgresql, logger *logrus.Entry, eventRecorder record.EventRecorder) *Cluster {
|
||||||
deletePropagationPolicy := metav1.DeletePropagationOrphan
|
deletePropagationPolicy := metav1.DeletePropagationOrphan
|
||||||
|
|
||||||
podEventsQueue := cache.NewFIFO(func(obj interface{}) (string, error) {
|
podEventsQueue := cache.NewFIFO(func(obj interface{}) (string, error) {
|
||||||
|
|
@ -140,7 +145,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
|
||||||
cluster.teamsAPIClient = teams.NewTeamsAPI(cfg.OpConfig.TeamsAPIUrl, logger)
|
cluster.teamsAPIClient = teams.NewTeamsAPI(cfg.OpConfig.TeamsAPIUrl, logger)
|
||||||
cluster.oauthTokenGetter = newSecretOauthTokenGetter(&kubeClient, cfg.OpConfig.OAuthTokenSecretName)
|
cluster.oauthTokenGetter = newSecretOauthTokenGetter(&kubeClient, cfg.OpConfig.OAuthTokenSecretName)
|
||||||
cluster.patroni = patroni.New(cluster.logger)
|
cluster.patroni = patroni.New(cluster.logger)
|
||||||
|
cluster.eventRecorder = eventRecorder
|
||||||
return cluster
|
return cluster
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -166,6 +171,16 @@ func (c *Cluster) setProcessName(procName string, args ...interface{}) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetReference of Postgres CR object
|
||||||
|
// i.e. required to emit events to this resource
|
||||||
|
func (c *Cluster) GetReference() *v1.ObjectReference {
|
||||||
|
ref, err := reference.GetReference(scheme.Scheme, &c.Postgresql)
|
||||||
|
if err != nil {
|
||||||
|
c.logger.Errorf("could not get reference for Postgresql CR %v/%v: %v", c.Postgresql.Namespace, c.Postgresql.Name, err)
|
||||||
|
}
|
||||||
|
return ref
|
||||||
|
}
|
||||||
|
|
||||||
// SetStatus of Postgres cluster
|
// SetStatus of Postgres cluster
|
||||||
// TODO: eventually switch to updateStatus() for kubernetes 1.11 and above
|
// TODO: eventually switch to updateStatus() for kubernetes 1.11 and above
|
||||||
func (c *Cluster) setStatus(status string) {
|
func (c *Cluster) setStatus(status string) {
|
||||||
|
|
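The Cluster struct now carries a record.EventRecorder, but its construction happens in the controller, outside this excerpt. A minimal sketch of the usual client-go wiring, assuming the standard broadcaster/sink pattern; the real operator would use its generated clientset scheme (imported above) so the Postgresql type is known to the recorder, and the component name here is an assumption:

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newEventRecorder wires a record.EventRecorder to the API server so that
// Event/Eventf calls show up under `kubectl describe` for the referenced object.
func newEventRecorder(client kubernetes.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	// Events are posted to the namespace of the object they reference.
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: client.CoreV1().Events(""),
	})
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "postgres-operator"})
}

func main() {
	// fake clientset keeps the sketch runnable without a live cluster
	rec := newEventRecorder(fake.NewSimpleClientset())
	_ = rec
}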
@ -213,6 +228,10 @@ func (c *Cluster) initUsers() error {
|
||||||
return fmt.Errorf("could not init infrastructure roles: %v", err)
|
return fmt.Errorf("could not init infrastructure roles: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := c.initPreparedDatabaseRoles(); err != nil {
|
||||||
|
return fmt.Errorf("could not init default users: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
if err := c.initRobotUsers(); err != nil {
|
if err := c.initRobotUsers(); err != nil {
|
||||||
return fmt.Errorf("could not init robot users: %v", err)
|
return fmt.Errorf("could not init robot users: %v", err)
|
||||||
}
|
}
|
||||||
|
|
@@ -245,6 +264,7 @@ func (c *Cluster) Create() error {
 	}()
 
 	c.setStatus(acidv1.ClusterStatusCreating)
+	c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Create", "Started creation of new cluster resources")
 
 	if err = c.enforceMinResourceLimits(&c.Spec); err != nil {
 		return fmt.Errorf("could not enforce minimum resource limits: %v", err)
@@ -263,6 +283,7 @@ func (c *Cluster) Create() error {
 			return fmt.Errorf("could not create %s endpoint: %v", role, err)
 		}
 		c.logger.Infof("endpoint %q has been successfully created", util.NameFromMeta(ep.ObjectMeta))
+		c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Endpoints", "Endpoint %q has been successfully created", util.NameFromMeta(ep.ObjectMeta))
 	}
 
 	if c.Services[role] != nil {
@@ -273,6 +294,7 @@ func (c *Cluster) Create() error {
 			return fmt.Errorf("could not create %s service: %v", role, err)
 		}
 		c.logger.Infof("%s service %q has been successfully created", role, util.NameFromMeta(service.ObjectMeta))
+		c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Services", "The service %q for role %s has been successfully created", util.NameFromMeta(service.ObjectMeta), role)
 	}
 
 	if err = c.initUsers(); err != nil {
@@ -284,6 +306,7 @@ func (c *Cluster) Create() error {
 		return fmt.Errorf("could not create secrets: %v", err)
 	}
 	c.logger.Infof("secrets have been successfully created")
+	c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Secrets", "The secrets have been successfully created")
 
 	if c.PodDisruptionBudget != nil {
 		return fmt.Errorf("pod disruption budget already exists in the cluster")
@@ -302,6 +325,7 @@ func (c *Cluster) Create() error {
 		return fmt.Errorf("could not create statefulset: %v", err)
 	}
 	c.logger.Infof("statefulset %q has been successfully created", util.NameFromMeta(ss.ObjectMeta))
+	c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "StatefulSet", "Statefulset %q has been successfully created", util.NameFromMeta(ss.ObjectMeta))
 
 	c.logger.Info("waiting for the cluster being ready")
 
@@ -310,6 +334,7 @@ func (c *Cluster) Create() error {
 		return err
 	}
 	c.logger.Infof("pods are ready")
+	c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "StatefulSet", "Pods are ready")
 
 	// create database objects unless we are running without pods or disabled
 	// that feature explicitly
@@ -323,6 +348,9 @@ func (c *Cluster) Create() error {
 		if err = c.syncDatabases(); err != nil {
 			return fmt.Errorf("could not sync databases: %v", err)
 		}
+		if err = c.syncPreparedDatabases(); err != nil {
+			return fmt.Errorf("could not sync prepared databases: %v", err)
+		}
 		c.logger.Infof("databases have been successfully created")
 	}
 
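The Create() hunks above thread a Kubernetes event recorder through every provisioning step. For orientation, a minimal sketch of how such a recorder is typically wired up with client-go; the helper name and component string here are illustrative, not the operator's actual bootstrap code:

package events

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
)

// newEventRecorder is a hypothetical helper: it builds a record.EventRecorder
// that writes Events through the API server, which is the interface the
// c.eventRecorder.Event(f) calls in the diff depend on.
func newEventRecorder(cfg *rest.Config) (record.EventRecorder, error) {
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, fmt.Errorf("could not create client: %v", err)
	}
	broadcaster := record.NewBroadcaster()
	// Sink recorded events into the core/v1 Events API.
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: client.CoreV1().Events(v1.NamespaceAll),
	})
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "postgres-operator"}), nil
}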
@@ -450,6 +478,14 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
 		}
 	}
 
+	// lazy Spilo update: modify the image in the statefulset itself but let its pods run with the old image
+	// until they are re-created for other reasons, for example node rotation
+	if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Containers[0].Image, statefulSet.Spec.Template.Spec.Containers[0].Image) {
+		needsReplace = true
+		needsRollUpdate = false
+		reasons = append(reasons, "lazy Spilo update: new statefulset's pod image doesn't match the current one")
+	}
+
 	if needsRollUpdate || needsReplace {
 		match = false
 	}
@@ -481,8 +517,6 @@ func (c *Cluster) compareContainers(description string, setA, setB []v1.Containe
 	checks := []containerCheck{
 		newCheck("new statefulset %s's %s (index %d) name doesn't match the current one",
 			func(a, b v1.Container) bool { return a.Name != b.Name }),
-		newCheck("new statefulset %s's %s (index %d) image doesn't match the current one",
-			func(a, b v1.Container) bool { return a.Image != b.Image }),
 		newCheck("new statefulset %s's %s (index %d) ports don't match the current one",
 			func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Ports, b.Ports) }),
 		newCheck("new statefulset %s's %s (index %d) resources don't match the current ones",
@@ -493,6 +527,11 @@ func (c *Cluster) compareContainers(description string, setA, setB []v1.Containe
 			func(a, b v1.Container) bool { return !reflect.DeepEqual(a.EnvFrom, b.EnvFrom) }),
 	}
 
+	if !c.OpConfig.EnableLazySpiloUpgrade {
+		checks = append(checks, newCheck("new statefulset %s's %s (index %d) image doesn't match the current one",
+			func(a, b v1.Container) bool { return a.Image != b.Image }))
+	}
+
 	for index, containerA := range setA {
 		containerB := setB[index]
 		for _, check := range checks {
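Taken together, the two hunks above mean an image change under enable_lazy_spilo_upgrade rewrites the statefulset spec without rolling the pods. A standalone sketch of that decision table (simplified names, not the operator's types):

package main

import "fmt"

// decide mirrors the comparison logic in the hunks above: under lazy
// upgrades an image change requires replacing the statefulset object
// but deliberately skips the rolling update of its pods.
func decide(lazyUpgrade, imageChanged bool) (needsReplace, needsRollUpdate bool) {
	if lazyUpgrade && imageChanged {
		return true, false // pods keep the old image until recreated anyway
	}
	if imageChanged {
		return true, true // eager path: roll the pods now
	}
	return false, false
}

func main() {
	for _, tc := range []struct{ lazy, changed bool }{
		{true, true}, {false, true}, {false, false},
	} {
		r, u := decide(tc.lazy, tc.changed)
		fmt.Printf("lazy=%v imageChanged=%v -> replace=%v rollUpdate=%v\n", tc.lazy, tc.changed, r, u)
	}
}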
@@ -555,6 +594,7 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
 		}
 		if isSmaller {
 			c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit)
+			c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit)
 			spec.Resources.ResourceLimits.CPU = minCPULimit
 		}
 	}
@@ -567,6 +607,7 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
 		}
 		if isSmaller {
 			c.logger.Warningf("defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit)
+			c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit)
 			spec.Resources.ResourceLimits.Memory = minMemoryLimit
 		}
 	}
@@ -598,6 +639,8 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 	if oldSpec.Spec.PostgresqlParam.PgVersion != newSpec.Spec.PostgresqlParam.PgVersion { // PG versions comparison
 		c.logger.Warningf("postgresql version change(%q -> %q) has no effect",
 			oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
+		c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "PostgreSQL", "postgresql version change(%q -> %q) has no effect",
+			oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
 		//we need that hack to generate statefulset with the old version
 		newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
 	}
@@ -614,7 +657,8 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 
 	// connection pooler needs one system user created, which is done in
 	// initUsers. Check if it needs to be called.
-	sameUsers := reflect.DeepEqual(oldSpec.Spec.Users, newSpec.Spec.Users)
+	sameUsers := reflect.DeepEqual(oldSpec.Spec.Users, newSpec.Spec.Users) &&
+		reflect.DeepEqual(oldSpec.Spec.PreparedDatabases, newSpec.Spec.PreparedDatabases)
 	needConnectionPooler := c.needConnectionPoolerWorker(&newSpec.Spec)
 	if !sameUsers || needConnectionPooler {
 		c.logger.Debugf("syncing secrets")
@@ -731,18 +775,28 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 			c.logger.Errorf("could not sync roles: %v", err)
 			updateFailed = true
 		}
-		if !reflect.DeepEqual(oldSpec.Spec.Databases, newSpec.Spec.Databases) {
+		if !reflect.DeepEqual(oldSpec.Spec.Databases, newSpec.Spec.Databases) ||
+			!reflect.DeepEqual(oldSpec.Spec.PreparedDatabases, newSpec.Spec.PreparedDatabases) {
 			c.logger.Infof("syncing databases")
 			if err := c.syncDatabases(); err != nil {
 				c.logger.Errorf("could not sync databases: %v", err)
 				updateFailed = true
 			}
 		}
+		if !reflect.DeepEqual(oldSpec.Spec.PreparedDatabases, newSpec.Spec.PreparedDatabases) {
+			c.logger.Infof("syncing prepared databases")
+			if err := c.syncPreparedDatabases(); err != nil {
+				c.logger.Errorf("could not sync prepared databases: %v", err)
+				updateFailed = true
+			}
+		}
 	}
 
 	// sync connection pooler
-	if err := c.syncConnectionPooler(oldSpec, newSpec, c.installLookupFunction); err != nil {
-		return fmt.Errorf("could not sync connection pooler: %v", err)
+	if _, err := c.syncConnectionPooler(oldSpec, newSpec,
+		c.installLookupFunction); err != nil {
+		c.logger.Errorf("could not sync connection pooler: %v", err)
+		updateFailed = true
 	}
 
 	return nil
@@ -756,6 +810,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 func (c *Cluster) Delete() {
 	c.mu.Lock()
 	defer c.mu.Unlock()
+	c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Delete", "Started deletion of new cluster resources")
 
 	// delete the backup job before the stateful set of the cluster to prevent connections to non-existing pods
 	// deleting the cron job also removes pods and batch jobs it created
@@ -783,9 +838,11 @@ func (c *Cluster) Delete() {
 
 	for _, role := range []PostgresRole{Master, Replica} {
 
+		if !c.patroniKubernetesUseConfigMaps() {
 			if err := c.deleteEndpoint(role); err != nil {
 				c.logger.Warningf("could not delete %s endpoint: %v", role, err)
 			}
+		}
 
 		if err := c.deleteService(role); err != nil {
 			c.logger.Warningf("could not delete %s service: %v", role, err)
@@ -910,6 +967,100 @@ func (c *Cluster) initSystemUsers() {
 	}
 }
 
+func (c *Cluster) initPreparedDatabaseRoles() error {
+
+	if c.Spec.PreparedDatabases != nil && len(c.Spec.PreparedDatabases) == 0 { // TODO: add option to disable creating such a default DB
+		c.Spec.PreparedDatabases = map[string]acidv1.PreparedDatabase{strings.Replace(c.Name, "-", "_", -1): {}}
+	}
+
+	// create maps with default roles/users as keys and their membership as values
+	defaultRoles := map[string]string{
+		constants.OwnerRoleNameSuffix:  "",
+		constants.ReaderRoleNameSuffix: "",
+		constants.WriterRoleNameSuffix: constants.ReaderRoleNameSuffix,
+	}
+	defaultUsers := map[string]string{
+		constants.OwnerRoleNameSuffix + constants.UserRoleNameSuffix:  constants.OwnerRoleNameSuffix,
+		constants.ReaderRoleNameSuffix + constants.UserRoleNameSuffix: constants.ReaderRoleNameSuffix,
+		constants.WriterRoleNameSuffix + constants.UserRoleNameSuffix: constants.WriterRoleNameSuffix,
+	}
+
+	for preparedDbName, preparedDB := range c.Spec.PreparedDatabases {
+		// default roles per database
+		if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName); err != nil {
+			return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
+		}
+		if preparedDB.DefaultUsers {
+			if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName); err != nil {
+				return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
+			}
+		}
+
+		// default roles per database schema
+		preparedSchemas := preparedDB.PreparedSchemas
+		if len(preparedDB.PreparedSchemas) == 0 {
+			preparedSchemas = map[string]acidv1.PreparedSchema{"data": {DefaultRoles: util.True()}}
+		}
+		for preparedSchemaName, preparedSchema := range preparedSchemas {
+			if preparedSchema.DefaultRoles == nil || *preparedSchema.DefaultRoles {
+				if err := c.initDefaultRoles(defaultRoles,
+					preparedDbName+constants.OwnerRoleNameSuffix,
+					preparedDbName+"_"+preparedSchemaName); err != nil {
+					return fmt.Errorf("could not initialize default roles for database schema %s: %v", preparedSchemaName, err)
+				}
+				if preparedSchema.DefaultUsers {
+					if err := c.initDefaultRoles(defaultUsers,
+						preparedDbName+constants.OwnerRoleNameSuffix,
+						preparedDbName+"_"+preparedSchemaName); err != nil {
+						return fmt.Errorf("could not initialize default users for database schema %s: %v", preparedSchemaName, err)
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix string) error {
+
+	for defaultRole, inherits := range defaultRoles {
+
+		roleName := prefix + defaultRole
+
+		flags := []string{constants.RoleFlagNoLogin}
+		if defaultRole[len(defaultRole)-5:] == constants.UserRoleNameSuffix {
+			flags = []string{constants.RoleFlagLogin}
+		}
+
+		memberOf := make([]string, 0)
+		if inherits != "" {
+			memberOf = append(memberOf, prefix+inherits)
+		}
+
+		adminRole := ""
+		if strings.Contains(defaultRole, constants.OwnerRoleNameSuffix) {
+			adminRole = admin
+		} else {
+			adminRole = prefix + constants.OwnerRoleNameSuffix
+		}
+
+		newRole := spec.PgUser{
+			Origin:    spec.RoleOriginBootstrap,
+			Name:      roleName,
+			Password:  util.RandomPassword(constants.PasswordLength),
+			Flags:     flags,
+			MemberOf:  memberOf,
+			AdminRole: adminRole,
+		}
+		if currentRole, present := c.pgUsers[roleName]; present {
+			c.pgUsers[roleName] = c.resolveNameConflict(&currentRole, &newRole)
+		} else {
+			c.pgUsers[roleName] = newRole
+		}
+	}
+	return nil
+}
+
 func (c *Cluster) initRobotUsers() error {
 	for username, userFlags := range c.Spec.Users {
 		if !isValidUsername(username) {
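To make the naming scheme concrete: a prepared database foo with defaultUsers enabled and a schema bar yields foo_owner/foo_reader/foo_writer (NOLOGIN groups) plus *_user LOGIN variants, and the same set again under the foo_bar prefix. A small standalone sketch of that derivation, assuming the suffix constants carry the values "_owner", "_reader", "_writer", and "_user":

package main

import "fmt"

const (
	ownerSuffix  = "_owner" // assumed values of constants.*RoleNameSuffix
	readerSuffix = "_reader"
	writerSuffix = "_writer"
	userSuffix   = "_user"
)

// defaultRoleNames lists the NOLOGIN group roles and, optionally, the
// LOGIN user roles generated for one prefix (database or database_schema).
func defaultRoleNames(prefix string, withUsers bool) []string {
	names := []string{prefix + ownerSuffix, prefix + readerSuffix, prefix + writerSuffix}
	if withUsers {
		for _, s := range []string{ownerSuffix, readerSuffix, writerSuffix} {
			names = append(names, prefix+s+userSuffix)
		}
	}
	return names
}

func main() {
	fmt.Println(defaultRoleNames("foo", true))     // database-level roles
	fmt.Println(defaultRoleNames("foo_bar", true)) // schema-level roles
}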
@@ -1092,6 +1243,7 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e
 
 	var err error
 	c.logger.Debugf("switching over from %q to %q", curMaster.Name, candidate)
+	c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switching over from %q to %q", curMaster.Name, candidate)
 
 	var wg sync.WaitGroup
 
@@ -1118,6 +1270,7 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e
 
 	if err = c.patroni.Switchover(curMaster, candidate.Name); err == nil {
 		c.logger.Debugf("successfully switched over from %q to %q", curMaster.Name, candidate)
+		c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Successfully switched over from %q to %q", curMaster.Name, candidate)
 		if err = <-podLabelErr; err != nil {
 			err = fmt.Errorf("could not get master pod label: %v", err)
 		}
@@ -1133,6 +1286,7 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e
 	// close the label waiting channel no sooner than the waiting goroutine terminates.
 	close(podLabelErr)
 
+	c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switchover from %q to %q FAILED: %v", curMaster.Name, candidate, err)
 	return err
 
 }
@@ -1160,11 +1314,19 @@ type clusterObjectDelete func(name string) error
 
 func (c *Cluster) deletePatroniClusterObjects() error {
 	// TODO: figure out how to remove leftover patroni objects in other cases
+	var actionsList []simpleActionWithResult
+
 	if !c.patroniUsesKubernetes() {
 		c.logger.Infof("not cleaning up Etcd Patroni objects on cluster delete")
 	}
-	c.logger.Debugf("removing leftover Patroni objects (endpoints, services and configmaps)")
-	for _, deleter := range []simpleActionWithResult{c.deletePatroniClusterEndpoints, c.deletePatroniClusterServices, c.deletePatroniClusterConfigMaps} {
+	if !c.patroniKubernetesUseConfigMaps() {
+		actionsList = append(actionsList, c.deletePatroniClusterEndpoints)
+	}
+	actionsList = append(actionsList, c.deletePatroniClusterServices, c.deletePatroniClusterConfigMaps)
+
+	c.logger.Debugf("removing leftover Patroni objects (endpoints / services and configmaps)")
+	for _, deleter := range actionsList {
 		if err := deleter(); err != nil {
 			return err
 		}
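The rework above replaces a fixed deleter slice with one built conditionally, so endpoint cleanup is skipped when Patroni keeps its state in config maps. The same pattern in isolation, with placeholder function names:

package main

import "fmt"

type action func() error

func cleanup(useConfigMaps bool) error {
	deleteEndpoints := func() error { fmt.Println("deleting endpoints"); return nil }
	deleteServices := func() error { fmt.Println("deleting services"); return nil }
	deleteConfigMaps := func() error { fmt.Println("deleting configmaps"); return nil }

	var actions []action
	if !useConfigMaps {
		// endpoints only exist when Patroni uses them for its DCS state
		actions = append(actions, deleteEndpoints)
	}
	actions = append(actions, deleteServices, deleteConfigMaps)

	for _, a := range actions {
		if err := a(); err != nil {
			return err
		}
	}
	return nil
}

func main() { _ = cleanup(true) }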
@@ -13,6 +13,8 @@ import (
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
 	"github.com/zalando/postgres-operator/pkg/util/teams"
 	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/tools/record"
 )
 
 const (
@@ -21,6 +23,8 @@ const (
 )
 
 var logger = logrus.New().WithField("test", "cluster")
+var eventRecorder = record.NewFakeRecorder(1)
+
 var cl = New(
 	Config{
 		OpConfig: config.Config{
@@ -32,8 +36,9 @@ var cl = New(
 		},
 	},
 	k8sutil.NewMockKubernetesClient(),
-	acidv1.Postgresql{},
+	acidv1.Postgresql{ObjectMeta: metav1.ObjectMeta{Name: "acid-test", Namespace: "test"}},
 	logger,
+	eventRecorder,
 )
 
 func TestInitRobotUsers(t *testing.T) {
@@ -756,3 +761,89 @@ func TestInitSystemUsers(t *testing.T) {
 		t.Errorf("%s, System users are not allowed to be a connection pool user", testName)
 	}
 }
+
+func TestPreparedDatabases(t *testing.T) {
+	testName := "TestDefaultPreparedDatabase"
+
+	cl.Spec.PreparedDatabases = map[string]acidv1.PreparedDatabase{}
+	cl.initPreparedDatabaseRoles()
+
+	for _, role := range []string{"acid_test_owner", "acid_test_reader", "acid_test_writer",
+		"acid_test_data_owner", "acid_test_data_reader", "acid_test_data_writer"} {
+		if _, exist := cl.pgUsers[role]; !exist {
+			t.Errorf("%s, default role %q for prepared database not present", testName, role)
+		}
+	}
+
+	testName = "TestPreparedDatabaseWithSchema"
+
+	cl.Spec.PreparedDatabases = map[string]acidv1.PreparedDatabase{
+		"foo": {
+			DefaultUsers: true,
+			PreparedSchemas: map[string]acidv1.PreparedSchema{
+				"bar": {
+					DefaultUsers: true,
+				},
+			},
+		},
+	}
+	cl.initPreparedDatabaseRoles()
+
+	for _, role := range []string{
+		"foo_owner", "foo_reader", "foo_writer",
+		"foo_owner_user", "foo_reader_user", "foo_writer_user",
+		"foo_bar_owner", "foo_bar_reader", "foo_bar_writer",
+		"foo_bar_owner_user", "foo_bar_reader_user", "foo_bar_writer_user"} {
+		if _, exist := cl.pgUsers[role]; !exist {
+			t.Errorf("%s, default role %q for prepared database not present", testName, role)
+		}
+	}
+
+	roleTests := []struct {
+		subTest  string
+		role     string
+		memberOf string
+		admin    string
+	}{
+		{
+			subTest:  "Test admin role of owner",
+			role:     "foo_owner",
+			memberOf: "",
+			admin:    "admin",
+		},
+		{
+			subTest:  "Test writer is a member of reader",
+			role:     "foo_writer",
+			memberOf: "foo_reader",
+			admin:    "foo_owner",
+		},
+		{
+			subTest:  "Test reader LOGIN role",
+			role:     "foo_reader_user",
+			memberOf: "foo_reader",
+			admin:    "foo_owner",
+		},
+		{
+			subTest:  "Test schema owner",
+			role:     "foo_bar_owner",
+			memberOf: "",
+			admin:    "foo_owner",
+		},
+		{
+			subTest:  "Test schema writer LOGIN role",
+			role:     "foo_bar_writer_user",
+			memberOf: "foo_bar_writer",
+			admin:    "foo_bar_owner",
+		},
+	}
+
+	for _, tt := range roleTests {
+		user := cl.pgUsers[tt.role]
+		if (tt.memberOf == "" && len(user.MemberOf) > 0) || (tt.memberOf != "" && user.MemberOf[0] != tt.memberOf) {
+			t.Errorf("%s, incorrect membership for default role %q. Expected %q, got %q", tt.subTest, tt.role, tt.memberOf, user.MemberOf[0])
+		}
+		if user.AdminRole != tt.admin {
+			t.Errorf("%s, incorrect admin role for default role %q. Expected %q, got %q", tt.subTest, tt.role, tt.admin, user.AdminRole)
+		}
+	}
+}
@@ -28,8 +28,34 @@ const (
 		ORDER BY 1;`
 
 	getDatabasesSQL = `SELECT datname, pg_get_userbyid(datdba) AS owner FROM pg_database;`
+	getSchemasSQL = `SELECT n.nspname AS dbschema FROM pg_catalog.pg_namespace n
+		WHERE n.nspname !~ '^pg_' AND n.nspname <> 'information_schema' ORDER BY 1`
+	getExtensionsSQL = `SELECT e.extname, n.nspname FROM pg_catalog.pg_extension e
+		LEFT JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace ORDER BY 1;`
 
 	createDatabaseSQL = `CREATE DATABASE "%s" OWNER "%s";`
+	createDatabaseSchemaSQL = `SET ROLE TO "%s"; CREATE SCHEMA IF NOT EXISTS "%s" AUTHORIZATION "%s"`
 	alterDatabaseOwnerSQL = `ALTER DATABASE "%s" OWNER TO "%s";`
+	createExtensionSQL = `CREATE EXTENSION IF NOT EXISTS "%s" SCHEMA "%s"`
+	alterExtensionSQL = `ALTER EXTENSION "%s" SET SCHEMA "%s"`
+
+	globalDefaultPrivilegesSQL = `SET ROLE TO "%s";
+		ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO "%s","%s";
+		ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO "%s";
+		ALTER DEFAULT PRIVILEGES GRANT SELECT ON SEQUENCES TO "%s";
+		ALTER DEFAULT PRIVILEGES GRANT INSERT, UPDATE, DELETE ON TABLES TO "%s";
+		ALTER DEFAULT PRIVILEGES GRANT USAGE, UPDATE ON SEQUENCES TO "%s";
+		ALTER DEFAULT PRIVILEGES GRANT EXECUTE ON FUNCTIONS TO "%s","%s";
+		ALTER DEFAULT PRIVILEGES GRANT USAGE ON TYPES TO "%s","%s";`
+	schemaDefaultPrivilegesSQL = `SET ROLE TO "%s";
+		GRANT USAGE ON SCHEMA "%s" TO "%s","%s";
+		ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT SELECT ON TABLES TO "%s";
+		ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT SELECT ON SEQUENCES TO "%s";
+		ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT INSERT, UPDATE, DELETE ON TABLES TO "%s";
+		ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT USAGE, UPDATE ON SEQUENCES TO "%s";
+		ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT EXECUTE ON FUNCTIONS TO "%s","%s";
+		ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT USAGE ON TYPES TO "%s","%s";`
 
 	connectionPoolerLookup = `
 	CREATE SCHEMA IF NOT EXISTS {{.pooler_schema}};
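To make the format strings concrete, here is a sketch that renders schemaDefaultPrivilegesSQL for schema "bar" in database "foo", copying the argument order that execAlterSchemaDefaultPrivileges (later in this diff) uses; the template whitespace is simplified:

package main

import "fmt"

const schemaDefaultPrivilegesSQL = `SET ROLE TO "%s";
GRANT USAGE ON SCHEMA "%s" TO "%s","%s";
ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT SELECT ON TABLES TO "%s";
ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT SELECT ON SEQUENCES TO "%s";
ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT INSERT, UPDATE, DELETE ON TABLES TO "%s";
ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT USAGE, UPDATE ON SEQUENCES TO "%s";
ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT EXECUTE ON FUNCTIONS TO "%s","%s";
ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT USAGE ON TYPES TO "%s","%s";`

func main() {
	owner, schema, prefix := "foo_bar_owner", "bar", "foo_bar"
	reader, writer := prefix+"_reader", prefix+"_writer"
	// Argument order follows the template: schema usage for both roles,
	// then read grants for the reader, write grants for the writer,
	// then functions and types for both.
	fmt.Printf(schemaDefaultPrivilegesSQL+"\n", owner,
		schema, reader, writer,
		schema, reader,
		schema, reader,
		schema, writer,
		schema, writer,
		schema, reader, writer,
		schema, reader, writer)
}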
@@ -221,43 +247,141 @@ func (c *Cluster) getDatabases() (dbs map[string]string, err error) {
 }
 
 // executeCreateDatabase creates new database with the given owner.
-// The caller is responsible for openinging and closing the database connection.
-func (c *Cluster) executeCreateDatabase(datname, owner string) error {
-	return c.execCreateOrAlterDatabase(datname, owner, createDatabaseSQL,
+// The caller is responsible for opening and closing the database connection.
+func (c *Cluster) executeCreateDatabase(databaseName, owner string) error {
+	return c.execCreateOrAlterDatabase(databaseName, owner, createDatabaseSQL,
 		"creating database", "create database")
 }
 
-// executeCreateDatabase changes the owner of the given database.
-// The caller is responsible for openinging and closing the database connection.
-func (c *Cluster) executeAlterDatabaseOwner(datname string, owner string) error {
-	return c.execCreateOrAlterDatabase(datname, owner, alterDatabaseOwnerSQL,
+// executeAlterDatabaseOwner changes the owner of the given database.
+// The caller is responsible for opening and closing the database connection.
+func (c *Cluster) executeAlterDatabaseOwner(databaseName string, owner string) error {
+	return c.execCreateOrAlterDatabase(databaseName, owner, alterDatabaseOwnerSQL,
 		"changing owner for database", "alter database owner")
 }
 
-func (c *Cluster) execCreateOrAlterDatabase(datname, owner, statement, doing, operation string) error {
-	if !c.databaseNameOwnerValid(datname, owner) {
+func (c *Cluster) execCreateOrAlterDatabase(databaseName, owner, statement, doing, operation string) error {
+	if !c.databaseNameOwnerValid(databaseName, owner) {
 		return nil
 	}
-	c.logger.Infof("%s %q owner %q", doing, datname, owner)
-	if _, err := c.pgDb.Exec(fmt.Sprintf(statement, datname, owner)); err != nil {
+	c.logger.Infof("%s %q owner %q", doing, databaseName, owner)
+	if _, err := c.pgDb.Exec(fmt.Sprintf(statement, databaseName, owner)); err != nil {
 		return fmt.Errorf("could not execute %s: %v", operation, err)
 	}
 	return nil
 }
 
-func (c *Cluster) databaseNameOwnerValid(datname, owner string) bool {
+func (c *Cluster) databaseNameOwnerValid(databaseName, owner string) bool {
 	if _, ok := c.pgUsers[owner]; !ok {
-		c.logger.Infof("skipping creation of the %q database, user %q does not exist", datname, owner)
+		c.logger.Infof("skipping creation of the %q database, user %q does not exist", databaseName, owner)
 		return false
 	}
 
-	if !databaseNameRegexp.MatchString(datname) {
-		c.logger.Infof("database %q has invalid name", datname)
+	if !databaseNameRegexp.MatchString(databaseName) {
+		c.logger.Infof("database %q has invalid name", databaseName)
 		return false
 	}
 	return true
 }
+
+// getSchemas returns the list of current database schemas
+// The caller is responsible for opening and closing the database connection
+func (c *Cluster) getSchemas() (schemas []string, err error) {
+	var (
+		rows      *sql.Rows
+		dbschemas []string
+	)
+
+	if rows, err = c.pgDb.Query(getSchemasSQL); err != nil {
+		return nil, fmt.Errorf("could not query database schemas: %v", err)
+	}
+
+	defer func() {
+		if err2 := rows.Close(); err2 != nil {
+			if err != nil {
+				err = fmt.Errorf("error when closing query cursor: %v, previous error: %v", err2, err)
+			} else {
+				err = fmt.Errorf("error when closing query cursor: %v", err2)
+			}
+		}
+	}()
+
+	for rows.Next() {
+		var dbschema string
+
+		if err = rows.Scan(&dbschema); err != nil {
+			return nil, fmt.Errorf("error when processing row: %v", err)
+		}
+		dbschemas = append(dbschemas, dbschema)
+	}
+
+	return dbschemas, err
+}
+
+// executeCreateDatabaseSchema creates new database schema with the given owner.
+// The caller is responsible for opening and closing the database connection.
+func (c *Cluster) executeCreateDatabaseSchema(databaseName, schemaName, dbOwner string, schemaOwner string) error {
+	return c.execCreateDatabaseSchema(databaseName, schemaName, dbOwner, schemaOwner, createDatabaseSchemaSQL,
+		"creating database schema", "create database schema")
+}
+
+func (c *Cluster) execCreateDatabaseSchema(databaseName, schemaName, dbOwner, schemaOwner, statement, doing, operation string) error {
+	if !c.databaseSchemaNameValid(schemaName) {
+		return nil
+	}
+	c.logger.Infof("%s %q owner %q", doing, schemaName, schemaOwner)
+	if _, err := c.pgDb.Exec(fmt.Sprintf(statement, dbOwner, schemaName, schemaOwner)); err != nil {
+		return fmt.Errorf("could not execute %s: %v", operation, err)
+	}
+
+	// set default privileges for schema
+	c.execAlterSchemaDefaultPrivileges(schemaName, schemaOwner, databaseName)
+	if schemaOwner != dbOwner {
+		c.execAlterSchemaDefaultPrivileges(schemaName, dbOwner, databaseName+"_"+schemaName)
+		c.execAlterSchemaDefaultPrivileges(schemaName, schemaOwner, databaseName+"_"+schemaName)
+	}
+
+	return nil
+}
+
+func (c *Cluster) databaseSchemaNameValid(schemaName string) bool {
+	if !databaseNameRegexp.MatchString(schemaName) {
+		c.logger.Infof("database schema %q has invalid name", schemaName)
+		return false
+	}
+	return true
+}
+
+func (c *Cluster) execAlterSchemaDefaultPrivileges(schemaName, owner, rolePrefix string) error {
+	if _, err := c.pgDb.Exec(fmt.Sprintf(schemaDefaultPrivilegesSQL, owner,
+		schemaName, rolePrefix+constants.ReaderRoleNameSuffix, rolePrefix+constants.WriterRoleNameSuffix, // schema
+		schemaName, rolePrefix+constants.ReaderRoleNameSuffix, // tables
+		schemaName, rolePrefix+constants.ReaderRoleNameSuffix, // sequences
+		schemaName, rolePrefix+constants.WriterRoleNameSuffix, // tables
+		schemaName, rolePrefix+constants.WriterRoleNameSuffix, // sequences
+		schemaName, rolePrefix+constants.ReaderRoleNameSuffix, rolePrefix+constants.WriterRoleNameSuffix, // types
+		schemaName, rolePrefix+constants.ReaderRoleNameSuffix, rolePrefix+constants.WriterRoleNameSuffix)); err != nil { // functions
+		return fmt.Errorf("could not alter default privileges for database schema %s: %v", schemaName, err)
+	}
+
+	return nil
+}
+
+func (c *Cluster) execAlterGlobalDefaultPrivileges(owner, rolePrefix string) error {
+	if _, err := c.pgDb.Exec(fmt.Sprintf(globalDefaultPrivilegesSQL, owner,
+		rolePrefix+constants.WriterRoleNameSuffix, rolePrefix+constants.ReaderRoleNameSuffix, // schemas
+		rolePrefix+constants.ReaderRoleNameSuffix, // tables
+		rolePrefix+constants.ReaderRoleNameSuffix, // sequences
+		rolePrefix+constants.WriterRoleNameSuffix, // tables
+		rolePrefix+constants.WriterRoleNameSuffix, // sequences
+		rolePrefix+constants.ReaderRoleNameSuffix, rolePrefix+constants.WriterRoleNameSuffix, // types
+		rolePrefix+constants.ReaderRoleNameSuffix, rolePrefix+constants.WriterRoleNameSuffix)); err != nil { // functions
+		return fmt.Errorf("could not alter default privileges for database %s: %v", rolePrefix, err)
+	}
+
+	return nil
+}
 
 func makeUserFlags(rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin bool) (result []string) {
 	if rolsuper {
 		result = append(result, constants.RoleFlagSuperuser)
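getSchemas above and getExtensions below share the same Query/Scan/deferred-Close shape. A compact, generic version of that pattern for reference, written as a standalone helper against database/sql:

package dbutil

import (
	"database/sql"
	"fmt"
)

// queryStrings runs a single-column query and collects the results,
// folding any cursor-close error into the returned error, the same
// way the getSchemas helper in this diff does.
func queryStrings(db *sql.DB, query string) (result []string, err error) {
	var rows *sql.Rows
	if rows, err = db.Query(query); err != nil {
		return nil, fmt.Errorf("could not run query: %v", err)
	}
	defer func() {
		if err2 := rows.Close(); err2 != nil {
			if err != nil {
				err = fmt.Errorf("error when closing query cursor: %v, previous error: %v", err2, err)
			} else {
				err = fmt.Errorf("error when closing query cursor: %v", err2)
			}
		}
	}()
	for rows.Next() {
		var s string
		if err = rows.Scan(&s); err != nil {
			return nil, fmt.Errorf("error when processing row: %v", err)
		}
		result = append(result, s)
	}
	return result, err
}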
@@ -278,8 +402,67 @@ func makeUserFlags(rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin
 	return result
 }
 
-// Creates a connection pooler credentials lookup function in every database to
-// perform remote authentication.
+// getExtension returns the list of current database extensions
+// The caller is responsible for opening and closing the database connection
+func (c *Cluster) getExtensions() (dbExtensions map[string]string, err error) {
+	var (
+		rows *sql.Rows
+	)
+
+	if rows, err = c.pgDb.Query(getExtensionsSQL); err != nil {
+		return nil, fmt.Errorf("could not query database extensions: %v", err)
+	}
+
+	defer func() {
+		if err2 := rows.Close(); err2 != nil {
+			if err != nil {
+				err = fmt.Errorf("error when closing query cursor: %v, previous error: %v", err2, err)
+			} else {
+				err = fmt.Errorf("error when closing query cursor: %v", err2)
+			}
+		}
+	}()
+
+	dbExtensions = make(map[string]string)
+
+	for rows.Next() {
+		var extension, schema string
+
+		if err = rows.Scan(&extension, &schema); err != nil {
+			return nil, fmt.Errorf("error when processing row: %v", err)
+		}
+		dbExtensions[extension] = schema
+	}
+
+	return dbExtensions, err
+}
+
+// executeCreateExtension creates new extension in the given schema.
+// The caller is responsible for opening and closing the database connection.
+func (c *Cluster) executeCreateExtension(extName, schemaName string) error {
+	return c.execCreateOrAlterExtension(extName, schemaName, createExtensionSQL,
+		"creating extension", "create extension")
+}
+
+// executeAlterExtension changes the schema of the given extension.
+// The caller is responsible for opening and closing the database connection.
+func (c *Cluster) executeAlterExtension(extName, schemaName string) error {
+	return c.execCreateOrAlterExtension(extName, schemaName, alterExtensionSQL,
+		"changing schema for extension", "alter extension schema")
+}
+
+func (c *Cluster) execCreateOrAlterExtension(extName, schemaName, statement, doing, operation string) error {
+
+	c.logger.Infof("%s %q schema %q", doing, extName, schemaName)
+	if _, err := c.pgDb.Exec(fmt.Sprintf(statement, extName, schemaName)); err != nil {
+		return fmt.Errorf("could not execute %s: %v", operation, err)
+	}
+
+	return nil
+}
+
+// Creates a connection pool credentials lookup function in every database to
+// perform remote authentication.
 func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error {
 	var stmtBytes bytes.Buffer
 	c.logger.Info("Installing lookup function")
@@ -305,7 +488,7 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error {
 
 	templater := template.Must(template.New("sql").Parse(connectionPoolerLookup))
 
-	for dbname, _ := range currentDatabases {
+	for dbname := range currentDatabases {
 		if dbname == "template0" || dbname == "template1" {
 			continue
 		}
@@ -462,8 +462,7 @@ func generateContainer(
 }
 
 func generateSidecarContainers(sidecars []acidv1.Sidecar,
-	volumeMounts []v1.VolumeMount, defaultResources acidv1.Resources,
-	superUserName string, credentialsSecretName string, logger *logrus.Entry) ([]v1.Container, error) {
+	defaultResources acidv1.Resources, startIndex int, logger *logrus.Entry) ([]v1.Container, error) {
 
 	if len(sidecars) > 0 {
 		result := make([]v1.Container, 0)
@@ -482,7 +481,7 @@ func generateSidecarContainers(sidecars []acidv1.Sidecar,
 				return nil, err
 			}
 
-			sc := getSidecarContainer(sidecar, index, volumeMounts, resources, superUserName, credentialsSecretName, logger)
+			sc := getSidecarContainer(sidecar, startIndex+index, resources)
 			result = append(result, *sc)
 		}
 		return result, nil
@@ -490,6 +489,55 @@ func generateSidecarContainers(sidecars []acidv1.Sidecar,
 	return nil, nil
 }
 
+// adds common fields to sidecars
+func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, superUserName string, credentialsSecretName string, logger *logrus.Entry) []v1.Container {
+	result := []v1.Container{}
+
+	for _, container := range in {
+		container.VolumeMounts = append(container.VolumeMounts, volumeMounts...)
+		env := []v1.EnvVar{
+			{
+				Name: "POD_NAME",
+				ValueFrom: &v1.EnvVarSource{
+					FieldRef: &v1.ObjectFieldSelector{
+						APIVersion: "v1",
+						FieldPath:  "metadata.name",
+					},
+				},
+			},
+			{
+				Name: "POD_NAMESPACE",
+				ValueFrom: &v1.EnvVarSource{
+					FieldRef: &v1.ObjectFieldSelector{
+						APIVersion: "v1",
+						FieldPath:  "metadata.namespace",
+					},
+				},
+			},
+			{
+				Name:  "POSTGRES_USER",
+				Value: superUserName,
+			},
+			{
+				Name: "POSTGRES_PASSWORD",
+				ValueFrom: &v1.EnvVarSource{
+					SecretKeyRef: &v1.SecretKeySelector{
+						LocalObjectReference: v1.LocalObjectReference{
+							Name: credentialsSecretName,
+						},
+						Key: "password",
+					},
+				},
+			},
+		}
+		mergedEnv := append(container.Env, env...)
+		container.Env = deduplicateEnvVars(mergedEnv, container.Name, logger)
+		result = append(result, container)
+	}
+
+	return result
+}
+
 // Check whether or not we're requested to mount an shm volume,
 // taking into account that PostgreSQL manifest has precedence.
 func mountShmVolumeNeeded(opConfig config.Config, spec *acidv1.PostgresSpec) *bool {
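patchSidecarContainers injects POD_NAME/POD_NAMESPACE via the downward API plus the superuser credentials, then deduplicates: because the sidecar's own variables are appended first, they win over the injected defaults. A simplified stand-in for that dedup behavior (deduplicateEnvVars itself is not shown in this excerpt, so this is an assumption about keeping the first occurrence):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// dedupe keeps the first occurrence of each variable name.
func dedupe(in []v1.EnvVar) []v1.EnvVar {
	seen := map[string]bool{}
	out := make([]v1.EnvVar, 0, len(in))
	for _, e := range in {
		if seen[e.Name] {
			continue
		}
		seen[e.Name] = true
		out = append(out, e)
	}
	return out
}

func main() {
	sidecarEnv := []v1.EnvVar{{Name: "POSTGRES_USER", Value: "custom"}}
	injected := []v1.EnvVar{{Name: "POSTGRES_USER", Value: "postgres"}, {Name: "POD_NAME"}}
	// Sidecar-defined variables come first, so they shadow injected defaults.
	fmt.Println(dedupe(append(sidecarEnv, injected...)))
}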
@@ -519,7 +567,6 @@ func (c *Cluster) generatePodTemplate(
 	podAntiAffinityTopologyKey string,
 	additionalSecretMount string,
 	additionalSecretMountPath string,
-	volumes []v1.Volume,
 	additionalVolumes []acidv1.AdditionalVolume,
 ) (*v1.PodTemplateSpec, error) {
 
@@ -539,7 +586,6 @@ func (c *Cluster) generatePodTemplate(
 		InitContainers:  initContainers,
 		Tolerations:     *tolerationsSpec,
 		SecurityContext: &securityContext,
-		Volumes:         volumes,
 	}
 
 	if shmVolume != nil && *shmVolume {
@@ -726,58 +772,18 @@ func deduplicateEnvVars(input []v1.EnvVar, containerName string, logger *logrus.
 	return result
 }
 
-func getSidecarContainer(sidecar acidv1.Sidecar, index int, volumeMounts []v1.VolumeMount,
-	resources *v1.ResourceRequirements, superUserName string, credentialsSecretName string, logger *logrus.Entry) *v1.Container {
+func getSidecarContainer(sidecar acidv1.Sidecar, index int, resources *v1.ResourceRequirements) *v1.Container {
 	name := sidecar.Name
 	if name == "" {
 		name = fmt.Sprintf("sidecar-%d", index)
 	}
 
-	env := []v1.EnvVar{
-		{
-			Name: "POD_NAME",
-			ValueFrom: &v1.EnvVarSource{
-				FieldRef: &v1.ObjectFieldSelector{
-					APIVersion: "v1",
-					FieldPath:  "metadata.name",
-				},
-			},
-		},
-		{
-			Name: "POD_NAMESPACE",
-			ValueFrom: &v1.EnvVarSource{
-				FieldRef: &v1.ObjectFieldSelector{
-					APIVersion: "v1",
-					FieldPath:  "metadata.namespace",
-				},
-			},
-		},
-		{
-			Name:  "POSTGRES_USER",
-			Value: superUserName,
-		},
-		{
-			Name: "POSTGRES_PASSWORD",
-			ValueFrom: &v1.EnvVarSource{
-				SecretKeyRef: &v1.SecretKeySelector{
-					LocalObjectReference: v1.LocalObjectReference{
-						Name: credentialsSecretName,
-					},
-					Key: "password",
-				},
-			},
-		},
-	}
-	if len(sidecar.Env) > 0 {
-		env = append(env, sidecar.Env...)
-	}
 	return &v1.Container{
 		Name:            name,
 		Image:           sidecar.DockerImage,
 		ImagePullPolicy: v1.PullIfNotPresent,
 		Resources:       *resources,
-		VolumeMounts:    volumeMounts,
-		Env:             deduplicateEnvVars(env, name, logger),
+		Env:             sidecar.Env,
 		Ports:           sidecar.Ports,
 	}
 }
@@ -854,7 +860,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		sidecarContainers   []v1.Container
 		podTemplate         *v1.PodTemplateSpec
 		volumeClaimTemplate *v1.PersistentVolumeClaim
-		volumes             []v1.Volume
+		additionalVolumes   = spec.AdditionalVolumes
 	)
 
 	// Improve me. Please.
@@ -1007,8 +1013,10 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		// this is combined with the FSGroup in the section above
 		// to give read access to the postgres user
 		defaultMode := int32(0640)
-		volumes = append(volumes, v1.Volume{
-			Name: "tls-secret",
+		mountPath := "/tls"
+		additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
+			Name:      spec.TLS.SecretName,
+			MountPath: mountPath,
 			VolumeSource: v1.VolumeSource{
 				Secret: &v1.SecretVolumeSource{
 					SecretName: spec.TLS.SecretName,
@@ -1017,13 +1025,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 			},
 		})
 
-		mountPath := "/tls"
-		volumeMounts = append(volumeMounts, v1.VolumeMount{
-			MountPath: mountPath,
-			Name:      "tls-secret",
-			ReadOnly:  true,
-		})
-
 		// use the same filenames as Secret resources by default
 		certFile := ensurePath(spec.TLS.CertificateFile, mountPath, "tls.crt")
 		privateKeyFile := ensurePath(spec.TLS.PrivateKeyFile, mountPath, "tls.key")
@@ -1034,11 +1035,31 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		)
 
 		if spec.TLS.CAFile != "" {
-			caFile := ensurePath(spec.TLS.CAFile, mountPath, "")
+			// support scenario when the ca.crt resides in a different secret, diff path
+			mountPathCA := mountPath
+			if spec.TLS.CASecretName != "" {
+				mountPathCA = mountPath + "ca"
+			}
+
+			caFile := ensurePath(spec.TLS.CAFile, mountPathCA, "")
 			spiloEnvVars = append(
 				spiloEnvVars,
 				v1.EnvVar{Name: "SSL_CA_FILE", Value: caFile},
 			)
+
+			// the ca file from CASecretName secret takes priority
+			if spec.TLS.CASecretName != "" {
+				additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
+					Name:      spec.TLS.CASecretName,
+					MountPath: mountPathCA,
+					VolumeSource: v1.VolumeSource{
+						Secret: &v1.SecretVolumeSource{
+							SecretName:  spec.TLS.CASecretName,
+							DefaultMode: &defaultMode,
+						},
+					},
+				})
+			}
 		}
 	}
 
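The net effect: with only secretName the certificate, key, and CA all mount under /tls; when caSecretName is set, the CA secret mounts at /tlsca and SSL_CA_FILE points there. A tiny sketch of the path derivation — ensurePath is not shown in this excerpt, so its behavior (absolute paths kept, relative names joined onto the mount directory) is an assumption here:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// ensurePath is assumed to leave absolute paths alone and to join
// relative names onto the mount directory, with a default filename.
func ensurePath(file, mountPath, defaultFile string) string {
	if file == "" {
		file = defaultFile
	}
	if strings.HasPrefix(file, "/") {
		return file
	}
	return filepath.Join(mountPath, file)
}

func main() {
	mountPath := "/tls"
	caSecretName := "my-ca" // set -> CA lives in its own secret
	mountPathCA := mountPath
	if caSecretName != "" {
		mountPathCA = mountPath + "ca" // "/tlsca", as in the hunk above
	}
	fmt.Println(ensurePath("tls.crt", mountPath, "tls.crt")) // /tls/tls.crt
	fmt.Println(ensurePath("ca.crt", mountPathCA, ""))       // /tlsca/ca.crt
}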
@@ -1052,37 +1073,63 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		c.OpConfig.Resources.SpiloPrivileged,
 	)
 
-	// resolve conflicts between operator-global and per-cluster sidecars
-	sideCars := c.mergeSidecars(spec.Sidecars)
+	// generate container specs for sidecars specified in the cluster manifest
+	clusterSpecificSidecars := []v1.Container{}
+	if spec.Sidecars != nil && len(spec.Sidecars) > 0 {
+		// warn if sidecars are defined, but globally disabled (does not apply to globally defined sidecars)
+		if c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) {
+			c.logger.Warningf("sidecars specified but disabled in configuration - next statefulset creation would fail")
+		}
 
-	resourceRequirementsScalyrSidecar := makeResources(
-		c.OpConfig.ScalyrCPURequest,
-		c.OpConfig.ScalyrMemoryRequest,
-		c.OpConfig.ScalyrCPULimit,
-		c.OpConfig.ScalyrMemoryLimit,
-	)
+		if clusterSpecificSidecars, err = generateSidecarContainers(spec.Sidecars, defaultResources, 0, c.logger); err != nil {
+			return nil, fmt.Errorf("could not generate sidecar containers: %v", err)
+		}
+	}
+
+	// deprecated way of providing global sidecars
+	var globalSidecarContainersByDockerImage []v1.Container
+	var globalSidecarsByDockerImage []acidv1.Sidecar
+	for name, dockerImage := range c.OpConfig.SidecarImages {
+		globalSidecarsByDockerImage = append(globalSidecarsByDockerImage, acidv1.Sidecar{Name: name, DockerImage: dockerImage})
+	}
+	if globalSidecarContainersByDockerImage, err = generateSidecarContainers(globalSidecarsByDockerImage, defaultResources, len(clusterSpecificSidecars), c.logger); err != nil {
+		return nil, fmt.Errorf("could not generate sidecar containers: %v", err)
+	}
+	// make the resulting list reproducible
+	// c.OpConfig.SidecarImages is unsorted by Golang definition
+	// .Name is unique
+	sort.Slice(globalSidecarContainersByDockerImage, func(i, j int) bool {
+		return globalSidecarContainersByDockerImage[i].Name < globalSidecarContainersByDockerImage[j].Name
+	})
 
 	// generate scalyr sidecar container
-	if scalyrSidecar :=
+	var scalyrSidecars []v1.Container
+	if scalyrSidecar, err :=
 		generateScalyrSidecarSpec(c.Name,
 			c.OpConfig.ScalyrAPIKey,
 			c.OpConfig.ScalyrServerURL,
 			c.OpConfig.ScalyrImage,
-			&resourceRequirementsScalyrSidecar, c.logger); scalyrSidecar != nil {
-		sideCars = append(sideCars, *scalyrSidecar)
+			c.OpConfig.ScalyrCPURequest,
+			c.OpConfig.ScalyrMemoryRequest,
+			c.OpConfig.ScalyrCPULimit,
+			c.OpConfig.ScalyrMemoryLimit,
+			defaultResources,
+			c.logger); err != nil {
+		return nil, fmt.Errorf("could not generate Scalyr sidecar: %v", err)
+	} else {
+		if scalyrSidecar != nil {
+			scalyrSidecars = append(scalyrSidecars, *scalyrSidecar)
+		}
 	}
 
-	// generate sidecar containers
-	if sideCars != nil && len(sideCars) > 0 {
-		if c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) {
-			c.logger.Warningf("sidecars specified but disabled in configuration - next statefulset creation would fail")
-		}
-		if sidecarContainers, err = generateSidecarContainers(sideCars, volumeMounts, defaultResources,
-			c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger); err != nil {
-			return nil, fmt.Errorf("could not generate sidecar containers: %v", err)
-		}
+	sidecarContainers, conflicts := mergeContainers(clusterSpecificSidecars, c.Config.OpConfig.SidecarContainers, globalSidecarContainersByDockerImage, scalyrSidecars)
+	for containerName := range conflicts {
+		c.logger.Warningf("a sidecar is specified twice. Ignoring sidecar %q in favor of %q with a higher precedence",
+			containerName, containerName)
 	}
 
+	sidecarContainers = patchSidecarContainers(sidecarContainers, volumeMounts, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger)
+
 	tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
 	effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)
 
@@ -1108,8 +1155,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		c.OpConfig.PodAntiAffinityTopologyKey,
 		c.OpConfig.AdditionalSecretMount,
 		c.OpConfig.AdditionalSecretMountPath,
-		volumes,
-		spec.AdditionalVolumes)
+		additionalVolumes)
 
 	if err != nil {
 		return nil, fmt.Errorf("could not generate pod template: %v", err)
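mergeContainers is not shown in this excerpt; from the call site it takes the four sidecar lists in priority order (cluster manifest, operator sidecar_containers, deprecated sidecar images, Scalyr) and reports name clashes. A hedged sketch of such a merge, under the assumption that earlier lists win:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// mergeByName keeps the first container seen under each name and records
// the names that were shadowed — an illustrative stand-in for the
// mergeContainers call in this diff.
func mergeByName(lists ...[]v1.Container) ([]v1.Container, map[string]bool) {
	seen := map[string]bool{}
	conflicts := map[string]bool{}
	var out []v1.Container
	for _, list := range lists {
		for _, c := range list {
			if seen[c.Name] {
				conflicts[c.Name] = true
				continue
			}
			seen[c.Name] = true
			out = append(out, c)
		}
	}
	return out, conflicts
}

func main() {
	clusterSpecific := []v1.Container{{Name: "metrics", Image: "exporter:v2"}}
	global := []v1.Container{{Name: "metrics", Image: "exporter:v1"}}
	merged, conflicts := mergeByName(clusterSpecific, global)
	fmt.Println(merged[0].Image, conflicts) // exporter:v2 map[metrics:true]
}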
@ -1176,17 +1222,25 @@ func (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]s
}

func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage string,
containerResources *acidv1.Resources, logger *logrus.Entry) *acidv1.Sidecar {
scalyrCPURequest string, scalyrMemoryRequest string, scalyrCPULimit string, scalyrMemoryLimit string,
defaultResources acidv1.Resources, logger *logrus.Entry) (*v1.Container, error) {
if APIKey == "" || dockerImage == "" {
if APIKey == "" && dockerImage != "" {
logger.Warning("Not running Scalyr sidecar: SCALYR_API_KEY must be defined")
}
return nil
return nil, nil
}
scalarSpec := &acidv1.Sidecar{
Name: "scalyr-sidecar",
DockerImage: dockerImage,
Env: []v1.EnvVar{
resourcesScalyrSidecar := makeResources(
scalyrCPURequest,
scalyrMemoryRequest,
scalyrCPULimit,
scalyrMemoryLimit,
)
resourceRequirementsScalyrSidecar, err := generateResourceRequirements(resourcesScalyrSidecar, defaultResources)
if err != nil {
return nil, fmt.Errorf("invalid resources for Scalyr sidecar: %v", err)
}
env := []v1.EnvVar{
{
Name: "SCALYR_API_KEY",
Value: APIKey,
@ -1195,38 +1249,17 @@ func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage strin
Name: "SCALYR_SERVER_HOST",
Value: clusterName,
},
},
Resources: *containerResources,
}
}
if serverURL != "" {
scalarSpec.Env = append(scalarSpec.Env, v1.EnvVar{Name: "SCALYR_SERVER_URL", Value: serverURL})
env = append(env, v1.EnvVar{Name: "SCALYR_SERVER_URL", Value: serverURL})
}
return scalarSpec
}

// mergeSidecar merges globally-defined sidecars with those defined in the cluster manifest
func (c *Cluster) mergeSidecars(sidecars []acidv1.Sidecar) []acidv1.Sidecar {
globalSidecarsToSkip := map[string]bool{}
result := make([]acidv1.Sidecar, 0)
for i, sidecar := range sidecars {
dockerImage, ok := c.OpConfig.Sidecars[sidecar.Name]
if ok {
if dockerImage != sidecar.DockerImage {
c.logger.Warningf("merging definitions for sidecar %q: "+
"ignoring %q in the global scope in favor of %q defined in the cluster",
sidecar.Name, dockerImage, sidecar.DockerImage)
}
globalSidecarsToSkip[sidecar.Name] = true
}
result = append(result, sidecars[i])
}
for name, dockerImage := range c.OpConfig.Sidecars {
if !globalSidecarsToSkip[name] {
result = append(result, acidv1.Sidecar{Name: name, DockerImage: dockerImage})
}
}
return result
}
return &v1.Container{
Name: "scalyr-sidecar",
Image: dockerImage,
Env: env,
ImagePullPolicy: v1.PullIfNotPresent,
Resources: *resourceRequirementsScalyrSidecar,
}, nil
}

func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
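The rework above threads the four Scalyr resource strings through makeResources and generateResourceRequirements instead of taking a prebuilt resource struct. A minimal, dependency-free sketch of the fallback behaviour this presumably enables — an explicit value wins, an empty string falls back to the operator default; coalesce here is an illustrative stand-in, not the operator's helper:

package main

import "fmt"

// coalesce is a hypothetical stand-in for the default-merging done by
// generateResourceRequirements: an empty request/limit falls back to the default
func coalesce(value, def string) string {
	if value != "" {
		return value
	}
	return def
}

func main() {
	fmt.Println(coalesce("220m", "200m")) // 220m: the explicit Scalyr CPU request wins
	fmt.Println(coalesce("", "1.3Gi"))    // 1.3Gi: an unset memory limit falls back to the default
}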
@ -1437,6 +1470,13 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
return nil
}

// skip NOLOGIN users
for _, flag := range pgUser.Flags {
if flag == constants.RoleFlagNoLogin {
return nil
}
}

username := pgUser.Name
secret := v1.Secret{
ObjectMeta: metav1.ObjectMeta{
@ -1614,11 +1654,11 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
c.logger.Info(msg, description.S3WalPath)

envs := []v1.EnvVar{
v1.EnvVar{
{
Name: "CLONE_WAL_S3_BUCKET",
Value: c.OpConfig.WALES3Bucket,
},
v1.EnvVar{
{
Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX",
Value: getBucketScopeSuffix(description.UID),
},
@ -1790,7 +1830,6 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
"",
c.OpConfig.AdditionalSecretMount,
c.OpConfig.AdditionalSecretMountPath,
nil,
[]acidv1.AdditionalVolume{}); err != nil {
return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err)
}
@ -18,6 +18,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
@ -37,7 +38,7 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
ReplicationUsername: replicationUserName,
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)

testName := "TestGenerateSpiloConfig"
tests := []struct {
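From here on, every New call in the tests takes an extra eventRecorder argument. The diff does not show its definition; presumably it is a buffered fake such as client-go's record.NewFakeRecorder. A sketch of how such a shared test fixture could look — an assumption about the fixture, not this file's actual code:

package cluster // hypothetical placement next to the tests

import "k8s.io/client-go/tools/record"

// a fake recorder buffering up to 100 events; tests can drain eventRecorder.Events
var eventRecorder = record.NewFakeRecorder(100)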
@ -102,7 +103,7 @@ func TestCreateLoadBalancerLogic(t *testing.T) {
ReplicationUsername: replicationUserName,
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)

testName := "TestCreateLoadBalancerLogic"
tests := []struct {
@ -164,7 +165,8 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}},
logger),
logger,
eventRecorder),
policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "postgres-myapp-database-pdb",

@ -187,7 +189,8 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 0}},
logger),
logger,
eventRecorder),
policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "postgres-myapp-database-pdb",

@ -210,7 +213,8 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}},
logger),
logger,
eventRecorder),
policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "postgres-myapp-database-pdb",

@ -233,7 +237,8 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}},
logger),
logger,
eventRecorder),
policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "postgres-myapp-database-databass-budget",
@ -368,7 +373,7 @@ func TestCloneEnv(t *testing.T) {
ReplicationUsername: replicationUserName,
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)

for _, tt := range tests {
envs := cluster.generateCloneEnvironment(tt.cloneOpts)

@ -502,7 +507,7 @@ func TestGetPgVersion(t *testing.T) {
ReplicationUsername: replicationUserName,
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)

for _, tt := range tests {
pgVersion, err := cluster.getNewPgVersion(tt.pgContainer, tt.newPgVersion)
@ -678,7 +683,7 @@ func TestConnectionPoolerPodSpec(t *testing.T) {
ConnectionPoolerDefaultMemoryLimit: "100Mi",
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)

var clusterNoDefaultRes = New(
Config{

@ -690,7 +695,7 @@ func TestConnectionPoolerPodSpec(t *testing.T) {
},
ConnectionPooler: config.ConnectionPooler{},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)

noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { return nil }

@ -803,7 +808,7 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) {
ConnectionPoolerDefaultMemoryLimit: "100Mi",
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
cluster.Statefulset = &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-sts",

@ -904,7 +909,7 @@ func TestConnectionPoolerServiceSpec(t *testing.T) {
ConnectionPoolerDefaultMemoryLimit: "100Mi",
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
cluster.Statefulset = &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-sts",
@ -961,6 +966,7 @@ func TestTLS(t *testing.T) {
var spec acidv1.PostgresSpec
var cluster *Cluster
var spiloFSGroup = int64(103)
var additionalVolumes = spec.AdditionalVolumes

makeSpec := func(tls acidv1.TLSDescription) acidv1.PostgresSpec {
return acidv1.PostgresSpec{

@ -989,7 +995,7 @@ func TestTLS(t *testing.T) {
SpiloFSGroup: &spiloFSGroup,
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
spec = makeSpec(acidv1.TLSDescription{SecretName: "my-secret", CAFile: "ca.crt"})
s, err := cluster.generateStatefulSet(&spec)
if err != nil {
@ -1000,8 +1006,20 @@ func TestTLS(t *testing.T) {
assert.Equal(t, &fsGroup, s.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")

defaultMode := int32(0640)
mountPath := "/tls"
additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
Name: spec.TLS.SecretName,
MountPath: mountPath,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: spec.TLS.SecretName,
DefaultMode: &defaultMode,
},
},
})

volume := v1.Volume{
Name: "tls-secret",
Name: "my-secret",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: "my-secret",
@ -1013,8 +1031,7 @@ func TestTLS(t *testing.T) {

assert.Contains(t, s.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
MountPath: "/tls",
Name: "tls-secret",
Name: "my-secret",
ReadOnly: true,
}, "the volume gets mounted in /tls")

assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
@ -1100,7 +1117,7 @@ func TestAdditionalVolume(t *testing.T) {
ReplicationUsername: replicationUserName,
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)

for _, tt := range tests {
// Test with additional volume mounted in all containers
@ -1190,3 +1207,201 @@ func TestAdditionalVolume(t *testing.T) {
}
}
}

// inject sidecars through all available mechanisms and check the resulting container specs
func TestSidecars(t *testing.T) {
var err error
var spec acidv1.PostgresSpec
var cluster *Cluster

generateKubernetesResources := func(cpuRequest string, cpuLimit string, memoryRequest string, memoryLimit string) v1.ResourceRequirements {
parsedCPURequest, err := resource.ParseQuantity(cpuRequest)
assert.NoError(t, err)
parsedCPULimit, err := resource.ParseQuantity(cpuLimit)
assert.NoError(t, err)
parsedMemoryRequest, err := resource.ParseQuantity(memoryRequest)
assert.NoError(t, err)
parsedMemoryLimit, err := resource.ParseQuantity(memoryLimit)
assert.NoError(t, err)
return v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: parsedCPURequest,
v1.ResourceMemory: parsedMemoryRequest,
},
Limits: v1.ResourceList{
v1.ResourceCPU: parsedCPULimit,
v1.ResourceMemory: parsedMemoryLimit,
},
}
}

spec = acidv1.PostgresSpec{
TeamID: "myapp", NumberOfInstances: 1,
Resources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
},
Volume: acidv1.Volume{
Size: "1G",
},
Sidecars: []acidv1.Sidecar{
acidv1.Sidecar{
Name: "cluster-specific-sidecar",
},
acidv1.Sidecar{
Name: "cluster-specific-sidecar-with-resources",
Resources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "210m", Memory: "0.8Gi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "510m", Memory: "1.4Gi"},
},
},
acidv1.Sidecar{
Name: "replace-sidecar",
DockerImage: "overwrite-image",
},
},
}

cluster = New(
Config{
OpConfig: config.Config{
PodManagementPolicy: "ordered_ready",
ProtectedRoles: []string{"admin"},
Auth: config.Auth{
SuperUsername: superUserName,
ReplicationUsername: replicationUserName,
},
Resources: config.Resources{
DefaultCPURequest: "200m",
DefaultCPULimit: "500m",
DefaultMemoryRequest: "0.7Gi",
DefaultMemoryLimit: "1.3Gi",
},
SidecarImages: map[string]string{
"deprecated-global-sidecar": "image:123",
},
SidecarContainers: []v1.Container{
v1.Container{
Name: "global-sidecar",
},
// will be replaced by a cluster specific sidecar with the same name
v1.Container{
Name: "replace-sidecar",
Image: "replaced-image",
},
},
Scalyr: config.Scalyr{
ScalyrAPIKey: "abc",
ScalyrImage: "scalyr-image",
ScalyrCPURequest: "220m",
ScalyrCPULimit: "520m",
ScalyrMemoryRequest: "0.9Gi",
// use default memory limit
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)

s, err := cluster.generateStatefulSet(&spec)
assert.NoError(t, err)

env := []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.namespace",
},
},
},
{
Name: "POSTGRES_USER",
Value: superUserName,
},
{
Name: "POSTGRES_PASSWORD",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "",
},
Key: "password",
},
},
},
}
mounts := []v1.VolumeMount{
v1.VolumeMount{
Name: "pgdata",
MountPath: "/home/postgres/pgdata",
},
}

// deduplicated sidecars and Patroni
assert.Equal(t, 7, len(s.Spec.Template.Spec.Containers), "wrong number of containers")

// cluster specific sidecar
assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{
Name: "cluster-specific-sidecar",
Env: env,
Resources: generateKubernetesResources("200m", "500m", "0.7Gi", "1.3Gi"),
ImagePullPolicy: v1.PullIfNotPresent,
VolumeMounts: mounts,
})

// container specific resources
expectedResources := generateKubernetesResources("210m", "510m", "0.8Gi", "1.4Gi")
assert.Equal(t, expectedResources.Requests[v1.ResourceCPU], s.Spec.Template.Spec.Containers[2].Resources.Requests[v1.ResourceCPU])
assert.Equal(t, expectedResources.Limits[v1.ResourceCPU], s.Spec.Template.Spec.Containers[2].Resources.Limits[v1.ResourceCPU])
assert.Equal(t, expectedResources.Requests[v1.ResourceMemory], s.Spec.Template.Spec.Containers[2].Resources.Requests[v1.ResourceMemory])
assert.Equal(t, expectedResources.Limits[v1.ResourceMemory], s.Spec.Template.Spec.Containers[2].Resources.Limits[v1.ResourceMemory])

// deprecated global sidecar
assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{
Name: "deprecated-global-sidecar",
Image: "image:123",
Env: env,
Resources: generateKubernetesResources("200m", "500m", "0.7Gi", "1.3Gi"),
ImagePullPolicy: v1.PullIfNotPresent,
VolumeMounts: mounts,
})

// global sidecar
assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{
Name: "global-sidecar",
Env: env,
VolumeMounts: mounts,
})

// replaced sidecar
assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{
Name: "replace-sidecar",
Image: "overwrite-image",
Resources: generateKubernetesResources("200m", "500m", "0.7Gi", "1.3Gi"),
ImagePullPolicy: v1.PullIfNotPresent,
Env: env,
VolumeMounts: mounts,
})

// Scalyr sidecar
// the order in env is important
scalyrEnv := append([]v1.EnvVar{v1.EnvVar{Name: "SCALYR_API_KEY", Value: "abc"}, v1.EnvVar{Name: "SCALYR_SERVER_HOST", Value: ""}}, env...)
assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{
Name: "scalyr-sidecar",
Image: "scalyr-image",
Resources: generateKubernetesResources("220m", "520m", "0.9Gi", "1.3Gi"),
ImagePullPolicy: v1.PullIfNotPresent,
Env: scalyrEnv,
VolumeMounts: mounts,
})
}
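One detail in TestSidecars worth spelling out: scalyrEnv is built by appending the shared env slice to a fresh literal, so the two SCALYR variables come first and env itself is left untouched. A standalone sketch of that append pattern, with plain strings standing in for v1.EnvVar:

package main

import "fmt"

func main() {
	env := []string{"POD_NAME", "POD_NAMESPACE"}
	// appending to a fresh slice literal prepends without mutating env
	scalyrEnv := append([]string{"SCALYR_API_KEY", "SCALYR_SERVER_HOST"}, env...)
	fmt.Println(scalyrEnv) // [SCALYR_API_KEY SCALYR_SERVER_HOST POD_NAME POD_NAMESPACE]
	fmt.Println(env)       // [POD_NAME POD_NAMESPACE] - unchanged
}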
@ -294,6 +294,27 @@ func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) {
return pod, nil
}

func (c *Cluster) isSafeToRecreatePods(pods *v1.PodList) bool {

/*
Operator should not re-create pods if there is at least one replica being bootstrapped
because Patroni might use other replicas to take a basebackup from (see Patroni's "clonefrom" tag).

XXX operator cannot forbid replica re-init, so we might still fail if re-init is started
after this check succeeds but before a pod is re-created
*/

for _, pod := range pods.Items {
state, err := c.patroni.GetPatroniMemberState(&pod)
if err != nil || state == "creating replica" {
c.logger.Warningf("cannot re-create replica %s: it is currently being initialized", pod.Name)
return false
}
}
return true
}

func (c *Cluster) recreatePods() error {
c.setProcessName("starting to recreate pods")
ls := c.labelsSet(false)
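Before the diff moves on to the call site: the gate above boils down to a pure predicate over the Patroni member states — any member reporting "creating replica" (or an error while asking) blocks recreation. A self-contained sketch; the "running" state string is only illustrative:

package main

import "fmt"

func isSafeToRecreate(states []string) bool {
	for _, state := range states {
		if state == "creating replica" {
			return false // a replica is still bootstrapping
		}
	}
	return true
}

func main() {
	fmt.Println(isSafeToRecreate([]string{"running", "running"}))          // true
	fmt.Println(isSafeToRecreate([]string{"running", "creating replica"})) // false
}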
@ -309,6 +330,10 @@ func (c *Cluster) recreatePods() error {
}
c.logger.Infof("there are %d pods in the cluster to recreate", len(pods.Items))

if !c.isSafeToRecreatePods(pods) {
return fmt.Errorf("postpone pod recreation until next Sync: recreation is unsafe because pods are being initialized")
}

var (
masterPod, newMasterPod, newPod *v1.Pod
)
@ -36,7 +36,7 @@ func TestConnectionPoolerCreationAndDeletion(t *testing.T) {
ConnectionPoolerDefaultMemoryLimit: "100Mi",
},
},
}, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger)
}, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder)

cluster.Statefulset = &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{

@ -85,7 +85,7 @@ func TestNeedConnectionPooler(t *testing.T) {
ConnectionPoolerDefaultMemoryLimit: "100Mi",
},
},
}, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger)
}, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder)

cluster.Spec = acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
@ -3,11 +3,7 @@ package cluster
import (
"context"
"fmt"
"strings"
batchv1beta1 "k8s.io/api/batch/v1beta1"
v1 "k8s.io/api/core/v1"
policybeta1 "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando/postgres-operator/pkg/spec"

@ -15,6 +11,11 @@ import (
"github.com/zalando/postgres-operator/pkg/util/constants"
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
"github.com/zalando/postgres-operator/pkg/util/volumes"
appsv1 "k8s.io/api/apps/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
v1 "k8s.io/api/core/v1"
policybeta1 "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Sync syncs the cluster, making sure the actual Kubernetes objects correspond to what is defined in the manifest.
@ -108,6 +109,11 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
err = fmt.Errorf("could not sync databases: %v", err)
return err
}
c.logger.Debugf("syncing prepared databases with schemas")
if err = c.syncPreparedDatabases(); err != nil {
err = fmt.Errorf("could not sync prepared database: %v", err)
return err
}
}

// remove PVCs of shut down pods
@ -117,7 +123,7 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
}

// sync connection pooler
if err = c.syncConnectionPooler(&oldSpec, newSpec, c.installLookupFunction); err != nil {
if _, err = c.syncConnectionPooler(&oldSpec, newSpec, c.installLookupFunction); err != nil {
return fmt.Errorf("could not sync connection pooler: %v", err)
}
@ -128,10 +134,11 @@ func (c *Cluster) syncServices() error {
for _, role := range []PostgresRole{Master, Replica} {
c.logger.Debugf("syncing %s service", role)

if !c.patroniKubernetesUseConfigMaps() {
if err := c.syncEndpoint(role); err != nil {
return fmt.Errorf("could not sync %s endpoint: %v", role, err)
}
}
if err := c.syncService(role); err != nil {
return fmt.Errorf("could not sync %s service: %v", role, err)
}
@ -257,6 +264,28 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
return nil
}

func (c *Cluster) mustUpdatePodsAfterLazyUpdate(desiredSset *appsv1.StatefulSet) (bool, error) {

pods, err := c.listPods()
if err != nil {
return false, fmt.Errorf("could not list pods of the statefulset: %v", err)
}

for _, pod := range pods {
effectivePodImage := pod.Spec.Containers[0].Image
ssImage := desiredSset.Spec.Template.Spec.Containers[0].Image

if ssImage != effectivePodImage {
c.logger.Infof("not all pods were re-started when the lazy upgrade was enabled; forcing the rolling upgrade now")
return true, nil
}
}

return false, nil
}

func (c *Cluster) syncStatefulSet() error {
var (
podsRollingUpdateRequired bool
@ -335,6 +364,19 @@ func (c *Cluster) syncStatefulSet() error {
}
}
}

if !podsRollingUpdateRequired && !c.OpConfig.EnableLazySpiloUpgrade {
// even if the desired and the actual statefulsets match,
// there may still be out-of-date pods when
// (a) the lazy update was just disabled
// and
// (b) some of the pods were not restarted while the lazy update was still in place
podsRollingUpdateRequired, err = c.mustUpdatePodsAfterLazyUpdate(desiredSS)
if err != nil {
return fmt.Errorf("could not list pods of the statefulset: %v", err)
}
}

}

// Apply special PostgreSQL parameters that can only be set via the Patroni API.
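Put together, the lazy-upgrade logic is: while the flag is on, image drift between running pods and the statefulset template is tolerated; once it is off, any drift forces a rolling update. A minimal sketch of that decision with illustrative names rather than the operator's API:

package main

import "fmt"

func needsForcedRollout(lazyUpgradeEnabled bool, podImages []string, templateImage string) bool {
	if lazyUpgradeEnabled {
		return false // drift is tolerated until the flag is switched off
	}
	for _, image := range podImages {
		if image != templateImage {
			return true
		}
	}
	return false
}

func main() {
	pods := []string{"spilo:1.6-p1", "spilo:1.6-p2"} // hypothetical pod images
	fmt.Println(needsForcedRollout(true, pods, "spilo:1.6-p2"))  // false
	fmt.Println(needsForcedRollout(false, pods, "spilo:1.6-p2")) // true
}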
@ -348,10 +390,12 @@ func (c *Cluster) syncStatefulSet() error {
// statefulset or those that got their configuration from the outdated statefulset)
if podsRollingUpdateRequired {
c.logger.Debugln("performing rolling update")
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Performing rolling update")
if err := c.recreatePods(); err != nil {
return fmt.Errorf("could not recreate pods: %v", err)
}
c.logger.Infof("pods have been recreated")
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Rolling update done - pods have been recreated")
if err := c.applyRollingUpdateFlagforStatefulSet(false); err != nil {
c.logger.Warningf("could not clear rolling update for the statefulset: %v", err)
}
@ -531,6 +575,7 @@ func (c *Cluster) syncDatabases() error {

createDatabases := make(map[string]string)
alterOwnerDatabases := make(map[string]string)
preparedDatabases := make([]string, 0)

if err := c.initDbConn(); err != nil {
return fmt.Errorf("could not init database connection")
@ -546,12 +591,24 @@ func (c *Cluster) syncDatabases() error {
return fmt.Errorf("could not get current databases: %v", err)
}

for datname, newOwner := range c.Spec.Databases {
currentOwner, exists := currentDatabases[datname]
if !exists {
createDatabases[datname] = newOwner
} else if currentOwner != newOwner {
alterOwnerDatabases[datname] = newOwner
}
}

// if no prepared databases are specified create a database named like the cluster
if c.Spec.PreparedDatabases != nil && len(c.Spec.PreparedDatabases) == 0 { // TODO: add option to disable creating such a default DB
c.Spec.PreparedDatabases = map[string]acidv1.PreparedDatabase{strings.Replace(c.Name, "-", "_", -1): {}}
}
for preparedDatabaseName := range c.Spec.PreparedDatabases {
_, exists := currentDatabases[preparedDatabaseName]
if !exists {
createDatabases[preparedDatabaseName] = preparedDatabaseName + constants.OwnerRoleNameSuffix
preparedDatabases = append(preparedDatabases, preparedDatabaseName)
}
}

for databaseName, newOwner := range c.Spec.Databases {
currentOwner, exists := currentDatabases[databaseName]
if !exists {
createDatabases[databaseName] = newOwner
} else if currentOwner != newOwner {
alterOwnerDatabases[databaseName] = newOwner
}
}
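The default prepared database is named after the cluster with dashes mapped to underscores, since "-" is not usable in an unquoted PostgreSQL identifier. A standalone illustration of the normalization (the cluster name is hypothetical):

package main

import (
	"fmt"
	"strings"
)

func main() {
	clusterName := "acid-minimal-cluster" // hypothetical cluster name
	// same normalization as above: every "-" becomes "_"
	fmt.Println(strings.Replace(clusterName, "-", "_", -1)) // acid_minimal_cluster
}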
@ -559,13 +616,116 @@ func (c *Cluster) syncDatabases() error {
return nil
}

for datname, owner := range createDatabases {
if err = c.executeCreateDatabase(datname, owner); err != nil {
return err
}
}
for datname, owner := range alterOwnerDatabases {
if err = c.executeAlterDatabaseOwner(datname, owner); err != nil {
return err
}
}

for databaseName, owner := range createDatabases {
if err = c.executeCreateDatabase(databaseName, owner); err != nil {
return err
}
}
for databaseName, owner := range alterOwnerDatabases {
if err = c.executeAlterDatabaseOwner(databaseName, owner); err != nil {
return err
}
}

// set default privileges for prepared database
for _, preparedDatabase := range preparedDatabases {
if err = c.execAlterGlobalDefaultPrivileges(preparedDatabase+constants.OwnerRoleNameSuffix, preparedDatabase); err != nil {
return err
}
}

return nil
}

func (c *Cluster) syncPreparedDatabases() error {
c.setProcessName("syncing prepared databases")
for preparedDbName, preparedDB := range c.Spec.PreparedDatabases {
if err := c.initDbConnWithName(preparedDbName); err != nil {
return fmt.Errorf("could not init connection to database %s: %v", preparedDbName, err)
}
defer func() {
if err := c.closeDbConn(); err != nil {
c.logger.Errorf("could not close database connection: %v", err)
}
}()

// now, prepare defined schemas
preparedSchemas := preparedDB.PreparedSchemas
if len(preparedDB.PreparedSchemas) == 0 {
preparedSchemas = map[string]acidv1.PreparedSchema{"data": {DefaultRoles: util.True()}}
}
if err := c.syncPreparedSchemas(preparedDbName, preparedSchemas); err != nil {
return err
}

// install extensions
if err := c.syncExtensions(preparedDB.Extensions); err != nil {
return err
}
}

return nil
}

func (c *Cluster) syncPreparedSchemas(databaseName string, preparedSchemas map[string]acidv1.PreparedSchema) error {
c.setProcessName("syncing prepared schemas")

currentSchemas, err := c.getSchemas()
if err != nil {
return fmt.Errorf("could not get current schemas: %v", err)
}

var schemas []string

for schema := range preparedSchemas {
schemas = append(schemas, schema)
}

if createPreparedSchemas, equal := util.SubstractStringSlices(schemas, currentSchemas); !equal {
for _, schemaName := range createPreparedSchemas {
owner := constants.OwnerRoleNameSuffix
dbOwner := databaseName + owner
if preparedSchemas[schemaName].DefaultRoles == nil || *preparedSchemas[schemaName].DefaultRoles {
owner = databaseName + "_" + schemaName + owner
} else {
owner = dbOwner
}
if err = c.executeCreateDatabaseSchema(databaseName, schemaName, dbOwner, owner); err != nil {
return err
}
}
}

return nil
}

func (c *Cluster) syncExtensions(extensions map[string]string) error {
c.setProcessName("syncing database extensions")

createExtensions := make(map[string]string)
alterExtensions := make(map[string]string)

currentExtensions, err := c.getExtensions()
if err != nil {
return fmt.Errorf("could not get current database extensions: %v", err)
}

for extName, newSchema := range extensions {
currentSchema, exists := currentExtensions[extName]
if !exists {
createExtensions[extName] = newSchema
} else if currentSchema != newSchema {
alterExtensions[extName] = newSchema
}
}

for extName, schema := range createExtensions {
if err = c.executeCreateExtension(extName, schema); err != nil {
return err
}
}
for extName, schema := range alterExtensions {
if err = c.executeAlterExtension(extName, schema); err != nil {
return err
}
}
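syncExtensions follows the same desired-versus-current diffing shape as the database sync: names missing from the current state are created, names whose value drifted are altered, matching entries are skipped. A self-contained sketch of that split (the extension names and schemas are made up):

package main

import "fmt"

func main() {
	desired := map[string]string{"postgis": "public", "pg_partman": "partman"}
	current := map[string]string{"postgis": "public", "pg_partman": "public"}

	create := map[string]string{}
	alter := map[string]string{}
	for name, want := range desired {
		if have, ok := current[name]; !ok {
			create[name] = want // unknown: create it
		} else if have != want {
			alter[name] = want // drifted: alter it
		}
	}
	fmt.Println(create, alter) // map[] map[pg_partman:partman]
}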
@ -626,7 +786,13 @@ func (c *Cluster) syncLogicalBackupJob() error {
return nil
}

func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, lookup InstallFunction) error {
func (c *Cluster) syncConnectionPooler(oldSpec,
newSpec *acidv1.Postgresql,
lookup InstallFunction) (SyncReason, error) {

var reason SyncReason
var err error

if c.ConnectionPooler == nil {
c.ConnectionPooler = &ConnectionPoolerObjects{}
}
@ -663,20 +829,20 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, look
specUser,
c.OpConfig.ConnectionPooler.User)

if err := lookup(schema, user); err != nil {
if err = lookup(schema, user); err != nil {
return err
return NoSync, err
}
}

if err := c.syncConnectionPoolerWorker(oldSpec, newSpec); err != nil {
if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec); err != nil {
c.logger.Errorf("could not sync connection pooler: %v", err)
return err
return reason, err
}
}

if oldNeedConnectionPooler && !newNeedConnectionPooler {
// delete and cleanup resources
if err := c.deleteConnectionPooler(); err != nil {
if err = c.deleteConnectionPooler(); err != nil {
c.logger.Warningf("could not remove connection pooler: %v", err)
}
}
@ -687,20 +853,22 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, look
(c.ConnectionPooler.Deployment != nil ||
c.ConnectionPooler.Service != nil) {

if err := c.deleteConnectionPooler(); err != nil {
if err = c.deleteConnectionPooler(); err != nil {
c.logger.Warningf("could not remove connection pooler: %v", err)
}
}
}

return nil
return reason, nil
}

// Synchronize connection pooler resources. Effectively we're interested only in
// synchronizing the corresponding deployment, but in case the deployment or
// service is missing, create it. After checking, also remember the object for
// future reference.
func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql) error {
func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql) (
SyncReason, error) {

deployment, err := c.KubeClient.
Deployments(c.Namespace).
Get(context.TODO(), c.connectionPoolerName(), metav1.GetOptions{})
@ -712,7 +880,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec)
if err != nil {
msg = "could not generate deployment for connection pooler: %v"
return fmt.Errorf(msg, err)
return NoSync, fmt.Errorf(msg, err)
}

deployment, err := c.KubeClient.
@ -720,18 +888,35 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
Create(context.TODO(), deploymentSpec, metav1.CreateOptions{})

if err != nil {
return err
return NoSync, err
}

c.ConnectionPooler.Deployment = deployment
} else if err != nil {
return fmt.Errorf("could not get connection pooler deployment to sync: %v", err)
msg := "could not get connection pooler deployment to sync: %v"
return NoSync, fmt.Errorf(msg, err)
} else {
c.ConnectionPooler.Deployment = deployment

// actual synchronization
oldConnectionPooler := oldSpec.Spec.ConnectionPooler
newConnectionPooler := newSpec.Spec.ConnectionPooler

// the sync implementation below assumes that both old and new specs
// are not nil, but either can be. To avoid any confusion, like updating
// a deployment because the specification changed from nil to an empty
// struct (that was initialized somewhere before), replace any nil with
// an empty spec.
if oldConnectionPooler == nil {
oldConnectionPooler = &acidv1.ConnectionPooler{}
}

if newConnectionPooler == nil {
newConnectionPooler = &acidv1.ConnectionPooler{}
}

c.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler)

specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler)
defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment)
reason := append(specReason, defaultsReason...)
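The nil-to-empty normalization above matters because a nil pooler spec and a pointer to a zero-valued one compare as different, which would surface as a spurious "change". A standalone sketch with a cut-down, hypothetical stand-in for the real spec type:

package main

import (
	"fmt"
	"reflect"
)

type connectionPooler struct{ NumberOfInstances *int32 } // illustrative stand-in

func main() {
	var oldSpec *connectionPooler // nil: never set in the manifest
	newSpec := &connectionPooler{}

	fmt.Println(reflect.DeepEqual(oldSpec, newSpec)) // false: looks like a change
	if oldSpec == nil {
		oldSpec = &connectionPooler{}
	}
	fmt.Println(reflect.DeepEqual(oldSpec, newSpec)) // true: nothing to sync
}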
@ -742,7 +927,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec)
if err != nil {
msg := "could not generate deployment for connection pooler: %v"
return fmt.Errorf(msg, err)
return reason, fmt.Errorf(msg, err)
}

oldDeploymentSpec := c.ConnectionPooler.Deployment
@ -752,11 +937,11 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
newDeploymentSpec)

if err != nil {
return err
return reason, err
}

c.ConnectionPooler.Deployment = deployment
return nil
return reason, nil
}
}
@ -774,16 +959,17 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
Create(context.TODO(), serviceSpec, metav1.CreateOptions{})

if err != nil {
return err
return NoSync, err
}

c.ConnectionPooler.Service = service
} else if err != nil {
return fmt.Errorf("could not get connection pooler service to sync: %v", err)
msg := "could not get connection pooler service to sync: %v"
return NoSync, fmt.Errorf(msg, err)
} else {
// Service updates are not supported and probably not that useful anyway
c.ConnectionPooler.Service = service
}

return nil
return NoSync, nil
}
@ -2,6 +2,7 @@ package cluster

import (
"fmt"
"strings"
"testing"

acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
@ -17,7 +18,7 @@ func int32ToPointer(value int32) *int32 {
return &value
}

func deploymentUpdated(cluster *Cluster, err error) error {
func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error {
if cluster.ConnectionPooler.Deployment.Spec.Replicas == nil ||
*cluster.ConnectionPooler.Deployment.Spec.Replicas != 2 {
return fmt.Errorf("Wrong number of instances")
@ -26,7 +27,7 @@ func deploymentUpdated(cluster *Cluster, err error) error {
return nil
}

func objectsAreSaved(cluster *Cluster, err error) error {
func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error {
if cluster.ConnectionPooler == nil {
return fmt.Errorf("Connection pooler resources are empty")
}

@ -42,7 +43,7 @@ func objectsAreSaved(cluster *Cluster, err error) error {
return nil
}

func objectsAreDeleted(cluster *Cluster, err error) error {
func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error {
if cluster.ConnectionPooler != nil {
return fmt.Errorf("Connection pooler was not deleted")
}
@ -50,6 +51,16 @@ func objectsAreDeleted(cluster *Cluster, err error) error {
return nil
}

func noEmptySync(cluster *Cluster, err error, reason SyncReason) error {
for _, msg := range reason {
if strings.HasPrefix(msg, "update [] from '<nil>' to '") {
return fmt.Errorf("There is an empty reason, %s", msg)
}
}

return nil
}

func TestConnectionPoolerSynchronization(t *testing.T) {
testName := "Test connection pooler synchronization"
var cluster = New(
@ -68,7 +79,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
NumberOfInstances: int32ToPointer(1),
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)

cluster.Statefulset = &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
@ -91,15 +102,15 @@ func TestConnectionPoolerSynchronization(t *testing.T) {

clusterNewDefaultsMock := *cluster
clusterNewDefaultsMock.KubeClient = k8sutil.NewMockKubernetesClient()
cluster.OpConfig.ConnectionPooler.Image = "pooler:2.0"
cluster.OpConfig.ConnectionPooler.NumberOfInstances = int32ToPointer(2)

tests := []struct {
subTest string
oldSpec *acidv1.Postgresql
newSpec *acidv1.Postgresql
cluster *Cluster
check func(cluster *Cluster, err error) error
defaultImage string
defaultInstances int32
check func(cluster *Cluster, err error, reason SyncReason) error
}{
{
subTest: "create if doesn't exist",
@ -114,6 +125,8 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
},
},
cluster: &clusterMissingObjects,
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: objectsAreSaved,
},
{

@ -127,6 +140,8 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
},
},
cluster: &clusterMissingObjects,
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: objectsAreSaved,
},
{

@ -140,6 +155,8 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
},
},
cluster: &clusterMissingObjects,
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: objectsAreSaved,
},
{

@ -153,6 +170,8 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
Spec: acidv1.PostgresSpec{},
},
cluster: &clusterMock,
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: objectsAreDeleted,
},
{

@ -164,6 +183,8 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
Spec: acidv1.PostgresSpec{},
},
cluster: &clusterDirtyMock,
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: objectsAreDeleted,
},
{

@ -183,6 +204,8 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
},
},
cluster: &clusterMock,
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: deploymentUpdated,
},
{
@ -198,13 +221,39 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
cluster: &clusterNewDefaultsMock,
|
cluster: &clusterNewDefaultsMock,
|
||||||
|
defaultImage: "pooler:2.0",
|
||||||
|
defaultInstances: 2,
|
||||||
check: deploymentUpdated,
|
check: deploymentUpdated,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
subTest: "there is no sync from nil to an empty spec",
|
||||||
|
oldSpec: &acidv1.Postgresql{
|
||||||
|
Spec: acidv1.PostgresSpec{
|
||||||
|
EnableConnectionPooler: boolToPointer(true),
|
||||||
|
ConnectionPooler: nil,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
newSpec: &acidv1.Postgresql{
|
||||||
|
Spec: acidv1.PostgresSpec{
|
||||||
|
EnableConnectionPooler: boolToPointer(true),
|
||||||
|
ConnectionPooler: &acidv1.ConnectionPooler{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
cluster: &clusterMock,
|
||||||
|
defaultImage: "pooler:1.0",
|
||||||
|
defaultInstances: 1,
|
||||||
|
check: noEmptySync,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
err := tt.cluster.syncConnectionPooler(tt.oldSpec, tt.newSpec, mockInstallLookupFunction)
|
tt.cluster.OpConfig.ConnectionPooler.Image = tt.defaultImage
|
||||||
|
tt.cluster.OpConfig.ConnectionPooler.NumberOfInstances =
|
||||||
|
int32ToPointer(tt.defaultInstances)
|
||||||
|
|
||||||
if err := tt.check(tt.cluster, err); err != nil {
|
reason, err := tt.cluster.syncConnectionPooler(tt.oldSpec,
|
||||||
|
tt.newSpec, mockInstallLookupFunction)
|
||||||
|
|
||||||
|
if err := tt.check(tt.cluster, err, reason); err != nil {
|
||||||
t.Errorf("%s [%s]: Could not synchronize, %+v",
|
t.Errorf("%s [%s]: Could not synchronize, %+v",
|
||||||
testName, tt.subTest, err)
|
testName, tt.subTest, err)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
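The table above drives every scenario through one loop; each entry supplies a check callback that inspects the cluster after syncConnectionPooler returns. Below is a minimal, self-contained sketch of such a callback under the new three-argument signature; the Cluster and SyncReason stand-ins here are illustrative only, not the operator's real types.

package main

import "fmt"

// Stand-ins for the operator's types, just enough to show the shape.
type SyncReason []string
type Cluster struct{}

// noEmptySyncSketch mirrors the new check signature: it fails the case if a
// sync from a nil pooler spec to an empty one reported any work to do.
func noEmptySyncSketch(cluster *Cluster, err error, reason SyncReason) error {
    if err != nil {
        return fmt.Errorf("unexpected error: %v", err)
    }
    if len(reason) > 0 {
        return fmt.Errorf("expected no sync, got reasons: %v", reason)
    }
    return nil
}

func main() {
    fmt.Println(noEmptySyncSketch(&Cluster{}, nil, SyncReason{})) // <nil>
}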
@ -73,3 +73,8 @@ type ClusterStatus struct {
 type TemplateParams map[string]interface{}

 type InstallFunction func(schema string, user string) error
+
+type SyncReason []string
+
+// no sync happened, empty value
+var NoSync SyncReason = []string{}
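SyncReason is simply a list of human-readable explanations of why a sync ran, and NoSync is its empty sentinel. The sketch below shows the intended usage; the diffSpecs helper is hypothetical and not part of the patch.

package main

import "fmt"

type SyncReason []string

// no sync happened, empty value
var NoSync SyncReason = []string{}

// diffSpecs is a hypothetical helper: collect explanations of why a sync is
// needed, or return NoSync when old and new connection pooler specs agree.
func diffSpecs(oldImage, newImage string, oldInstances, newInstances int32) SyncReason {
    reason := SyncReason{}
    if oldImage != newImage {
        reason = append(reason, fmt.Sprintf("image changed from %s to %s", oldImage, newImage))
    }
    if oldInstances != newInstances {
        reason = append(reason, fmt.Sprintf("instances changed from %d to %d", oldInstances, newInstances))
    }
    if len(reason) == 0 {
        return NoSync
    }
    return reason
}

func main() {
    fmt.Println(diffSpecs("pooler:1.0", "pooler:2.0", 1, 2))
}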
@ -530,3 +530,22 @@ func (c *Cluster) needConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
 func (c *Cluster) needConnectionPooler() bool {
    return c.needConnectionPoolerWorker(&c.Spec)
 }
+
+// Earlier arguments take priority
+func mergeContainers(containers ...[]v1.Container) ([]v1.Container, []string) {
+   containerNameTaken := map[string]bool{}
+   result := make([]v1.Container, 0)
+   conflicts := make([]string, 0)
+
+   for _, containerArray := range containers {
+       for _, container := range containerArray {
+           if _, taken := containerNameTaken[container.Name]; taken {
+               conflicts = append(conflicts, container.Name)
+           } else {
+               containerNameTaken[container.Name] = true
+               result = append(result, container)
+           }
+       }
+   }
+   return result, conflicts
+}
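Because mergeContainers is short and pure, a usage example makes the precedence rule concrete. The function body below is copied from the patch; the main function is illustrative scaffolding.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

// mergeContainers as introduced above: earlier argument lists win, and any
// name that appears again later is reported as a conflict.
func mergeContainers(containers ...[]v1.Container) ([]v1.Container, []string) {
    containerNameTaken := map[string]bool{}
    result := make([]v1.Container, 0)
    conflicts := make([]string, 0)

    for _, containerArray := range containers {
        for _, container := range containerArray {
            if _, taken := containerNameTaken[container.Name]; taken {
                conflicts = append(conflicts, container.Name)
            } else {
                containerNameTaken[container.Name] = true
                result = append(result, container)
            }
        }
    }
    return result, conflicts
}

func main() {
    spilo := []v1.Container{{Name: "postgres", Image: "spilo:12"}}
    sidecars := []v1.Container{
        {Name: "metrics", Image: "exporter:1"},
        {Name: "postgres", Image: "other:1"}, // clashes with the main container
    }
    merged, conflicts := mergeContainers(spilo, sidecars)
    fmt.Println(len(merged), conflicts) // 2 [postgres]
}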
@ -7,24 +7,24 @@ import (
    "sync"

    "github.com/sirupsen/logrus"
-   v1 "k8s.io/api/core/v1"
-   rbacv1 "k8s.io/api/rbac/v1"
-   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-   "k8s.io/apimachinery/pkg/types"
-   "k8s.io/client-go/kubernetes/scheme"
-   "k8s.io/client-go/tools/cache"

    acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
    "github.com/zalando/postgres-operator/pkg/apiserver"
    "github.com/zalando/postgres-operator/pkg/cluster"
+   acidv1informer "github.com/zalando/postgres-operator/pkg/generated/informers/externalversions/acid.zalan.do/v1"
    "github.com/zalando/postgres-operator/pkg/spec"
    "github.com/zalando/postgres-operator/pkg/util"
    "github.com/zalando/postgres-operator/pkg/util/config"
    "github.com/zalando/postgres-operator/pkg/util/constants"
    "github.com/zalando/postgres-operator/pkg/util/k8sutil"
    "github.com/zalando/postgres-operator/pkg/util/ringlog"
-   acidv1informer "github.com/zalando/postgres-operator/pkg/generated/informers/externalversions/acid.zalan.do/v1"
+   v1 "k8s.io/api/core/v1"
+   rbacv1 "k8s.io/api/rbac/v1"
+   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+   "k8s.io/apimachinery/pkg/types"
+   "k8s.io/client-go/kubernetes/scheme"
+   typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+   "k8s.io/client-go/tools/cache"
+   "k8s.io/client-go/tools/record"
 )

 // Controller represents operator controller
@ -36,6 +36,9 @@ type Controller struct {
    KubeClient k8sutil.KubernetesClient
    apiserver  *apiserver.Server

+   eventRecorder    record.EventRecorder
+   eventBroadcaster record.EventBroadcaster
+
    stopCh chan struct{}

    controllerID string
@ -67,10 +70,21 @@ type Controller struct {
 func NewController(controllerConfig *spec.ControllerConfig, controllerId string) *Controller {
    logger := logrus.New()

+   var myComponentName = "postgres-operator"
+   if controllerId != "" {
+       myComponentName += "/" + controllerId
+   }
+
+   eventBroadcaster := record.NewBroadcaster()
+   eventBroadcaster.StartLogging(logger.Infof)
+   recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: myComponentName})
+
    c := &Controller{
        config:           *controllerConfig,
        opConfig:         &config.Config{},
        logger:           logger.WithField("pkg", "controller"),
+       eventRecorder:    recorder,
+       eventBroadcaster: eventBroadcaster,
        controllerID:     controllerId,
        curWorkerCluster: sync.Map{},
        clusterWorkers:   make(map[spec.NamespacedName]uint32),
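The broadcaster/recorder wiring above is standard client-go event machinery. The following self-contained sketch reproduces the same wiring against a fake clientset so it can be run outside the operator; the component name is taken from the patch, everything else is demo scaffolding.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
    "k8s.io/client-go/kubernetes/scheme"
    typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/record"
)

func main() {
    client := fake.NewSimpleClientset()

    // Same wiring as NewController/initClients above: a broadcaster fans
    // events out to logging and to the Kubernetes API via an event sink.
    broadcaster := record.NewBroadcaster()
    broadcaster.StartLogging(func(format string, args ...interface{}) {
        fmt.Printf(format+"\n", args...)
    })
    broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
        Interface: client.CoreV1().Events(""),
    })
    recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "postgres-operator"})

    // Events must reference an API object; a Pod works for the demo.
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
    recorder.Eventf(pod, v1.EventTypeWarning, "Sync", "could not sync cluster: %v", "example error")
}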
@ -93,6 +107,11 @@ func (c *Controller) initClients() {
    if err != nil {
        c.logger.Fatalf("could not create kubernetes clients: %v", err)
    }
+   c.eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: c.KubeClient.EventsGetter.Events("")})
+   if err != nil {
+       c.logger.Fatalf("could not setup kubernetes event sink: %v", err)
+   }
+
 }

 func (c *Controller) initOperatorConfig() {
@ -159,6 +178,11 @@ func (c *Controller) warnOnDeprecatedOperatorParameters() {
        c.logger.Warningf("Operator configuration parameter 'enable_load_balancer' is deprecated and takes no effect. " +
            "Consider using the 'enable_master_load_balancer' or 'enable_replica_load_balancer' instead.")
    }
+
+   if len(c.opConfig.SidecarImages) > 0 {
+       c.logger.Warningf("Operator configuration parameter 'sidecar_docker_images' is deprecated. " +
+           "Consider using 'sidecars' instead.")
+   }
 }

 func (c *Controller) initPodServiceAccount() {
@ -34,6 +34,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur

    // general config
    result.EnableCRDValidation = fromCRD.EnableCRDValidation
+   result.EnableLazySpiloUpgrade = fromCRD.EnableLazySpiloUpgrade
    result.EtcdHost = fromCRD.EtcdHost
    result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
    result.DockerImage = fromCRD.DockerImage
@ -44,7 +45,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
    result.RepairPeriod = time.Duration(fromCRD.RepairPeriod)
    result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit
    result.ShmVolume = fromCRD.ShmVolume
-   result.Sidecars = fromCRD.Sidecars
+   result.SidecarImages = fromCRD.SidecarImages
+   result.SidecarContainers = fromCRD.SidecarContainers

    // user config
    result.SuperUsername = fromCRD.PostgresUsersConfiguration.SuperUsername
@ -11,6 +11,7 @@ import (

    "github.com/sirupsen/logrus"

+   v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/tools/cache"
@ -157,7 +158,7 @@ func (c *Controller) acquireInitialListOfClusters() error {
 }

 func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *acidv1.Postgresql) *cluster.Cluster {
-   cl := cluster.New(c.makeClusterConfig(), c.KubeClient, *pgSpec, lg)
+   cl := cluster.New(c.makeClusterConfig(), c.KubeClient, *pgSpec, lg, c.eventRecorder)
    cl.Run(c.stopCh)
    teamName := strings.ToLower(cl.Spec.TeamID)
@ -236,6 +237,7 @@ func (c *Controller) processEvent(event ClusterEvent) {
        if err := cl.Create(); err != nil {
            cl.Error = fmt.Sprintf("could not create cluster: %v", err)
            lg.Error(cl.Error)
+           c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Create", "%v", cl.Error)

            return
        }
@ -274,6 +276,8 @@ func (c *Controller) processEvent(event ClusterEvent) {

        c.curWorkerCluster.Store(event.WorkerID, cl)
        cl.Delete()
+       // Fixme - no error handling for delete ?
+       // c.eventRecorder.Eventf(cl.GetReference, v1.EventTypeWarning, "Delete", "%v", cl.Error)

        func() {
            defer c.clustersMu.Unlock()
@ -304,6 +308,7 @@ func (c *Controller) processEvent(event ClusterEvent) {
        c.curWorkerCluster.Store(event.WorkerID, cl)
        if err := cl.Sync(event.NewSpec); err != nil {
            cl.Error = fmt.Sprintf("could not sync cluster: %v", err)
+           c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Sync", "%v", cl.Error)
            lg.Error(cl.Error)
            return
        }
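Eventf requires an ObjectReference for the involved object, which is what cl.GetReference() supplies. The sketch below shows one plausible way such a helper is built with client-go's reference package, demonstrated on a Pod because core types are pre-registered in the default scheme; the operator's actual GetReference implementation may differ.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/scheme"
    ref "k8s.io/client-go/tools/reference"
)

func main() {
    // Build the reference the event recorder attaches events to.
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "acid-minimal-cluster-0", Namespace: "default"}}
    objRef, err := ref.GetReference(scheme.Scheme, pod)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s/%s (%s)\n", objRef.Namespace, objRef.Name, objRef.Kind)
}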
@ -1,9 +1,10 @@
 package controller

 import (
-   "k8s.io/apimachinery/pkg/types"
    "time"

+   "k8s.io/apimachinery/pkg/types"
+
    acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 )
@ -31,6 +31,7 @@ const (
    RoleOriginInfrastructure
    RoleOriginTeamsAPI
    RoleOriginSystem
+   RoleOriginBootstrap
    RoleConnectionPooler
 )

@ -180,6 +181,8 @@ func (r RoleOrigin) String() string {
        return "teams API role"
    case RoleOriginSystem:
        return "system role"
+   case RoleOriginBootstrap:
+       return "bootstrapped role"
    case RoleConnectionPooler:
        return "connection pooler role"
    default:
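RoleOrigin is an iota-based enum, so inserting RoleOriginBootstrap ahead of RoleConnectionPooler renumbers the pooler constant; that is harmless as long as code compares only the named constants, never raw integers. A compressed, runnable stand-in:

package main

import "fmt"

// A miniature stand-in for the RoleOrigin enum in the patch.
type RoleOrigin int

const (
    RoleOriginUnknown RoleOrigin = iota
    RoleOriginSystem
    RoleOriginBootstrap
    RoleConnectionPooler
)

func (r RoleOrigin) String() string {
    switch r {
    case RoleOriginSystem:
        return "system role"
    case RoleOriginBootstrap:
        return "bootstrapped role"
    case RoleConnectionPooler:
        return "connection pooler role"
    default:
        return "unknown"
    }
}

func main() {
    fmt.Println(RoleOriginBootstrap, int(RoleConnectionPooler)) // bootstrapped role 3
}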
@ -9,6 +9,7 @@ import (

    "github.com/zalando/postgres-operator/pkg/spec"
    "github.com/zalando/postgres-operator/pkg/util/constants"
+   v1 "k8s.io/api/core/v1"
 )

 // CRD describes CustomResourceDefinition specific configuration parameters
@ -110,8 +111,10 @@ type Config struct {
    WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
    KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"`
    EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
-   DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p2"`
-   Sidecars map[string]string `name:"sidecar_docker_images"`
+   DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p115"`
+   // deprecated in favour of SidecarContainers
+   SidecarImages map[string]string `name:"sidecar_docker_images"`
+   SidecarContainers []v1.Container `name:"sidecars"`
    PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
    // value of this string must be valid JSON or YAML; see initPodServiceAccount
    PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""`
@ -155,6 +158,7 @@ type Config struct {
    PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""`
    SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"`
    EnableUnusedPVCDeletion bool `name:"enable_unused_pvc_deletion" default:"false"`
+   EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"`
 }

 // MustMarshal marshals the config or panics
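The Config struct relies on custom name and default struct tags that a tag-driven loader evaluates elsewhere in the repo. A minimal sketch of reading such tags via reflection, using an illustrative miniature Config rather than the real one:

package main

import (
    "fmt"
    "reflect"
)

// A miniature Config with the same tag convention as the patch.
type Config struct {
    DockerImage            string `name:"docker_image" default:"spilo:12"`
    EnableLazySpiloUpgrade bool   `name:"enable_lazy_spilo_upgrade" default:"false"`
}

// printFields walks the struct and reports each field's external name and
// default, which is essentially what a tag-driven config loader iterates over.
func printFields(cfg interface{}) {
    t := reflect.TypeOf(cfg)
    for i := 0; i < t.NumField(); i++ {
        f := t.Field(i)
        fmt.Printf("%s -> name=%q default=%q\n", f.Name, f.Tag.Get("name"), f.Tag.Get("default"))
    }
}

func main() {
    printFields(Config{})
}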
@ -14,4 +14,8 @@ const (
    RoleFlagCreateDB = "CREATEDB"
    RoleFlagReplication = "REPLICATION"
    RoleFlagByPassRLS = "BYPASSRLS"
+   OwnerRoleNameSuffix = "_owner"
+   ReaderRoleNameSuffix = "_reader"
+   WriterRoleNameSuffix = "_writer"
+   UserRoleNameSuffix = "_user"
 )
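These suffixes pair with the preparedDatabases feature: for a database foo, default group roles such as foo_owner, foo_reader and foo_writer are derived. A hedged sketch of that derivation follows; the operator's actual helper may differ.

package main

import "fmt"

const (
    OwnerRoleNameSuffix  = "_owner"
    ReaderRoleNameSuffix = "_reader"
    WriterRoleNameSuffix = "_writer"
)

// defaultRolesFor sketches how the suffixes could translate a prepared
// database name into its default group roles.
func defaultRolesFor(db string) []string {
    return []string{
        db + OwnerRoleNameSuffix,
        db + ReaderRoleNameSuffix,
        db + WriterRoleNameSuffix,
    }
}

func main() {
    fmt.Println(defaultRolesFor("foo")) // [foo_owner foo_reader foo_writer]
}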
@ -45,6 +45,7 @@ type KubernetesClient struct {
    corev1.NodesGetter
    corev1.NamespacesGetter
    corev1.ServiceAccountsGetter
+   corev1.EventsGetter
    appsv1.StatefulSetsGetter
    appsv1.DeploymentsGetter
    rbacv1.RoleBindingsGetter

@ -142,6 +143,7 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) {
    kubeClient.RESTClient = client.CoreV1().RESTClient()
    kubeClient.RoleBindingsGetter = client.RbacV1()
    kubeClient.CronJobsGetter = client.BatchV1beta1()
+   kubeClient.EventsGetter = client.CoreV1()

    apiextClient, err := apiextclient.NewForConfig(cfg)
    if err != nil {
@ -3,6 +3,7 @@ package patroni
 import (
    "bytes"
    "encoding/json"
+   "errors"
    "fmt"
    "io/ioutil"
    "net"
@ -11,7 +12,7 @@ import (
    "time"

    "github.com/sirupsen/logrus"
-   "k8s.io/api/core/v1"
+   v1 "k8s.io/api/core/v1"
 )

 const (
@ -25,6 +26,7 @@ const (
 type Interface interface {
    Switchover(master *v1.Pod, candidate string) error
    SetPostgresParameters(server *v1.Pod, options map[string]string) error
+   GetPatroniMemberState(pod *v1.Pod) (string, error)
 }

 // Patroni API client
@ -123,3 +125,36 @@ func (p *Patroni) SetPostgresParameters(server *v1.Pod, parameters map[string]st
    }
    return p.httpPostOrPatch(http.MethodPatch, apiURLString+configPath, buf)
 }
+
+// GetPatroniMemberState returns the state of a member of a Patroni cluster
+func (p *Patroni) GetPatroniMemberState(server *v1.Pod) (string, error) {
+
+   apiURLString, err := apiURL(server)
+   if err != nil {
+       return "", err
+   }
+   response, err := p.httpClient.Get(apiURLString)
+   if err != nil {
+       return "", fmt.Errorf("could not perform Get request: %v", err)
+   }
+   defer response.Body.Close()
+
+   body, err := ioutil.ReadAll(response.Body)
+   if err != nil {
+       return "", fmt.Errorf("could not read response: %v", err)
+   }
+
+   data := make(map[string]interface{})
+   err = json.Unmarshal(body, &data)
+   if err != nil {
+       return "", err
+   }
+
+   state, ok := data["state"].(string)
+   if !ok {
+       return "", errors.New("Patroni Get call response contains wrong type for 'state' field")
+   }
+
+   return state, nil
+}
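The new method simply GETs the member endpoint and pulls the state field out of Patroni's status JSON. The sketch below extracts that parsing into a URL-based helper so it can be exercised against a fake server; the JSON body mirrors a typical Patroni response and is illustrative.

package main

import (
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "net/http"
    "net/http/httptest"
)

// memberState reproduces the parsing logic of GetPatroniMemberState against
// an arbitrary URL, so it can run without a Pod or a real cluster.
func memberState(client *http.Client, url string) (string, error) {
    response, err := client.Get(url)
    if err != nil {
        return "", fmt.Errorf("could not perform Get request: %v", err)
    }
    defer response.Body.Close()

    body, err := ioutil.ReadAll(response.Body)
    if err != nil {
        return "", fmt.Errorf("could not read response: %v", err)
    }

    data := make(map[string]interface{})
    if err = json.Unmarshal(body, &data); err != nil {
        return "", err
    }

    state, ok := data["state"].(string)
    if !ok {
        return "", errors.New("response contains wrong type for 'state' field")
    }
    return state, nil
}

func main() {
    // A fake Patroni member API returning the usual status document.
    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprint(w, `{"state": "running", "role": "master"}`)
    }))
    defer srv.Close()

    state, err := memberState(srv.Client(), srv.URL)
    fmt.Println(state, err) // running <nil>
}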
@ -73,26 +73,44 @@ func (strategy DefaultUserSyncStrategy) ProduceSyncRequests(dbUsers spec.PgUserM
 }

 // ExecuteSyncRequests makes actual database changes from the requests passed in its arguments.
-func (strategy DefaultUserSyncStrategy) ExecuteSyncRequests(reqs []spec.PgSyncUserRequest, db *sql.DB) error {
-   for _, r := range reqs {
-       switch r.Kind {
+func (strategy DefaultUserSyncStrategy) ExecuteSyncRequests(requests []spec.PgSyncUserRequest, db *sql.DB) error {
+   var reqretries []spec.PgSyncUserRequest
+   var errors []string
+   for _, request := range requests {
+       switch request.Kind {
        case spec.PGSyncUserAdd:
-           if err := strategy.createPgUser(r.User, db); err != nil {
-               return fmt.Errorf("could not create user %q: %v", r.User.Name, err)
+           if err := strategy.createPgUser(request.User, db); err != nil {
+               reqretries = append(reqretries, request)
+               errors = append(errors, fmt.Sprintf("could not create user %q: %v", request.User.Name, err))
            }
        case spec.PGsyncUserAlter:
-           if err := strategy.alterPgUser(r.User, db); err != nil {
-               return fmt.Errorf("could not alter user %q: %v", r.User.Name, err)
+           if err := strategy.alterPgUser(request.User, db); err != nil {
+               reqretries = append(reqretries, request)
+               errors = append(errors, fmt.Sprintf("could not alter user %q: %v", request.User.Name, err))
            }
        case spec.PGSyncAlterSet:
-           if err := strategy.alterPgUserSet(r.User, db); err != nil {
-               return fmt.Errorf("could not set custom user %q parameters: %v", r.User.Name, err)
+           if err := strategy.alterPgUserSet(request.User, db); err != nil {
+               reqretries = append(reqretries, request)
+               errors = append(errors, fmt.Sprintf("could not set custom user %q parameters: %v", request.User.Name, err))
            }
        default:
-           return fmt.Errorf("unrecognized operation: %v", r.Kind)
+           return fmt.Errorf("unrecognized operation: %v", request.Kind)
        }

    }

+   // creating roles might fail if group role members are created before the parent role
+   // retry adding roles as long as the number of failed attempts is shrinking
+   if len(reqretries) > 0 {
+       if len(reqretries) < len(requests) {
+           if err := strategy.ExecuteSyncRequests(reqretries, db); err != nil {
+               return err
+           }
+       } else {
+           return fmt.Errorf("could not execute sync requests for users: %v", errors)
+       }
+   }
+
    return nil
 }

 func (strategy DefaultUserSyncStrategy) alterPgUserSet(user spec.PgUser, db *sql.DB) (err error) {
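The retry loop at the bottom is the interesting part: failed requests are re-run recursively, but only while each pass fails on strictly fewer items than it received, so the recursion must terminate. The same pattern in isolation, with hypothetical role names:

package main

import (
    "errors"
    "fmt"
)

// retryShrinking isolates the pattern used in ExecuteSyncRequests: re-run
// the failed subset recursively, but only while each pass fails on strictly
// fewer items than it was given, which guarantees termination.
func retryShrinking(items []string, apply func(string) error) error {
    var failed []string
    for _, item := range items {
        if err := apply(item); err != nil {
            failed = append(failed, item)
        }
    }
    if len(failed) > 0 {
        if len(failed) < len(items) {
            return retryShrinking(failed, apply)
        }
        return fmt.Errorf("no progress on: %v", failed)
    }
    return nil
}

func main() {
    // "child" only succeeds once "parent" exists, mimicking group role
    // members being created before their parent role.
    created := map[string]bool{}
    apply := func(role string) error {
        if role == "child" && !created["parent"] {
            return errors.New("parent role missing")
        }
        created[role] = true
        return nil
    }
    fmt.Println(retryShrinking([]string{"child", "parent"}, apply)) // <nil>
}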