merge with master

Felix Kunde 2021-08-17 17:37:32 +02:00
commit 99aa89643b
35 changed files with 1018 additions and 878 deletions

View File

@@ -7,6 +7,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ template "postgres-operator-ui.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
replicas: 1
selector:

View File

@@ -1,7 +1,10 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "postgres-operator-ui.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
@@ -9,6 +12,7 @@ apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
@@ -36,9 +40,18 @@ spec:
paths:
{{- range .paths }}
- path: {{ . }}
{{ if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion -}}
pathType: ImplementationSpecific
backend:
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else -}}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end -}}
{{- end }}
{{- end }}
{{- end }}
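For orientation, on a cluster at or above Kubernetes 1.19 the template above renders to the `networking.k8s.io/v1` Ingress shape. A rough sketch of the rendered output (host, path, and port values here are illustrative, not chart defaults):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: postgres-operator-ui
  namespace: default
spec:
  rules:
  - host: ui.example.org
    http:
      paths:
      - path: /
        pathType: ImplementationSpecific
        backend:
          service:
            name: postgres-operator-ui
            port:
              number: 80
```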

View File

@@ -7,6 +7,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ template "postgres-operator-ui.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.service.port }}

View File

@@ -3,6 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "postgres-operator-ui.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}

View File

@@ -173,6 +173,9 @@ spec:
enable_init_containers:
type: boolean
default: true
enable_cross_namespace_secret:
type: boolean
default: false
enable_pod_antiaffinity:
type: boolean
default: false

View File

@@ -223,6 +223,97 @@ spec:
items:
type: string
pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
nodeAffinity:
type: object
properties:
preferredDuringSchedulingIgnoredDuringExecution:
type: array
items:
type: object
required:
- weight
- preference
properties:
preference:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
weight:
format: int32
type: integer
requiredDuringSchedulingIgnoredDuringExecution:
type: object
required:
- nodeSelectorTerms
properties:
nodeSelectorTerms:
type: array
items:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
numberOfInstances:
type: integer
minimum: 0
@@ -396,97 +487,6 @@ spec:
type: string
caSecretName:
type: string
nodeAffinity:
type: object
properties:
preferredDuringSchedulingIgnoredDuringExecution:
type: array
items:
type: object
required:
- weight
- preference
properties:
preference:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
weight:
format: int32
type: integer
requiredDuringSchedulingIgnoredDuringExecution:
type: object
required:
- nodeSelectorTerms
properties:
nodeSelectorTerms:
type: array
items:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
tolerations:
type: array
items:
@@ -515,8 +515,6 @@ spec:
type: integer
useLoadBalancer: # deprecated
type: boolean
enableNamespacedSecret:
type: boolean
users:
type: object
additionalProperties:
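The `nodeAffinity` schema added above mirrors the Kubernetes NodeAffinity API (the spec field is typed as `*v1.NodeAffinity` in the Go code below). As an illustration, a `postgresql` manifest could pin pods to a node group roughly like this sketch; the label key and values are invented for the example:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
      - matchExpressions:
        - key: environment      # example label, not prescribed by the operator
          operator: In
          values:
          - postgres
```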

View File

@@ -3,6 +3,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "postgres-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
helm.sh/chart: {{ template "postgres-operator.chart" . }}

View File

@@ -7,6 +7,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ template "postgres-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
replicas: 1
selector:

View File

@@ -3,6 +3,7 @@ apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
name: {{ template "postgres-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
helm.sh/chart: {{ template "postgres-operator.chart" . }}

View File

@@ -9,6 +9,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ .Values.podPriorityClassName }}
namespace: {{ .Release.Namespace }}
preemptionPolicy: PreemptLowerPriority
globalDefault: false
value: 1000000

View File

@@ -7,6 +7,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ template "postgres-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
type: ClusterIP
ports:

View File

@@ -3,6 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "postgres-operator.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
helm.sh/chart: {{ template "postgres-operator.chart" . }}

View File

@@ -97,6 +97,8 @@ configKubernetes:
# - deployment-time
# - downscaler/*
# allow user secrets in other namespaces than the Postgres cluster
enable_cross_namespace_secret: false
# enables initContainers to run actions before Spilo is started
enable_init_containers: true
# toggles pod anti affinity on the Postgres pods
@@ -151,7 +153,7 @@ configKubernetes:
# template for database user secrets generated by the operator,
# here username contains the namespace in the format namespace.username
# if the user is in different namespace than cluster and cross namespace secrets
# are enabled via EnableNamespacedSecret flag.
# are enabled via `enable_cross_namespace_secret` flag in the configuration.
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
# set user and group for the spilo container (required to run Spilo as non-root process)
# spilo_runasuser: 101

View File

@@ -14,7 +14,7 @@ solutions:
* [kind](https://kind.sigs.k8s.io/) and [k3d](https://k3d.io), which allow creating multi-node K8s
clusters running on Docker (requires Docker)
To interact with the K8s infrastructure install it's CLI runtime [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-via-curl).
To interact with the K8s infrastructure install its CLI runtime [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-via-curl).
This quickstart assumes that you have started minikube or created a local kind
cluster. Note that you can also use built-in K8s support in the Docker Desktop
@@ -85,6 +85,8 @@ The chart works with both Helm 2 and Helm 3. The `crd-install` hook from v2 will
be skipped with warning when using v3. Documentation for installing applications
with Helm 2 can be found in the [v2 docs](https://v2.helm.sh/docs/).
The chart is also hosted at: https://opensource.zalando.com/postgres-operator/charts/postgres-operator/
## Check if Postgres Operator is running
Starting the operator may take a few seconds. Check if the operator pod is

View File

@@ -264,6 +264,13 @@ configuration they are grouped under the `kubernetes` key.
[admin docs](../administrator.md#pod-disruption-budget) for more information.
Default is true.
* **enable_cross_namespace_secret**
To allow user secrets in a namespace other than the Postgres cluster
namespace. Once enabled, specify the namespace in the user name under the
`users` section in the form `{namespace}.{username}`. The operator will then
create the user secret in that namespace. The part after the first `.` is
considered to be the user name. The default is `false`.
* **enable_init_containers**
global option to allow for creating init containers in the cluster manifest to
run actions before Spilo is started. Default is true.
@@ -275,13 +282,12 @@ configuration they are grouped under the `kubernetes` key.
* **secret_name_template**
a template for the name of the database user secrets generated by the
operator. `{namespace}` is replaced with name of the namespace (if cross
namespace secrets are enabled via EnableNamespacedSecret flag, otherwise the
secret is in cluster's namespace and in that case it is not present in secret
name), `{username}` is replaced with name of the secret, `{cluster}` with the
name of the cluster, `{tprkind}` with the kind of CRD (formerly known as TPR)
and `{tprgroup}` with the group of the CRD. No other placeholders are allowed.
The default is
operator. `{namespace}` is replaced with name of the namespace if
`enable_cross_namespace_secret` is set, otherwise the
secret is in cluster's namespace. `{username}` is replaced with name of the
secret, `{cluster}` with the name of the cluster, `{tprkind}` with the kind
of CRD (formerly known as TPR) and `{tprgroup}` with the group of the CRD.
No other placeholders are allowed. The default is
`{namespace}.{username}.{cluster}.credentials.{tprkind}.{tprgroup}`.
* **cluster_domain**

View File

@@ -140,7 +140,7 @@ At the moment it is not possible to define membership of the manifest role in
other roles.
To define the secrets for the users in a different namespace than that of the cluster,
one can use the flag `EnableNamespacedSecret` and declare the namespace for the
one can set `enable_cross_namespace_secret` and declare the namespace for the
secrets in the manifest in the following manner,
```yaml
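# Illustrative sketch only: the example block itself is cut off by this hunk.
# Based on the surrounding docs and the "appspace.db_user" user appearing in the
# tests of this commit, a cross-namespace user is declared with the namespace as
# a prefix of the user name:
spec:
  users:
    appspace.db_user:   # the secret for this role is created in the "appspace" namespace
    - createdb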

View File

@@ -156,6 +156,10 @@ class K8s:
while not get_services():
time.sleep(self.RETRY_TIMEOUT_SEC)
def count_pods_with_rolling_update_flag(self, labels, namespace='default'):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
return len(list(filter(lambda x: "zalando-postgres-operator-rolling-update-required" in x.metadata.annotations, pods)))
def count_pods_with_label(self, labels, namespace='default'):
return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items)
@@ -189,6 +193,7 @@ class K8s:
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
pod_phase = 'Failing over'
new_pod_node = ''
pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace)
while (pod_phase != 'Running') or (new_pod_node not in failover_targets):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
@@ -196,6 +201,10 @@ class K8s:
new_pod_node = pods[0].spec.node_name
pod_phase = pods[0].status.phase
time.sleep(self.RETRY_TIMEOUT_SEC)
while pods_with_update_flag != 0:
pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace)
time.sleep(self.RETRY_TIMEOUT_SEC)
def wait_for_namespace_creation(self, namespace='default'):
ns_found = False
@@ -243,6 +252,13 @@ class K8s:
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def patroni_rest(self, pod, path):
r = self.exec_with_kubectl(pod, "curl localhost:8008/" + path)
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "{":
return None
return json.loads(r.stdout.decode())
def get_patroni_state(self, pod):
r = self.exec_with_kubectl(pod, "patronictl list -f json")
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[":
@@ -423,6 +439,10 @@ class K8sBase:
while not get_services():
time.sleep(self.RETRY_TIMEOUT_SEC)
def count_pods_with_rolling_update_flag(self, labels, namespace='default'):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
return len(list(filter(lambda x: "zalando-postgres-operator-rolling-update-required" in x.metadata.annotations, pods)))
def count_pods_with_label(self, labels, namespace='default'):
return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items)
@@ -456,6 +476,7 @@ class K8sBase:
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
pod_phase = 'Failing over'
new_pod_node = ''
pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace)
while (pod_phase != 'Running') or (new_pod_node not in failover_targets):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
@@ -464,6 +485,10 @@ class K8sBase:
pod_phase = pods[0].status.phase
time.sleep(self.RETRY_TIMEOUT_SEC)
while pods_with_update_flag != 0:
pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace)
time.sleep(self.RETRY_TIMEOUT_SEC)
def get_logical_backup_job(self, namespace='default'):
return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo")
@@ -496,6 +521,13 @@ class K8sBase:
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def patroni_rest(self, pod, path):
r = self.exec_with_kubectl(pod, "curl localhost:8008/" + path)
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "{":
return None
return json.loads(r.stdout.decode())
def get_patroni_state(self, pod):
r = self.exec_with_kubectl(pod, "patronictl list -f json")
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[":

File diff suppressed because it is too large

View File

@@ -12,7 +12,6 @@ spec:
dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
teamId: "acid"
numberOfInstances: 2
enableNamespacedSecret: False
users: # Application/Robot users
zalando:
- superuser

View File

@@ -36,6 +36,7 @@ data:
# downscaler_annotations: "deployment-time,downscaler/*"
# enable_admin_role_for_users: "true"
# enable_crd_validation: "true"
# enable_cross_namespace_secret: "false"
# enable_database_access: "true"
enable_ebs_gp3_migration: "false"
# enable_ebs_gp3_migration_max_size: "1000"

View File

@@ -45,6 +45,7 @@ configuration:
# downscaler_annotations:
# - deployment-time
# - downscaler/*
# enable_cross_namespace_secret: "false"
enable_init_containers: true
enable_pod_antiaffinity: false
enable_pod_disruption_budget: true

View File

@@ -219,6 +219,97 @@ spec:
items:
type: string
pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
nodeAffinity:
type: object
properties:
preferredDuringSchedulingIgnoredDuringExecution:
type: array
items:
type: object
required:
- weight
- preference
properties:
preference:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
weight:
format: int32
type: integer
requiredDuringSchedulingIgnoredDuringExecution:
type: object
required:
- nodeSelectorTerms
properties:
nodeSelectorTerms:
type: array
items:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
numberOfInstances:
type: integer
minimum: 0
@@ -392,97 +483,6 @@ spec:
type: string
caSecretName:
type: string
nodeAffinity:
type: object
properties:
preferredDuringSchedulingIgnoredDuringExecution:
type: array
items:
type: object
required:
- weight
- preference
properties:
preference:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
weight:
format: int32
type: integer
requiredDuringSchedulingIgnoredDuringExecution:
type: object
required:
- nodeSelectorTerms
properties:
nodeSelectorTerms:
type: array
items:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
tolerations:
type: array
items:

View File

@@ -341,6 +341,91 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
},
},
"nodeAffinity": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"preferredDuringSchedulingIgnoredDuringExecution": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
Required: []string{"preference", "weight"},
Properties: map[string]apiextv1.JSONSchemaProps{
"preference": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"matchExpressions": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
"matchFields": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
},
},
"weight": {
Type: "integer",
Format: "int32",
},
},
},
},
},
"requiredDuringSchedulingIgnoredDuringExecution": {
Type: "object",
Required: []string{"nodeSelectorTerms"},
Properties: map[string]apiextv1.JSONSchemaProps{
"nodeSelectorTerms": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"matchExpressions": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
"matchFields": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
},
},
},
},
},
},
},
},
"numberOfInstances": { "numberOfInstances": {
Type: "integer", Type: "integer",
Minimum: &min0, Minimum: &min0,
@ -596,91 +681,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
}, },
}, },
}, },
"nodeAffinity": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"preferredDuringSchedulingIgnoredDuringExecution": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
Required: []string{"preference", "weight"},
Properties: map[string]apiextv1.JSONSchemaProps{
"preference": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"matchExpressions": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
"matchFields": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
},
},
"weight": {
Type: "integer",
Format: "int32",
},
},
},
},
},
"requiredDuringSchedulingIgnoredDuringExecution": {
Type: "object",
Required: []string{"nodeSelectorTerms"},
Properties: map[string]apiextv1.JSONSchemaProps{
"nodeSelectorTerms": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"matchExpressions": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
"matchFields": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
},
},
},
},
},
},
},
},
"tolerations": { "tolerations": {
Type: "array", Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{ Items: &apiextv1.JSONSchemaPropsOrArray{
@ -730,9 +730,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Type: "boolean", Type: "boolean",
Description: "Deprecated", Description: "Deprecated",
}, },
"enableNamespacedSecret": {
Type: "boolean",
},
"users": { "users": {
Type: "object", Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
@ -1029,6 +1026,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
}, },
}, },
}, },
"enable_cross_namespace_secret": {
Type: "boolean",
},
"enable_init_containers": { "enable_init_containers": {
Type: "boolean", Type: "boolean",
}, },

View File

@@ -81,7 +81,7 @@ func (ps *PostgresStatus) UnmarshalJSON(data []byte) error {
if err != nil {
metaErr := json.Unmarshal(data, &status)
if metaErr != nil {
return fmt.Errorf("Could not parse status: %v; err %v", string(data), metaErr)
return fmt.Errorf("could not parse status: %v; err %v", string(data), metaErr)
}
tmp.PostgresClusterStatus = status
}

View File

@@ -91,6 +91,7 @@ type KubernetesMetaConfiguration struct {
EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"`
PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"`
PodManagementPolicy string `json:"pod_management_policy,omitempty"`
EnableCrossNamespaceSecret bool `json:"enable_cross_namespace_secret,omitempty"`
}
// PostgresPodResourcesDefaults defines the spec of default resources

View File

@@ -53,28 +53,27 @@ type PostgresSpec struct {
// load balancers' source ranges are the same for master and replica services
AllowedSourceRanges []string `json:"allowedSourceRanges"`
NumberOfInstances int32 `json:"numberOfInstances"`
EnableNamespacedSecret *bool `json:"enableNamespacedSecret,omitempty"`
Users map[string]UserFlags `json:"users,omitempty"`
MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
Clone *CloneDescription `json:"clone,omitempty"`
ClusterName string `json:"-"`
Databases map[string]string `json:"databases,omitempty"`
PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"`
SchedulerName *string `json:"schedulerName,omitempty"`
NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"`
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
Sidecars []Sidecar `json:"sidecars,omitempty"`
InitContainers []v1.Container `json:"initContainers,omitempty"`
PodPriorityClassName string `json:"podPriorityClassName,omitempty"`
ShmVolume *bool `json:"enableShmVolume,omitempty"`
EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"`
LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"`
StandbyCluster *StandbyDescription `json:"standby,omitempty"`
PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
TLS *TLSDescription `json:"tls,omitempty"`
AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"`
// deprecated json tags
InitContainersOld []v1.Container `json:"init_containers,omitempty"`

View File

@@ -614,11 +614,6 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.EnableNamespacedSecret != nil {
in, out := &in.EnableNamespacedSecret, &out.EnableNamespacedSecret
*out = new(bool)
**out = **in
}
if in.Users != nil {
in, out := &in.Users, &out.Users
*out = make(map[string]UserFlags, len(*in))

View File

@@ -1163,8 +1163,7 @@ func (c *Cluster) initRobotUsers() error {
namespace := c.Namespace
//if namespaced secrets are allowed
if c.Postgresql.Spec.EnableNamespacedSecret != nil &&
*c.Postgresql.Spec.EnableNamespacedSecret {
if c.Config.OpConfig.EnableCrossNamespaceSecret {
if strings.Contains(username, ".") {
splits := strings.Split(username, ".")
namespace = splits[0]

View File

@@ -1024,7 +1024,6 @@ func TestCrossNamespacedSecrets(t *testing.T) {
Volume: acidv1.Volume{
Size: "1Gi",
},
EnableNamespacedSecret: boolToPointer(true),
Users: map[string]acidv1.UserFlags{
"appspace.db_user": {},
"db_user": {},
@@ -1052,6 +1051,7 @@ func TestCrossNamespacedSecrets(t *testing.T) {
DefaultMemoryLimit: "300Mi",
PodRoleLabel: "spilo-role",
},
EnableCrossNamespaceSecret: true,
},
}, client, pg, logger, eventRecorder)

View File

@@ -2,7 +2,9 @@ package cluster
import (
"context"
"encoding/json"
"fmt"
"reflect"
"regexp"
"strings"
"time"
@@ -261,14 +263,18 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
}
func (c *Cluster) syncStatefulSet() error {
var instancesRestartRequired bool
var (
masterPod *v1.Pod
postgresConfig map[string]interface{}
instanceRestartRequired bool
)
podsToRecreate := make([]v1.Pod, 0)
switchoverCandidates := make([]spec.NamespacedName, 0)
pods, err := c.listPods()
if err != nil {
c.logger.Infof("could not list pods of the statefulset: %v", err)
c.logger.Warnf("could not list pods of the statefulset: %v", err)
}
// NB: Be careful to consider the codepath that acts on podsRollingUpdateRequired before returning early.
@@ -381,20 +387,50 @@ func (c *Cluster) syncStatefulSet() error {
// Apply special PostgreSQL parameters that can only be set via the Patroni API.
// it is important to do it after the statefulset pods are there, but before the rolling update
// since those parameters require PostgreSQL restart.
instancesRestartRequired, err = c.checkAndSetGlobalPostgreSQLConfiguration()
if err != nil {
return fmt.Errorf("could not set cluster-wide PostgreSQL configuration options: %v", err)
}
if instancesRestartRequired {
c.logger.Debugln("restarting Postgres server within pods")
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "restarting Postgres server within pods")
if err := c.restartInstances(); err != nil {
c.logger.Warningf("could not restart Postgres server within pods: %v", err)
}
c.logger.Infof("Postgres server successfuly restarted on all pods")
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Postgres server restart done - all instances have been restarted")
}
pods, err = c.listPods()
if err != nil {
c.logger.Warnf("could not get list of pods to apply special PostgreSQL parameters only to be set via Patroni API: %v", err)
}
// get Postgres config, compare with manifest and update via Patroni PATCH endpoint if it differs
// Patroni's config endpoint is just a "proxy" to DCS. It is enough to patch it only once and it doesn't matter which pod is used.
for i, pod := range pods {
podName := util.NameFromMeta(pods[i].ObjectMeta)
config, err := c.patroni.GetConfig(&pod)
if err != nil {
c.logger.Warningf("could not get Postgres config from pod %s: %v", podName, err)
continue
}
instanceRestartRequired, err = c.checkAndSetGlobalPostgreSQLConfiguration(&pod, config)
if err != nil {
c.logger.Warningf("could not set PostgreSQL configuration options for pod %s: %v", podName, err)
continue
}
break
}
// if the config update requires a restart, call Patroni restart for replicas first, then master
if instanceRestartRequired {
c.logger.Debug("restarting Postgres server within pods")
ttl, ok := postgresConfig["ttl"].(int32)
if !ok {
ttl = 30
}
for i, pod := range pods {
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
if role == Master {
masterPod = &pods[i]
continue
}
c.restartInstance(&pod)
time.Sleep(time.Duration(ttl) * time.Second)
}
if masterPod != nil {
c.restartInstance(masterPod)
}
}
// if we get here we also need to re-create the pods (either leftovers from the old
// statefulset or those that got their configuration from the outdated statefulset)
if len(podsToRecreate) > 0 {
@@ -408,55 +444,19 @@ func (c *Cluster) syncStatefulSet() error {
return nil
}
func (c *Cluster) restartInstances() error {
c.setProcessName("starting to restart Postgres servers")
ls := c.labelsSet(false)
namespace := c.Namespace
listOptions := metav1.ListOptions{
LabelSelector: ls.String(),
}
pods, err := c.KubeClient.Pods(namespace).List(context.TODO(), listOptions)
if err != nil {
return fmt.Errorf("could not get the list of pods: %v", err)
}
c.logger.Infof("there are %d pods in the cluster which resquire Postgres server restart", len(pods.Items))
var (
masterPod *v1.Pod
)
for i, pod := range pods.Items {
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
if role == Master {
masterPod = &pods.Items[i]
continue
}
podName := util.NameFromMeta(pods.Items[i].ObjectMeta)
config, err := c.patroni.GetConfig(&pod)
if err != nil {
return fmt.Errorf("could not get config for pod %s: %v", podName, err)
}
ttl, ok := config["ttl"].(int32)
if !ok {
ttl = 30
}
if err = c.patroni.Restart(&pod); err != nil {
return fmt.Errorf("could not restart Postgres server on pod %s: %v", podName, err)
}
time.Sleep(time.Duration(ttl) * time.Second)
}
if masterPod != nil {
podName := util.NameFromMeta(masterPod.ObjectMeta)
if err = c.patroni.Restart(masterPod); err != nil {
return fmt.Errorf("could not restart postgres server on masterPod %s: %v", podName, err)
}
}
return nil
}
func (c *Cluster) restartInstance(pod *v1.Pod) {
podName := util.NameFromMeta(pod.ObjectMeta)
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", fmt.Sprintf("restarting Postgres server within %s pod %s", role, pod.Name))
if err := c.patroni.Restart(pod); err != nil {
c.logger.Warningf("could not restart Postgres server within %s pod %s: %v", role, podName, err)
return
}
c.logger.Debugf("Postgres server successfuly restarted in %s pod %s", role, podName)
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", fmt.Sprintf("Postgres server restart done for %s pod %s", role, pod.Name))
}
// AnnotationsToPropagate get the annotations to update if required
@@ -492,48 +492,77 @@ func (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[stri
}
// checkAndSetGlobalPostgreSQLConfiguration checks whether cluster-wide API parameters
// (like max_connections) has changed and if necessary sets it via the Patroni API
func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration() (bool, error) {
var (
err error
pods []v1.Pod
restartRequired bool
)
// we need to extract those options from the cluster manifest.
optionsToSet := make(map[string]string)
pgOptions := c.Spec.Parameters
for k, v := range pgOptions {
if isBootstrapOnlyParameter(k) {
optionsToSet[k] = v
}
}
if len(optionsToSet) == 0 {
return restartRequired, nil
}
if pods, err = c.listPods(); err != nil {
return restartRequired, err
}
if len(pods) == 0 {
return restartRequired, fmt.Errorf("could not call Patroni API: cluster has no pods")
}
// try all pods until the first one that is successful, as it doesn't matter which pod
// carries the request to change configuration through
for _, pod := range pods {
podName := util.NameFromMeta(pod.ObjectMeta)
c.logger.Debugf("calling Patroni API on a pod %s to set the following Postgres options: %v",
podName, optionsToSet)
if err = c.patroni.SetPostgresParameters(&pod, optionsToSet); err == nil {
restartRequired = true
return restartRequired, nil
}
c.logger.Warningf("could not patch postgres parameters with a pod %s: %v", podName, err)
}
return restartRequired, fmt.Errorf("could not reach Patroni API to set Postgres options: failed on every pod (%d total)",
len(pods))
}
// checkAndSetGlobalPostgreSQLConfiguration checks whether cluster-wide API parameters
// (like max_connections) have changed and if necessary sets it via the Patroni API
func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, patroniConfig map[string]interface{}) (bool, error) {
configToSet := make(map[string]interface{})
parametersToSet := make(map[string]string)
effectivePgParameters := make(map[string]interface{})
// read effective Patroni config if set
if patroniConfig != nil {
effectivePostgresql := patroniConfig["postgresql"].(map[string]interface{})
effectivePgParameters = effectivePostgresql[patroniPGParametersParameterName].(map[string]interface{})
}
// compare parameters under postgresql section with c.Spec.Postgresql.Parameters from manifest
desiredPgParameters := c.Spec.Parameters
for desiredOption, desiredValue := range desiredPgParameters {
effectiveValue := effectivePgParameters[desiredOption]
if isBootstrapOnlyParameter(desiredOption) && (effectiveValue != desiredValue) {
parametersToSet[desiredOption] = desiredValue
}
}
if len(parametersToSet) > 0 {
configToSet["postgresql"] = map[string]interface{}{patroniPGParametersParameterName: parametersToSet}
}
// compare other options from config with c.Spec.Patroni from manifest
desiredPatroniConfig := c.Spec.Patroni
if desiredPatroniConfig.LoopWait > 0 && desiredPatroniConfig.LoopWait != uint32(patroniConfig["loop_wait"].(float64)) {
configToSet["loop_wait"] = desiredPatroniConfig.LoopWait
}
if desiredPatroniConfig.MaximumLagOnFailover > 0 && desiredPatroniConfig.MaximumLagOnFailover != float32(patroniConfig["maximum_lag_on_failover"].(float64)) {
configToSet["maximum_lag_on_failover"] = desiredPatroniConfig.MaximumLagOnFailover
}
if desiredPatroniConfig.PgHba != nil && !reflect.DeepEqual(desiredPatroniConfig.PgHba, (patroniConfig["pg_hba"])) {
configToSet["pg_hba"] = desiredPatroniConfig.PgHba
}
if desiredPatroniConfig.RetryTimeout > 0 && desiredPatroniConfig.RetryTimeout != uint32(patroniConfig["retry_timeout"].(float64)) {
configToSet["retry_timeout"] = desiredPatroniConfig.RetryTimeout
}
if desiredPatroniConfig.Slots != nil && !reflect.DeepEqual(desiredPatroniConfig.Slots, patroniConfig["slots"]) {
configToSet["slots"] = desiredPatroniConfig.Slots
}
if desiredPatroniConfig.SynchronousMode != patroniConfig["synchronous_mode"] {
configToSet["synchronous_mode"] = desiredPatroniConfig.SynchronousMode
}
if desiredPatroniConfig.SynchronousModeStrict != patroniConfig["synchronous_mode_strict"] {
configToSet["synchronous_mode_strict"] = desiredPatroniConfig.SynchronousModeStrict
}
if desiredPatroniConfig.TTL > 0 && desiredPatroniConfig.TTL != uint32(patroniConfig["ttl"].(float64)) {
configToSet["ttl"] = desiredPatroniConfig.TTL
}
if len(configToSet) == 0 {
return false, nil
}
configToSetJson, err := json.Marshal(configToSet)
if err != nil {
c.logger.Debugf("could not convert config patch to JSON: %v", err)
}
// try all pods until the first one that is successful, as it doesn't matter which pod
// carries the request to change configuration through
podName := util.NameFromMeta(pod.ObjectMeta)
c.logger.Debugf("patching Postgres config via Patroni API on pod %s with following options: %s",
podName, configToSetJson)
if err = c.patroni.SetConfig(pod, configToSet); err != nil {
return true, fmt.Errorf("could not patch postgres parameters with a pod %s: %v", podName, err)
}
return true, nil
}
func (c *Cluster) syncSecrets() error {
@@ -622,11 +651,6 @@ func (c *Cluster) syncRoles() (err error) {
// create list of database roles to query
for _, u := range c.pgUsers {
pgRole := u.Name
if u.Namespace != c.Namespace && u.Namespace != "" {
// to avoid the conflict of having multiple users of same name
// but each in different namespace.
pgRole = fmt.Sprintf("%s.%s", u.Name, u.Namespace)
}
userNames = append(userNames, pgRole)
// add team member role name with rename suffix in case we need to rename it back
if u.Origin == spec.RoleOriginTeamsAPI && c.OpConfig.EnableTeamMemberDeprecation {
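To connect the new config sync to the manifest side: the options compared above come from the `patroni` and `postgresql.parameters` sections of the `postgresql` resource, while the live values are read via the Patroni config endpoint. A rough sketch of the relevant manifest fields (values are illustrative only):

```yaml
spec:
  postgresql:
    version: "13"
    parameters:               # bootstrap-only parameters are patched via the Patroni API
      max_connections: "100"
  patroni:
    ttl: 30
    loop_wait: 10
    retry_timeout: 10
    maximum_lag_on_failover: 33554432
    synchronous_mode: false
    synchronous_mode_strict: false
    pg_hba:
    - hostssl all all 0.0.0.0/0 md5
```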

View File

@@ -96,13 +96,13 @@ func (c *Cluster) syncUnderlyingEBSVolume() error {
var modifySize *int64
var modifyType *string
if targetValue.Iops != nil {
if targetValue.Iops != nil && *targetValue.Iops >= int64(3000) {
if volume.Iops != *targetValue.Iops {
modifyIops = targetValue.Iops
}
}
if targetValue.Throughput != nil {
if targetValue.Throughput != nil && *targetValue.Throughput >= int64(125) {
if volume.Throughput != *targetValue.Throughput {
modifyThroughput = targetValue.Throughput
}
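For reference, gp3 volumes start at a baseline of 3000 IOPS and 125 MB/s throughput, which is why lower target values are now skipped. A sketch of how these knobs appear in the cluster manifest, assuming the `iops`/`throughput` volume fields that back `targetValue.Iops` and `targetValue.Throughput` above (storage class name is illustrative):

```yaml
spec:
  volume:
    size: 10Gi
    storageClass: gp3
    iops: 3000        # values below the gp3 baseline of 3000 are not applied
    throughput: 125   # MB/s; values below the gp3 baseline of 125 are not applied
```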

View File

@@ -82,6 +82,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.EnableSidecars = util.CoalesceBool(fromCRD.Kubernetes.EnableSidecars, util.True())
result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate
result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName
result.EnableCrossNamespaceSecret = fromCRD.Kubernetes.EnableCrossNamespaceSecret
result.InfrastructureRolesSecretName = fromCRD.Kubernetes.InfrastructureRolesSecretName
if fromCRD.Kubernetes.InfrastructureRolesDefs != nil {

View File

@@ -207,6 +207,7 @@ type Config struct {
PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""`
SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"`
EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"`
EnableCrossNamespaceSecret bool `name:"enable_cross_namespace_secret" default:"false"`
EnablePgVersionEnvVar bool `name:"enable_pgversion_env_var" default:"true"`
EnableSpiloWalPathCompat bool `name:"enable_spilo_wal_path_compat" default:"false"`
MajorVersionUpgradeMode string `name:"major_version_upgrade_mode" default:"off"`

View File

@@ -32,6 +32,7 @@ type Interface interface {
GetMemberData(server *v1.Pod) (MemberData, error)
Restart(server *v1.Pod) error
GetConfig(server *v1.Pod) (map[string]interface{}, error)
SetConfig(server *v1.Pod, config map[string]interface{}) error
}
// Patroni API client
@@ -163,6 +164,20 @@ func (p *Patroni) SetPostgresParameters(server *v1.Pod, parameters map[string]st
return p.httpPostOrPatch(http.MethodPatch, apiURLString+configPath, buf)
}
//SetConfig sets Patroni options via Patroni patch API call.
func (p *Patroni) SetConfig(server *v1.Pod, config map[string]interface{}) error {
buf := &bytes.Buffer{}
err := json.NewEncoder(buf).Encode(config)
if err != nil {
return fmt.Errorf("could not encode json: %v", err)
}
apiURLString, err := apiURL(server)
if err != nil {
return err
}
return p.httpPostOrPatch(http.MethodPatch, apiURLString+configPath, buf)
}
// MemberDataPatroni child element
type MemberDataPatroni struct {
Version string `json:"version"`

View File

@@ -1,4 +1,4 @@
apiVersion: "networking.k8s.io/v1beta1"
apiVersion: "networking.k8s.io/v1"
kind: "Ingress"
metadata:
name: "postgres-operator-ui"
@@ -10,6 +10,10 @@ spec:
- host: "ui.example.org"
http:
paths:
- backend:
serviceName: "postgres-operator-ui"
servicePort: 80
- path: /
pathType: ImplementationSpecific
backend:
service:
name: "postgres-operator-ui"
port:
number: 80