merge with master
This commit is contained in: commit 99aa89643b
@ -7,6 +7,7 @@ metadata:
|
|||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
name: {{ template "postgres-operator-ui.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
|
|
|
|||
|
|
@ -1,7 +1,10 @@
|
|||
{{- if .Values.ingress.enabled -}}
|
||||
{{- $fullName := include "postgres-operator-ui.fullname" . -}}
|
||||
{{- $svcPort := .Values.service.port -}}
|
||||
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
|
||||
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
{{- else -}}
|
||||
apiVersion: extensions/v1beta1
|
||||
|
|
@ -9,6 +12,7 @@ apiVersion: extensions/v1beta1
|
|||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $fullName }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
|
||||
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
|
||||
|
|
@ -36,9 +40,18 @@ spec:
|
|||
paths:
|
||||
{{- range .paths }}
|
||||
- path: {{ . }}
|
||||
{{ if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion -}}
|
||||
pathType: ImplementationSpecific
|
||||
backend:
|
||||
service:
|
||||
name: {{ $fullName }}
|
||||
port:
|
||||
number: {{ $svcPort }}
|
||||
{{- else -}}
|
||||
backend:
|
||||
serviceName: {{ $fullName }}
|
||||
servicePort: {{ $svcPort }}
|
||||
{{- end -}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
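The capability gate above picks the Ingress `apiVersion` from the cluster version. As a rough illustration of the same decision outside Helm, here is a small Go sketch; the helper name, the hand-rolled parsing of a plain `vMAJOR.MINOR.PATCH` GitVersion, and the fallback behaviour are assumptions for the example, not part of the chart.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// pickIngressAPIVersion mirrors the template's semverCompare gates:
// >=1.19 uses networking.k8s.io/v1, >=1.14 the v1beta1 group, else extensions/v1beta1.
func pickIngressAPIVersion(gitVersion string) string {
	parts := strings.SplitN(strings.TrimPrefix(gitVersion, "v"), ".", 3)
	if len(parts) < 2 {
		return "extensions/v1beta1" // assumed fallback for unparsable versions
	}
	major, _ := strconv.Atoi(parts[0])
	minor, _ := strconv.Atoi(strings.TrimRight(parts[1], "+")) // tolerate GKE-style "19+" suffixes
	switch {
	case major > 1 || (major == 1 && minor >= 19):
		return "networking.k8s.io/v1"
	case major == 1 && minor >= 14:
		return "networking.k8s.io/v1beta1"
	default:
		return "extensions/v1beta1"
	}
}

func main() {
	fmt.Println(pickIngressAPIVersion("v1.19.3"))  // networking.k8s.io/v1
	fmt.Println(pickIngressAPIVersion("v1.16.15")) // networking.k8s.io/v1beta1
	fmt.Println(pickIngressAPIVersion("v1.12.10")) // extensions/v1beta1
}
```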
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ metadata:
|
|||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
name: {{ template "postgres-operator-ui.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ apiVersion: v1
|
|||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "postgres-operator-ui.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
|
||||
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
|
||||
|
|
|
|||
|
|
@ -173,6 +173,9 @@ spec:
|
|||
enable_init_containers:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_cross_namespace_secret:
|
||||
type: boolean
|
||||
default: false
|
||||
enable_pod_antiaffinity:
|
||||
type: boolean
|
||||
default: false
|
||||
|
|
|
|||
|
|
@ -223,6 +223,97 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
|
||||
nodeAffinity:
|
||||
type: object
|
||||
properties:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- weight
|
||||
- preference
|
||||
properties:
|
||||
preference:
|
||||
type: object
|
||||
properties:
|
||||
matchExpressions:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
matchFields:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
weight:
|
||||
format: int32
|
||||
type: integer
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
type: object
|
||||
required:
|
||||
- nodeSelectorTerms
|
||||
properties:
|
||||
nodeSelectorTerms:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
matchExpressions:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
matchFields:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
numberOfInstances:
|
||||
type: integer
|
||||
minimum: 0
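The `maintenanceWindows` pattern near the top of this hunk is dense. As a quick check of what it accepts, the sketch below compiles a simplified copy of the pattern (the surrounding optional-whitespace parts are dropped); the test strings are illustrative.

```go
package main

import (
	"fmt"
	"regexp"
)

// Simplified version of the CRD's maintenanceWindows item pattern: either a
// weekday-qualified range (Day:HH:MM-Day:HH:MM) or a plain HH:MM-HH:MM range.
var window = regexp.MustCompile(
	`^((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))$`)

func main() {
	for _, s := range []string{"Sat:01:00-Sat:03:00", "01:00-03:00", "Mon:25:00-Tue:02:00"} {
		fmt.Println(s, window.MatchString(s))
	}
	// Sat:01:00-Sat:03:00 true
	// 01:00-03:00 true
	// Mon:25:00-Tue:02:00 false (25 is not a valid hour)
}
```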
|
||||
|
|
@ -396,97 +487,6 @@ spec:
|
|||
type: string
|
||||
caSecretName:
|
||||
type: string
|
||||
nodeAffinity:
|
||||
type: object
|
||||
properties:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- weight
|
||||
- preference
|
||||
properties:
|
||||
preference:
|
||||
type: object
|
||||
properties:
|
||||
matchExpressions:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
matchFields:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
weight:
|
||||
format: int32
|
||||
type: integer
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
type: object
|
||||
required:
|
||||
- nodeSelectorTerms
|
||||
properties:
|
||||
nodeSelectorTerms:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
matchExpressions:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
matchFields:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
tolerations:
|
||||
type: array
|
||||
items:
|
||||
|
|
@ -515,8 +515,6 @@ spec:
|
|||
type: integer
|
||||
useLoadBalancer: # deprecated
|
||||
type: boolean
|
||||
enableNamespacedSecret:
|
||||
type: boolean
|
||||
users:
|
||||
type: object
|
||||
additionalProperties:
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ apiVersion: v1
|
|||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "postgres-operator.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
|
||||
helm.sh/chart: {{ template "postgres-operator.chart" . }}
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ metadata:
|
|||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
name: {{ template "postgres-operator.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ apiVersion: "acid.zalan.do/v1"
|
|||
kind: OperatorConfiguration
|
||||
metadata:
|
||||
name: {{ template "postgres-operator.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
|
||||
helm.sh/chart: {{ template "postgres-operator.chart" . }}
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@ metadata:
|
|||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
name: {{ .Values.podPriorityClassName }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
preemptionPolicy: PreemptLowerPriority
|
||||
globalDefault: false
|
||||
value: 1000000
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ metadata:
|
|||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
name: {{ template "postgres-operator.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ apiVersion: v1
|
|||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "postgres-operator.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
|
||||
helm.sh/chart: {{ template "postgres-operator.chart" . }}
|
||||
|
|
|
|||
|
|
@ -97,6 +97,8 @@ configKubernetes:
|
|||
# - deployment-time
|
||||
# - downscaler/*
|
||||
|
||||
# allow user secrets in other namespaces than the Postgres cluster
|
||||
enable_cross_namespace_secret: false
|
||||
# enables initContainers to run actions before Spilo is started
|
||||
enable_init_containers: true
|
||||
# toggles pod anti affinity on the Postgres pods
|
||||
|
|
@ -151,7 +153,7 @@ configKubernetes:
|
|||
# template for database user secrets generated by the operator,
|
||||
# here username contains the namespace in the format namespace.username
|
||||
# if the user is in different namespace than cluster and cross namespace secrets
|
||||
# are enabled via EnableNamespacedSecret flag.
|
||||
# are enabled via `enable_cross_namespace_secret` flag in the configuration.
|
||||
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
|
||||
# set user and group for the spilo container (required to run Spilo as non-root process)
|
||||
# spilo_runasuser: 101
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ solutions:
|
|||
* [kind](https://kind.sigs.k8s.io/) and [k3d](https://k3d.io), which allows creating multi-nodes K8s
|
||||
clusters running on Docker (requires Docker)
|
||||
|
||||
To interact with the K8s infrastructure install it's CLI runtime [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-via-curl).
|
||||
To interact with the K8s infrastructure install its CLI runtime [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-via-curl).
|
||||
|
||||
This quickstart assumes that you have started minikube or created a local kind
|
||||
cluster. Note that you can also use built-in K8s support in the Docker Desktop
|
||||
|
|
@ -85,6 +85,8 @@ The chart works with both Helm 2 and Helm 3. The `crd-install` hook from v2 will
|
|||
be skipped with warning when using v3. Documentation for installing applications
|
||||
with Helm 2 can be found in the [v2 docs](https://v2.helm.sh/docs/).
|
||||
|
||||
The chart is also hosted at: https://opensource.zalando.com/postgres-operator/charts/postgres-operator/
|
||||
|
||||
## Check if Postgres Operator is running
|
||||
|
||||
Starting the operator may take a few seconds. Check if the operator pod is
|
||||
|
|
|
|||
|
|
@ -264,6 +264,13 @@ configuration they are grouped under the `kubernetes` key.
|
|||
[admin docs](../administrator.md#pod-disruption-budget) for more information.
|
||||
Default is true.
|
||||
|
||||
* **enable_cross_namespace_secrets**
|
||||
To allow secrets in a different namespace other than the Postgres cluster
|
||||
namespace. Once enabled, specify the namespace in the user name under the
|
||||
`users` section in the form `{namespace}.{username}`. The operator will then
|
||||
create the user secret in that namespace. The part after the first `.` is
|
||||
considered to be the user name. The default is `false`.
|
||||
|
||||
* **enable_init_containers**
|
||||
global option to allow for creating init containers in the cluster manifest to
|
||||
run actions before Spilo is started. Default is true.
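The `{namespace}.{username}` convention described under **enable_cross_namespace_secrets** above can be sketched as follows; this is a minimal illustration of the documented rule (part before the first `.` is the target namespace, the rest is the user name), not the operator's own implementation, and the helper name and fallback are made up.

```go
package main

import (
	"fmt"
	"strings"
)

// splitQualifiedUsername applies the documented convention: with
// enable_cross_namespace_secrets set, "appspace.db_user" places the secret in
// namespace "appspace" for user "db_user"; unqualified names stay in the
// cluster's namespace.
func splitQualifiedUsername(name, clusterNamespace string) (namespace, username string) {
	if idx := strings.Index(name, "."); idx > 0 {
		return name[:idx], name[idx+1:]
	}
	return clusterNamespace, name
}

func main() {
	ns, user := splitQualifiedUsername("appspace.db_user", "default")
	fmt.Println(ns, user) // appspace db_user
}
```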
|
||||
|
|
@ -275,13 +282,12 @@ configuration they are grouped under the `kubernetes` key.
|
|||
|
||||
* **secret_name_template**
|
||||
a template for the name of the database user secrets generated by the
|
||||
operator. `{namespace}` is replaced with name of the namespace (if cross
|
||||
namespace secrets are enabled via EnableNamespacedSecret flag, otherwise the
|
||||
secret is in cluster's namespace and in that case it is not present in secret
|
||||
name), `{username}` is replaced with name of the secret, `{cluster}` with the
|
||||
name of the cluster, `{tprkind}` with the kind of CRD (formerly known as TPR)
|
||||
and `{tprgroup}` with the group of the CRD. No other placeholders are allowed.
|
||||
The default is
|
||||
operator. `{namespace}` is replaced with name of the namespace if
|
||||
`enable_cross_namespace_secret` is set, otherwise the
|
||||
secret is in cluster's namespace. `{username}` is replaced with name of the
|
||||
secret, `{cluster}` with the name of the cluster, `{tprkind}` with the kind
|
||||
of CRD (formerly known as TPR) and `{tprgroup}` with the group of the CRD.
|
||||
No other placeholders are allowed. The default is
|
||||
`{namespace}.{username}.{cluster}.credentials.{tprkind}.{tprgroup}`.
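To make the placeholder substitution concrete, here is a small illustrative expansion of the default template; the helper and the sample values are hypothetical and only the placeholder names come from the documentation above.

```go
package main

import (
	"fmt"
	"strings"
)

// expandSecretNameTemplate fills in the documented placeholders; it is an
// illustration, not the operator's implementation.
func expandSecretNameTemplate(tmpl, namespace, username, cluster, tprKind, tprGroup string) string {
	r := strings.NewReplacer(
		"{namespace}", namespace,
		"{username}", username,
		"{cluster}", cluster,
		"{tprkind}", tprKind,
		"{tprgroup}", tprGroup,
	)
	return r.Replace(tmpl)
}

func main() {
	fmt.Println(expandSecretNameTemplate(
		"{namespace}.{username}.{cluster}.credentials.{tprkind}.{tprgroup}",
		"default", "zalando", "acid-minimal-cluster", "postgresql", "acid.zalan.do"))
	// default.zalando.acid-minimal-cluster.credentials.postgresql.acid.zalan.do
}
```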
|
||||
|
||||
* **cluster_domain**
|
||||
|
|
|
|||
|
|
@ -140,7 +140,7 @@ At the moment it is not possible to define membership of the manifest role in
|
|||
other roles.
|
||||
|
||||
To define the secrets for the users in a different namespace than that of the cluster,
|
||||
one can use the flag `EnableNamespacedSecret` and declare the namespace for the
|
||||
one can set `enable_cross_namespace_secret` and declare the namespace for the
|
||||
secrets in the manifest in the following manner,
|
||||
|
||||
```yaml
|
||||
|
|
|
|||
|
|
@ -156,6 +156,10 @@ class K8s:
|
|||
while not get_services():
|
||||
time.sleep(self.RETRY_TIMEOUT_SEC)
|
||||
|
||||
def count_pods_with_rolling_update_flag(self, labels, namespace='default'):
|
||||
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
|
||||
return len(list(filter(lambda x: "zalando-postgres-operator-rolling-update-required" in x.metadata.annotations, pods)))
|
||||
|
||||
def count_pods_with_label(self, labels, namespace='default'):
|
||||
return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items)
|
||||
|
||||
|
|
@ -189,6 +193,7 @@ class K8s:
|
|||
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
|
||||
pod_phase = 'Failing over'
|
||||
new_pod_node = ''
|
||||
pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace)
|
||||
|
||||
while (pod_phase != 'Running') or (new_pod_node not in failover_targets):
|
||||
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
|
||||
|
|
@ -196,6 +201,10 @@ class K8s:
|
|||
new_pod_node = pods[0].spec.node_name
|
||||
pod_phase = pods[0].status.phase
|
||||
time.sleep(self.RETRY_TIMEOUT_SEC)
|
||||
|
||||
while pods_with_update_flag != 0:
|
||||
pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace)
|
||||
time.sleep(self.RETRY_TIMEOUT_SEC)
|
||||
|
||||
def wait_for_namespace_creation(self, namespace='default'):
|
||||
ns_found = False
|
||||
|
|
@ -243,6 +252,13 @@ class K8s:
|
|||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
|
||||
def patroni_rest(self, pod, path):
|
||||
r = self.exec_with_kubectl(pod, "curl localhost:8008/" + path)
|
||||
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "{":
|
||||
return None
|
||||
|
||||
return json.loads(r.stdout.decode())
|
||||
|
||||
def get_patroni_state(self, pod):
|
||||
r = self.exec_with_kubectl(pod, "patronictl list -f json")
|
||||
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[":
|
||||
|
|
@ -423,6 +439,10 @@ class K8sBase:
|
|||
while not get_services():
|
||||
time.sleep(self.RETRY_TIMEOUT_SEC)
|
||||
|
||||
def count_pods_with_rolling_update_flag(self, labels, namespace='default'):
|
||||
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
|
||||
return len(list(filter(lambda x: "zalando-postgres-operator-rolling-update-required" in x.metadata.annotations, pods)))
|
||||
|
||||
def count_pods_with_label(self, labels, namespace='default'):
|
||||
return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items)
|
||||
|
||||
|
|
@ -456,6 +476,7 @@ class K8sBase:
|
|||
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
|
||||
pod_phase = 'Failing over'
|
||||
new_pod_node = ''
|
||||
pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace)
|
||||
|
||||
while (pod_phase != 'Running') or (new_pod_node not in failover_targets):
|
||||
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
|
||||
|
|
@ -464,6 +485,10 @@ class K8sBase:
|
|||
pod_phase = pods[0].status.phase
|
||||
time.sleep(self.RETRY_TIMEOUT_SEC)
|
||||
|
||||
while pods_with_update_flag != 0:
|
||||
pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace)
|
||||
time.sleep(self.RETRY_TIMEOUT_SEC)
|
||||
|
||||
def get_logical_backup_job(self, namespace='default'):
|
||||
return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo")
|
||||
|
||||
|
|
@ -496,6 +521,13 @@ class K8sBase:
|
|||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
|
||||
def patroni_rest(self, pod, path):
|
||||
r = self.exec_with_kubectl(pod, "curl localhost:8008/" + path)
|
||||
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "{":
|
||||
return None
|
||||
|
||||
return json.loads(r.stdout.decode())
|
||||
|
||||
def get_patroni_state(self, pod):
|
||||
r = self.exec_with_kubectl(pod, "patronictl list -f json")
|
||||
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[":
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -12,7 +12,6 @@ spec:
|
|||
dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
|
||||
teamId: "acid"
|
||||
numberOfInstances: 2
|
||||
enableNamespacedSecret: False
|
||||
users: # Application/Robot users
|
||||
zalando:
|
||||
- superuser
|
||||
|
|
|
|||
|
|
@ -36,6 +36,7 @@ data:
|
|||
# downscaler_annotations: "deployment-time,downscaler/*"
|
||||
# enable_admin_role_for_users: "true"
|
||||
# enable_crd_validation: "true"
|
||||
# enable_cross_namespace_secret: "false"
|
||||
# enable_database_access: "true"
|
||||
enable_ebs_gp3_migration: "false"
|
||||
# enable_ebs_gp3_migration_max_size: "1000"
|
||||
|
|
|
|||
|
|
@ -45,6 +45,7 @@ configuration:
|
|||
# downscaler_annotations:
|
||||
# - deployment-time
|
||||
# - downscaler/*
|
||||
# enable_cross_namespace_secret: "false"
|
||||
enable_init_containers: true
|
||||
enable_pod_antiaffinity: false
|
||||
enable_pod_disruption_budget: true
|
||||
|
|
|
|||
|
|
@ -219,6 +219,97 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
|
||||
nodeAffinity:
|
||||
type: object
|
||||
properties:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- weight
|
||||
- preference
|
||||
properties:
|
||||
preference:
|
||||
type: object
|
||||
properties:
|
||||
matchExpressions:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
matchFields:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
weight:
|
||||
format: int32
|
||||
type: integer
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
type: object
|
||||
required:
|
||||
- nodeSelectorTerms
|
||||
properties:
|
||||
nodeSelectorTerms:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
matchExpressions:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
matchFields:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
numberOfInstances:
|
||||
type: integer
|
||||
minimum: 0
|
||||
|
|
@ -392,97 +483,6 @@ spec:
|
|||
type: string
|
||||
caSecretName:
|
||||
type: string
|
||||
nodeAffinity:
|
||||
type: object
|
||||
properties:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- weight
|
||||
- preference
|
||||
properties:
|
||||
preference:
|
||||
type: object
|
||||
properties:
|
||||
matchExpressions:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
matchFields:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
weight:
|
||||
format: int32
|
||||
type: integer
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
type: object
|
||||
required:
|
||||
- nodeSelectorTerms
|
||||
properties:
|
||||
nodeSelectorTerms:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
matchExpressions:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
matchFields:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
properties:
|
||||
key:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
values:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
tolerations:
|
||||
type: array
|
||||
items:
|
||||
|
|
|
|||
|
|
@ -341,6 +341,91 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
|
|||
},
|
||||
},
|
||||
},
|
||||
"nodeAffinity": {
|
||||
Type: "object",
|
||||
Properties: map[string]apiextv1.JSONSchemaProps{
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
Schema: &apiextv1.JSONSchemaProps{
|
||||
Type: "object",
|
||||
Required: []string{"preference", "weight"},
|
||||
Properties: map[string]apiextv1.JSONSchemaProps{
|
||||
"preference": {
|
||||
Type: "object",
|
||||
Properties: map[string]apiextv1.JSONSchemaProps{
|
||||
"matchExpressions": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
Schema: &apiextv1.JSONSchemaProps{
|
||||
Type: "object",
|
||||
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
|
||||
Allows: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"matchFields": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
Schema: &apiextv1.JSONSchemaProps{
|
||||
Type: "object",
|
||||
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
|
||||
Allows: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"weight": {
|
||||
Type: "integer",
|
||||
Format: "int32",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
Type: "object",
|
||||
Required: []string{"nodeSelectorTerms"},
|
||||
Properties: map[string]apiextv1.JSONSchemaProps{
|
||||
"nodeSelectorTerms": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
Schema: &apiextv1.JSONSchemaProps{
|
||||
Type: "object",
|
||||
Properties: map[string]apiextv1.JSONSchemaProps{
|
||||
"matchExpressions": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
Schema: &apiextv1.JSONSchemaProps{
|
||||
Type: "object",
|
||||
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
|
||||
Allows: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"matchFields": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
Schema: &apiextv1.JSONSchemaProps{
|
||||
Type: "object",
|
||||
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
|
||||
Allows: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"numberOfInstances": {
|
||||
Type: "integer",
|
||||
Minimum: &min0,
|
||||
|
|
@ -596,91 +681,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
|
|||
},
|
||||
},
|
||||
},
|
||||
"nodeAffinity": {
|
||||
Type: "object",
|
||||
Properties: map[string]apiextv1.JSONSchemaProps{
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
Schema: &apiextv1.JSONSchemaProps{
|
||||
Type: "object",
|
||||
Required: []string{"preference", "weight"},
|
||||
Properties: map[string]apiextv1.JSONSchemaProps{
|
||||
"preference": {
|
||||
Type: "object",
|
||||
Properties: map[string]apiextv1.JSONSchemaProps{
|
||||
"matchExpressions": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
Schema: &apiextv1.JSONSchemaProps{
|
||||
Type: "object",
|
||||
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
|
||||
Allows: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"matchFields": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
Schema: &apiextv1.JSONSchemaProps{
|
||||
Type: "object",
|
||||
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
|
||||
Allows: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"weight": {
|
||||
Type: "integer",
|
||||
Format: "int32",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
Type: "object",
|
||||
Required: []string{"nodeSelectorTerms"},
|
||||
Properties: map[string]apiextv1.JSONSchemaProps{
|
||||
"nodeSelectorTerms": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
Schema: &apiextv1.JSONSchemaProps{
|
||||
Type: "object",
|
||||
Properties: map[string]apiextv1.JSONSchemaProps{
|
||||
"matchExpressions": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
Schema: &apiextv1.JSONSchemaProps{
|
||||
Type: "object",
|
||||
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
|
||||
Allows: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"matchFields": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
Schema: &apiextv1.JSONSchemaProps{
|
||||
Type: "object",
|
||||
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
|
||||
Allows: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"tolerations": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
|
|
@ -730,9 +730,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
|
|||
Type: "boolean",
|
||||
Description: "Deprecated",
|
||||
},
|
||||
"enableNamespacedSecret": {
|
||||
Type: "boolean",
|
||||
},
|
||||
"users": {
|
||||
Type: "object",
|
||||
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
|
||||
|
|
@ -1029,6 +1026,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
|
|||
},
|
||||
},
|
||||
},
|
||||
"enable_cross_namespace_secret": {
|
||||
Type: "boolean",
|
||||
},
|
||||
"enable_init_containers": {
|
||||
Type: "boolean",
|
||||
},
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ func (ps *PostgresStatus) UnmarshalJSON(data []byte) error {
|
|||
if err != nil {
|
||||
metaErr := json.Unmarshal(data, &status)
|
||||
if metaErr != nil {
|
||||
return fmt.Errorf("Could not parse status: %v; err %v", string(data), metaErr)
|
||||
return fmt.Errorf("could not parse status: %v; err %v", string(data), metaErr)
|
||||
}
|
||||
tmp.PostgresClusterStatus = status
|
||||
}
|
||||
|
|
|
|||
|
|
@ -91,6 +91,7 @@ type KubernetesMetaConfiguration struct {
|
|||
EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"`
|
||||
PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"`
|
||||
PodManagementPolicy string `json:"pod_management_policy,omitempty"`
|
||||
EnableCrossNamespaceSecret bool `json:"enable_cross_namespace_secret,omitempty"`
|
||||
}
|
||||
|
||||
// PostgresPodResourcesDefaults defines the spec of default resources
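For completeness, the snake_case key used in the OperatorConfiguration CRD and ConfigMap maps onto the new field purely through its json tag. A stripped-down, self-contained illustration (the struct below is a stand-in with a single field, not the real type):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// kubernetesMetaConfiguration imitates only the relevant field of the
// configuration struct above.
type kubernetesMetaConfiguration struct {
	EnableCrossNamespaceSecret bool `json:"enable_cross_namespace_secret,omitempty"`
}

func main() {
	raw := []byte(`{"enable_cross_namespace_secret": true}`)
	var cfg kubernetesMetaConfiguration
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.EnableCrossNamespaceSecret) // true
}
```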
|
||||
|
|
|
|||
|
|
@ -53,28 +53,27 @@ type PostgresSpec struct {
|
|||
// load balancers' source ranges are the same for master and replica services
|
||||
AllowedSourceRanges []string `json:"allowedSourceRanges"`
|
||||
|
||||
NumberOfInstances int32 `json:"numberOfInstances"`
|
||||
EnableNamespacedSecret *bool `json:"enableNamespacedSecret,omitempty"`
|
||||
Users map[string]UserFlags `json:"users,omitempty"`
|
||||
MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
|
||||
Clone *CloneDescription `json:"clone,omitempty"`
|
||||
ClusterName string `json:"-"`
|
||||
Databases map[string]string `json:"databases,omitempty"`
|
||||
PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"`
|
||||
SchedulerName *string `json:"schedulerName,omitempty"`
|
||||
NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"`
|
||||
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
|
||||
Sidecars []Sidecar `json:"sidecars,omitempty"`
|
||||
InitContainers []v1.Container `json:"initContainers,omitempty"`
|
||||
PodPriorityClassName string `json:"podPriorityClassName,omitempty"`
|
||||
ShmVolume *bool `json:"enableShmVolume,omitempty"`
|
||||
EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"`
|
||||
LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"`
|
||||
StandbyCluster *StandbyDescription `json:"standby,omitempty"`
|
||||
PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
|
||||
ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
|
||||
TLS *TLSDescription `json:"tls,omitempty"`
|
||||
AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"`
|
||||
NumberOfInstances int32 `json:"numberOfInstances"`
|
||||
Users map[string]UserFlags `json:"users,omitempty"`
|
||||
MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
|
||||
Clone *CloneDescription `json:"clone,omitempty"`
|
||||
ClusterName string `json:"-"`
|
||||
Databases map[string]string `json:"databases,omitempty"`
|
||||
PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"`
|
||||
SchedulerName *string `json:"schedulerName,omitempty"`
|
||||
NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"`
|
||||
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
|
||||
Sidecars []Sidecar `json:"sidecars,omitempty"`
|
||||
InitContainers []v1.Container `json:"initContainers,omitempty"`
|
||||
PodPriorityClassName string `json:"podPriorityClassName,omitempty"`
|
||||
ShmVolume *bool `json:"enableShmVolume,omitempty"`
|
||||
EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"`
|
||||
LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"`
|
||||
StandbyCluster *StandbyDescription `json:"standby,omitempty"`
|
||||
PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
|
||||
ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
|
||||
TLS *TLSDescription `json:"tls,omitempty"`
|
||||
AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"`
|
||||
|
||||
// deprecated json tags
|
||||
InitContainersOld []v1.Container `json:"init_containers,omitempty"`
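The `NodeAffinity *v1.NodeAffinity` field kept in this spec is what the long nodeAffinity sections added to the CRD validation elsewhere in this commit describe. As a reference, a minimal way to build such a value with the upstream core/v1 types is sketched below; the label key and values are placeholders.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Require scheduling onto nodes matching a label expression; this mirrors
	// the matchExpressions shape the CRD schema validates.
	affinity := &v1.NodeAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{{
				MatchExpressions: []v1.NodeSelectorRequirement{{
					Key:      "node.kubernetes.io/instance-type", // placeholder key
					Operator: v1.NodeSelectorOpIn,
					Values:   []string{"m5.large"}, // placeholder value
				}},
			}},
		},
	}
	fmt.Printf("%+v\n", affinity)
}
```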
|
||||
|
|
|
|||
|
|
@ -614,11 +614,6 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
|
|||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.EnableNamespacedSecret != nil {
|
||||
in, out := &in.EnableNamespacedSecret, &out.EnableNamespacedSecret
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.Users != nil {
|
||||
in, out := &in.Users, &out.Users
|
||||
*out = make(map[string]UserFlags, len(*in))
|
||||
|
|
|
|||
|
|
@ -1163,8 +1163,7 @@ func (c *Cluster) initRobotUsers() error {
|
|||
namespace := c.Namespace
|
||||
|
||||
//if namespaced secrets are allowed
|
||||
if c.Postgresql.Spec.EnableNamespacedSecret != nil &&
|
||||
*c.Postgresql.Spec.EnableNamespacedSecret {
|
||||
if c.Config.OpConfig.EnableCrossNamespaceSecret {
|
||||
if strings.Contains(username, ".") {
|
||||
splits := strings.Split(username, ".")
|
||||
namespace = splits[0]
|
||||
|
|
|
|||
|
|
@ -1024,7 +1024,6 @@ func TestCrossNamespacedSecrets(t *testing.T) {
|
|||
Volume: acidv1.Volume{
|
||||
Size: "1Gi",
|
||||
},
|
||||
EnableNamespacedSecret: boolToPointer(true),
|
||||
Users: map[string]acidv1.UserFlags{
|
||||
"appspace.db_user": {},
|
||||
"db_user": {},
|
||||
|
|
@ -1052,6 +1051,7 @@ func TestCrossNamespacedSecrets(t *testing.T) {
|
|||
DefaultMemoryLimit: "300Mi",
|
||||
PodRoleLabel: "spilo-role",
|
||||
},
|
||||
EnableCrossNamespaceSecret: true,
|
||||
},
|
||||
}, client, pg, logger, eventRecorder)
|
||||
|
||||
|
|
|
|||
|
|
@ -2,7 +2,9 @@ package cluster
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
|
@ -261,14 +263,18 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
|
|||
}
|
||||
|
||||
func (c *Cluster) syncStatefulSet() error {
|
||||
var instancesRestartRequired bool
|
||||
var (
|
||||
masterPod *v1.Pod
|
||||
postgresConfig map[string]interface{}
|
||||
instanceRestartRequired bool
|
||||
)
|
||||
|
||||
podsToRecreate := make([]v1.Pod, 0)
|
||||
switchoverCandidates := make([]spec.NamespacedName, 0)
|
||||
|
||||
pods, err := c.listPods()
|
||||
if err != nil {
|
||||
c.logger.Infof("could not list pods of the statefulset: %v", err)
|
||||
c.logger.Warnf("could not list pods of the statefulset: %v", err)
|
||||
}
|
||||
|
||||
// NB: Be careful to consider the codepath that acts on podsRollingUpdateRequired before returning early.
|
||||
|
|
@ -381,20 +387,50 @@ func (c *Cluster) syncStatefulSet() error {
|
|||
// Apply special PostgreSQL parameters that can only be set via the Patroni API.
|
||||
// it is important to do it after the statefulset pods are there, but before the rolling update
|
||||
// since those parameters require PostgreSQL restart.
|
||||
instancesRestartRequired, err = c.checkAndSetGlobalPostgreSQLConfiguration()
|
||||
pods, err = c.listPods()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not set cluster-wide PostgreSQL configuration options: %v", err)
|
||||
c.logger.Warnf("could not get list of pods to apply special PostgreSQL parameters only to be set via Patroni API: %v", err)
|
||||
}
|
||||
|
||||
if instancesRestartRequired {
|
||||
c.logger.Debugln("restarting Postgres server within pods")
|
||||
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "restarting Postgres server within pods")
|
||||
if err := c.restartInstances(); err != nil {
|
||||
c.logger.Warningf("could not restart Postgres server within pods: %v", err)
|
||||
// get Postgres config, compare with manifest and update via Patroni PATCH endpoint if it differs
|
||||
// Patroni's config endpoint is just a "proxy" to DCS. It is enough to patch it only once and it doesn't matter which pod is used.
|
||||
for i, pod := range pods {
|
||||
podName := util.NameFromMeta(pods[i].ObjectMeta)
|
||||
config, err := c.patroni.GetConfig(&pod)
|
||||
if err != nil {
|
||||
c.logger.Warningf("could not get Postgres config from pod %s: %v", podName, err)
|
||||
continue
|
||||
}
|
||||
c.logger.Infof("Postgres server successfuly restarted on all pods")
|
||||
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Postgres server restart done - all instances have been restarted")
|
||||
instanceRestartRequired, err = c.checkAndSetGlobalPostgreSQLConfiguration(&pod, config)
|
||||
if err != nil {
|
||||
c.logger.Warningf("could not set PostgreSQL configuration options for pod %s: %v", podName, err)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// if the config update requires a restart, call Patroni restart for replicas first, then master
|
||||
if instanceRestartRequired {
|
||||
c.logger.Debug("restarting Postgres server within pods")
|
||||
ttl, ok := postgresConfig["ttl"].(int32)
|
||||
if !ok {
|
||||
ttl = 30
|
||||
}
|
||||
for i, pod := range pods {
|
||||
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
|
||||
if role == Master {
|
||||
masterPod = &pods[i]
|
||||
continue
|
||||
}
|
||||
c.restartInstance(&pod)
|
||||
time.Sleep(time.Duration(ttl) * time.Second)
|
||||
}
|
||||
|
||||
if masterPod != nil {
|
||||
c.restartInstance(masterPod)
|
||||
}
|
||||
}
|
||||
|
||||
// if we get here we also need to re-create the pods (either leftovers from the old
|
||||
// statefulset or those that got their configuration from the outdated statefulset)
|
||||
if len(podsToRecreate) > 0 {
|
||||
|
|
@ -408,55 +444,19 @@ func (c *Cluster) syncStatefulSet() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *Cluster) restartInstances() error {
|
||||
c.setProcessName("starting to restart Postgres servers")
|
||||
ls := c.labelsSet(false)
|
||||
namespace := c.Namespace
|
||||
func (c *Cluster) restartInstance(pod *v1.Pod) {
|
||||
podName := util.NameFromMeta(pod.ObjectMeta)
|
||||
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
|
||||
|
||||
listOptions := metav1.ListOptions{
|
||||
LabelSelector: ls.String(),
|
||||
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", fmt.Sprintf("restarting Postgres server within %s pod %s", role, pod.Name))
|
||||
|
||||
if err := c.patroni.Restart(pod); err != nil {
|
||||
c.logger.Warningf("could not restart Postgres server within %s pod %s: %v", role, podName, err)
|
||||
return
|
||||
}
|
||||
|
||||
pods, err := c.KubeClient.Pods(namespace).List(context.TODO(), listOptions)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not get the list of pods: %v", err)
|
||||
}
|
||||
c.logger.Infof("there are %d pods in the cluster which resquire Postgres server restart", len(pods.Items))
|
||||
|
||||
var (
|
||||
masterPod *v1.Pod
|
||||
)
|
||||
for i, pod := range pods.Items {
|
||||
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
|
||||
|
||||
if role == Master {
|
||||
masterPod = &pods.Items[i]
|
||||
continue
|
||||
}
|
||||
|
||||
podName := util.NameFromMeta(pods.Items[i].ObjectMeta)
|
||||
config, err := c.patroni.GetConfig(&pod)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not get config for pod %s: %v", podName, err)
|
||||
}
|
||||
ttl, ok := config["ttl"].(int32)
|
||||
if !ok {
|
||||
ttl = 30
|
||||
}
|
||||
if err = c.patroni.Restart(&pod); err != nil {
|
||||
return fmt.Errorf("could not restart Postgres server on pod %s: %v", podName, err)
|
||||
}
|
||||
time.Sleep(time.Duration(ttl) * time.Second)
|
||||
}
|
||||
|
||||
if masterPod != nil {
|
||||
podName := util.NameFromMeta(masterPod.ObjectMeta)
|
||||
if err = c.patroni.Restart(masterPod); err != nil {
|
||||
return fmt.Errorf("could not restart postgres server on masterPod %s: %v", podName, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
c.logger.Debugf("Postgres server successfuly restarted in %s pod %s", role, podName)
|
||||
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", fmt.Sprintf("Postgres server restart done for %s pod %s", role, pod.Name))
|
||||
}
|
||||
|
||||
// AnnotationsToPropagate get the annotations to update if required
|
||||
|
|
@ -492,48 +492,77 @@ func (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[stri
|
|||
}
|
||||
|
||||
// checkAndSetGlobalPostgreSQLConfiguration checks whether cluster-wide API parameters
|
||||
// (like max_connections) has changed and if necessary sets it via the Patroni API
|
||||
func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration() (bool, error) {
|
||||
var (
|
||||
err error
|
||||
pods []v1.Pod
|
||||
restartRequired bool
|
||||
)
|
||||
// (like max_connections) have changed and if necessary sets it via the Patroni API
|
||||
func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, patroniConfig map[string]interface{}) (bool, error) {
|
||||
configToSet := make(map[string]interface{})
|
||||
parametersToSet := make(map[string]string)
|
||||
effectivePgParameters := make(map[string]interface{})
|
||||
|
||||
// we need to extract those options from the cluster manifest.
|
||||
optionsToSet := make(map[string]string)
|
||||
pgOptions := c.Spec.Parameters
|
||||
// read effective Patroni config if set
|
||||
if patroniConfig != nil {
|
||||
effectivePostgresql := patroniConfig["postgresql"].(map[string]interface{})
|
||||
effectivePgParameters = effectivePostgresql[patroniPGParametersParameterName].(map[string]interface{})
|
||||
}
|
||||
|
||||
for k, v := range pgOptions {
|
||||
if isBootstrapOnlyParameter(k) {
|
||||
optionsToSet[k] = v
|
||||
// compare parameters under postgresql section with c.Spec.Postgresql.Parameters from manifest
|
||||
desiredPgParameters := c.Spec.Parameters
|
||||
for desiredOption, desiredValue := range desiredPgParameters {
|
||||
effectiveValue := effectivePgParameters[desiredOption]
|
||||
if isBootstrapOnlyParameter(desiredOption) && (effectiveValue != desiredValue) {
|
||||
parametersToSet[desiredOption] = desiredValue
|
||||
}
|
||||
}
|
||||
|
||||
if len(optionsToSet) == 0 {
|
||||
return restartRequired, nil
|
||||
if len(parametersToSet) > 0 {
|
||||
configToSet["postgresql"] = map[string]interface{}{patroniPGParametersParameterName: parametersToSet}
|
||||
}
|
||||
|
||||
if pods, err = c.listPods(); err != nil {
|
||||
return restartRequired, err
|
||||
// compare other options from config with c.Spec.Patroni from manifest
|
||||
desiredPatroniConfig := c.Spec.Patroni
|
||||
if desiredPatroniConfig.LoopWait > 0 && desiredPatroniConfig.LoopWait != uint32(patroniConfig["loop_wait"].(float64)) {
|
||||
configToSet["loop_wait"] = desiredPatroniConfig.LoopWait
|
||||
}
|
||||
if len(pods) == 0 {
|
||||
return restartRequired, fmt.Errorf("could not call Patroni API: cluster has no pods")
|
||||
if desiredPatroniConfig.MaximumLagOnFailover > 0 && desiredPatroniConfig.MaximumLagOnFailover != float32(patroniConfig["maximum_lag_on_failover"].(float64)) {
|
||||
configToSet["maximum_lag_on_failover"] = desiredPatroniConfig.MaximumLagOnFailover
|
||||
}
|
||||
if desiredPatroniConfig.PgHba != nil && !reflect.DeepEqual(desiredPatroniConfig.PgHba, (patroniConfig["pg_hba"])) {
|
||||
configToSet["pg_hba"] = desiredPatroniConfig.PgHba
|
||||
}
|
||||
if desiredPatroniConfig.RetryTimeout > 0 && desiredPatroniConfig.RetryTimeout != uint32(patroniConfig["retry_timeout"].(float64)) {
|
||||
configToSet["retry_timeout"] = desiredPatroniConfig.RetryTimeout
|
||||
}
|
||||
if desiredPatroniConfig.Slots != nil && !reflect.DeepEqual(desiredPatroniConfig.Slots, patroniConfig["slots"]) {
|
||||
configToSet["slots"] = desiredPatroniConfig.Slots
|
||||
}
|
||||
if desiredPatroniConfig.SynchronousMode != patroniConfig["synchronous_mode"] {
|
||||
configToSet["synchronous_mode"] = desiredPatroniConfig.SynchronousMode
|
||||
}
|
||||
if desiredPatroniConfig.SynchronousModeStrict != patroniConfig["synchronous_mode_strict"] {
|
||||
configToSet["synchronous_mode_strict"] = desiredPatroniConfig.SynchronousModeStrict
|
||||
}
|
||||
if desiredPatroniConfig.TTL > 0 && desiredPatroniConfig.TTL != uint32(patroniConfig["ttl"].(float64)) {
|
||||
configToSet["ttl"] = desiredPatroniConfig.TTL
|
||||
}
|
||||
|
||||
if len(configToSet) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
configToSetJson, err := json.Marshal(configToSet)
|
||||
if err != nil {
|
||||
c.logger.Debugf("could not convert config patch to JSON: %v", err)
|
||||
}
|
||||
|
||||
// try all pods until the first one that is successful, as it doesn't matter which pod
|
||||
// carries the request to change configuration through
|
||||
for _, pod := range pods {
|
||||
podName := util.NameFromMeta(pod.ObjectMeta)
|
||||
c.logger.Debugf("calling Patroni API on a pod %s to set the following Postgres options: %v",
|
||||
podName, optionsToSet)
|
||||
if err = c.patroni.SetPostgresParameters(&pod, optionsToSet); err == nil {
|
||||
restartRequired = true
|
||||
return restartRequired, nil
|
||||
}
|
||||
c.logger.Warningf("could not patch postgres parameters with a pod %s: %v", podName, err)
|
||||
podName := util.NameFromMeta(pod.ObjectMeta)
|
||||
c.logger.Debugf("patching Postgres config via Patroni API on pod %s with following options: %s",
|
||||
podName, configToSetJson)
|
||||
if err = c.patroni.SetConfig(pod, configToSet); err != nil {
|
||||
return true, fmt.Errorf("could not patch postgres parameters with a pod %s: %v", podName, err)
|
||||
}
|
||||
return restartRequired, fmt.Errorf("could not reach Patroni API to set Postgres options: failed on every pod (%d total)",
|
||||
len(pods))
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) syncSecrets() error {
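A side note on the numeric casts in the hunk above (`uint32(patroniConfig["ttl"].(float64))` and friends): the Patroni config arrives through `encoding/json`, which decodes every JSON number inside a `map[string]interface{}` as `float64`, hence the two-step conversion. A tiny standalone demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var cfg map[string]interface{}
	_ = json.Unmarshal([]byte(`{"ttl": 30, "loop_wait": 10}`), &cfg)

	ttl, ok := cfg["ttl"].(float64) // a plain .(int) assertion would fail here
	if !ok {
		ttl = 30 // fall back to Patroni's default, as the sync code does
	}
	fmt.Println(uint32(ttl), uint32(cfg["loop_wait"].(float64))) // 30 10
}
```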
|
||||
|
|
@ -622,11 +651,6 @@ func (c *Cluster) syncRoles() (err error) {
|
|||
// create list of database roles to query
|
||||
for _, u := range c.pgUsers {
|
||||
pgRole := u.Name
|
||||
if u.Namespace != c.Namespace && u.Namespace != "" {
|
||||
// to avoid the conflict of having multiple users of same name
|
||||
// but each in different namespace.
|
||||
pgRole = fmt.Sprintf("%s.%s", u.Name, u.Namespace)
|
||||
}
|
||||
userNames = append(userNames, pgRole)
|
||||
// add team member role name with rename suffix in case we need to rename it back
|
||||
if u.Origin == spec.RoleOriginTeamsAPI && c.OpConfig.EnableTeamMemberDeprecation {
|
||||
|
|
|
|||
|
|
@ -96,13 +96,13 @@ func (c *Cluster) syncUnderlyingEBSVolume() error {
|
|||
var modifySize *int64
|
||||
var modifyType *string
|
||||
|
||||
if targetValue.Iops != nil {
|
||||
if targetValue.Iops != nil && *targetValue.Iops >= int64(3000) {
|
||||
if volume.Iops != *targetValue.Iops {
|
||||
modifyIops = targetValue.Iops
|
||||
}
|
||||
}
|
||||
|
||||
if targetValue.Throughput != nil {
|
||||
if targetValue.Throughput != nil && *targetValue.Throughput >= int64(125) {
|
||||
if volume.Throughput != *targetValue.Throughput {
|
||||
modifyThroughput = targetValue.Throughput
|
||||
}
|
||||
|
|
|
|||
|
|
@ -82,6 +82,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
|
|||
result.EnableSidecars = util.CoalesceBool(fromCRD.Kubernetes.EnableSidecars, util.True())
|
||||
result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate
|
||||
result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName
|
||||
result.EnableCrossNamespaceSecret = fromCRD.Kubernetes.EnableCrossNamespaceSecret
|
||||
|
||||
result.InfrastructureRolesSecretName = fromCRD.Kubernetes.InfrastructureRolesSecretName
|
||||
if fromCRD.Kubernetes.InfrastructureRolesDefs != nil {
|
||||
|
|
|
|||
|
|
@ -207,6 +207,7 @@ type Config struct {
|
|||
PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""`
|
||||
SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"`
|
||||
EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"`
|
||||
EnableCrossNamespaceSecret bool `name:"enable_cross_namespace_secret" default:"false"`
|
||||
EnablePgVersionEnvVar bool `name:"enable_pgversion_env_var" default:"true"`
|
||||
EnableSpiloWalPathCompat bool `name:"enable_spilo_wal_path_compat" default:"false"`
|
||||
MajorVersionUpgradeMode string `name:"major_version_upgrade_mode" default:"off"`
|
||||
|
|
|
|||
|
|
@ -32,6 +32,7 @@ type Interface interface {
|
|||
GetMemberData(server *v1.Pod) (MemberData, error)
|
||||
Restart(server *v1.Pod) error
|
||||
GetConfig(server *v1.Pod) (map[string]interface{}, error)
|
||||
SetConfig(server *v1.Pod, config map[string]interface{}) error
|
||||
}
|
||||
|
||||
// Patroni API client
|
||||
|
|
@ -163,6 +164,20 @@ func (p *Patroni) SetPostgresParameters(server *v1.Pod, parameters map[string]st
|
|||
return p.httpPostOrPatch(http.MethodPatch, apiURLString+configPath, buf)
|
||||
}
|
||||
|
||||
//SetConfig sets Patroni options via Patroni patch API call.
|
||||
func (p *Patroni) SetConfig(server *v1.Pod, config map[string]interface{}) error {
|
||||
buf := &bytes.Buffer{}
|
||||
err := json.NewEncoder(buf).Encode(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not encode json: %v", err)
|
||||
}
|
||||
apiURLString, err := apiURL(server)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return p.httpPostOrPatch(http.MethodPatch, apiURLString+configPath, buf)
|
||||
}
|
||||
|
||||
// MemberDataPatroni child element
|
||||
type MemberDataPatroni struct {
|
||||
Version string `json:"version"`
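A hedged sketch of how the new `GetConfig`/`SetConfig` pair added above might be used together: only the two interface methods come from the code in this diff; the wrapper function, the target `loop_wait` value, and the assumed import path are illustrative.

```go
package patroniexample

import (
	v1 "k8s.io/api/core/v1"

	"github.com/zalando/postgres-operator/pkg/util/patroni" // assumed import path
)

// ensureLoopWait patches Patroni's DCS config through any one pod when the
// effective loop_wait differs from the desired value. JSON numbers decode as
// float64, hence the conversion before comparing.
func ensureLoopWait(p patroni.Interface, pod *v1.Pod, desired uint32) error {
	current, err := p.GetConfig(pod)
	if err != nil {
		return err
	}
	if lw, ok := current["loop_wait"].(float64); ok && uint32(lw) == desired {
		return nil // already in sync, nothing to patch
	}
	return p.SetConfig(pod, map[string]interface{}{"loop_wait": desired})
}
```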
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
apiVersion: "networking.k8s.io/v1beta1"
|
||||
apiVersion: "networking.k8s.io/v1"
|
||||
kind: "Ingress"
|
||||
metadata:
|
||||
name: "postgres-operator-ui"
|
||||
|
|
@ -10,6 +10,10 @@ spec:
|
|||
- host: "ui.example.org"
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
serviceName: "postgres-operator-ui"
|
||||
servicePort: 80
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
backend:
|
||||
service:
|
||||
name: "postgres-operator-ui"
|
||||
port:
|
||||
number: 80
|
||||