merge with master and resolve conflicts

This commit is contained in:
Felix Kunde 2020-03-02 14:15:43 +01:00
commit 6102278f19
101 changed files with 2740 additions and 889 deletions

View File

@ -1,6 +1,6 @@
 The MIT License (MIT)

-Copyright (c) 2019 Zalando SE
+Copyright (c) 2020 Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

View File

@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,21 @@
apiVersion: v1
name: postgres-operator-ui
version: 1.4.0
appVersion: 1.4.0
home: https://github.com/zalando/postgres-operator
description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
keywords:
- postgres
- operator
- ui
- cloud-native
- patroni
- spilo
maintainers:
- name: Zalando
email: opensource@zalando.de
- name: siku4
email: sk@sik-net.de
sources:
- https://github.com/zalando/postgres-operator
engine: gotpl

View File

@ -0,0 +1,29 @@
apiVersion: v1
entries:
postgres-operator-ui:
- apiVersion: v1
appVersion: 1.4.0
created: "2020-02-24T15:32:47.610967635+01:00"
description: Postgres Operator UI provides a graphical interface for a convenient
database-as-a-service user experience
digest: 00e0eff7056d56467cd5c975657fbb76c8d01accd25a4b7aca81bc42aeac961d
home: https://github.com/zalando/postgres-operator
keywords:
- postgres
- operator
- ui
- cloud-native
- patroni
- spilo
maintainers:
- email: opensource@zalando.de
name: Zalando
- email: sk@sik-net.de
name: siku4
name: postgres-operator-ui
sources:
- https://github.com/zalando/postgres-operator
urls:
- postgres-operator-ui-1.4.0.tgz
version: 1.4.0
generated: "2020-02-24T15:32:47.610348278+01:00"

View File

@ -0,0 +1,3 @@
To verify that postgres-operator has started, run:
kubectl --namespace={{ .Release.Namespace }} get pods -l "app.kubernetes.io/name={{ template "postgres-operator-ui.name" . }}"

View File

@ -0,0 +1,39 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "postgres-operator-ui.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "postgres-operator-ui.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a service account name.
*/}}
{{- define "postgres-operator-ui.serviceAccountName" -}}
{{ default (include "postgres-operator-ui.fullname" .) .Values.serviceAccount.name }}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "postgres-operator-ui.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
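
The truncation to 63 characters in these helpers matters because K8s object names must be valid DNS labels. As a quick check of how the `fullname` helper resolves — a sketch assuming Helm 3 syntax and a local checkout of this chart — render the templates and inspect the names:

```bash
# Release name "ui" does not contain the chart name, so fullname
# renders as "<release>-<chart>":
helm template ui ./charts/postgres-operator-ui | grep "ui-postgres-operator-ui"

# A release named "postgres-operator-ui" already contains the chart name,
# so fullname collapses to the release name alone:
helm template postgres-operator-ui ./charts/postgres-operator-ui | grep "name: postgres-operator-ui"
```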

View File

@ -0,0 +1,52 @@
{{ if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "postgres-operator-ui.serviceAccountName" . }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups:
- acid.zalan.do
resources:
- postgresqls
verbs:
- create
- delete
- get
- list
- patch
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- get
- list
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
{{ end }}

View File

@ -0,0 +1,19 @@
{{ if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "postgres-operator-ui.serviceAccountName" . }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "postgres-operator-ui.serviceAccountName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "postgres-operator-ui.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{ end }}

View File

@ -0,0 +1,69 @@
apiVersion: "apps/v1"
kind: "Deployment"
metadata:
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ template "postgres-operator-ui.fullname" . }}
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
team: "acid" # Parameterize?
spec:
serviceAccountName: {{ include "postgres-operator-ui.serviceAccountName" . }}
containers:
- name: "service"
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: 8081
protocol: "TCP"
readinessProbe:
httpGet:
path: "/health"
port: 8081
initialDelaySeconds: 5
timeoutSeconds: 1
resources:
{{- toYaml .Values.resources | nindent 12 }}
env:
- name: "APP_URL"
value: "http://localhost:8081"
- name: "OPERATOR_API_URL"
value: {{ .Values.envs.operatorApiUrl }}
- name: "TARGET_NAMESPACE"
value: {{ .Values.envs.targetNamespace }}
- name: "TEAMS"
value: |-
[
"acid"
]
- name: "OPERATOR_UI_CONFIG"
value: |-
{
"docs_link":"https://postgres-operator.readthedocs.io/en/latest/",
"dns_format_string": "{1}-{0}.{2}",
"databases_visible": true,
"master_load_balancer_visible": true,
"nat_gateways_visible": false,
"replica_load_balancer_visible": true,
"resources_visible": true,
"users_visible": true,
"postgresql_versions": [
"12",
"11",
"10",
"9.6",
"9.5"
]
}
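
The `OPERATOR_UI_CONFIG` value is plain JSON, so individual UI features can be toggled by editing this env var. A hypothetical variant (same flags as above, different values) that hides the load balancer toggles and offers only the two newest Postgres versions:

```yaml
# hypothetical override of the env var defined in the template above
- name: "OPERATOR_UI_CONFIG"
  value: |-
    {
      "docs_link":"https://postgres-operator.readthedocs.io/en/latest/",
      "master_load_balancer_visible": false,
      "replica_load_balancer_visible": false,
      "resources_visible": true,
      "users_visible": true,
      "postgresql_versions": [
        "12",
        "11"
      ]
    }
```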

View File

@ -0,0 +1,44 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "postgres-operator-ui.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ . }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
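
Matching the keys this template iterates over, a sketch of a values override that enables the ingress with TLS (host name and secret name are hypothetical):

```yaml
ingress:
  enabled: true
  annotations:
    kubernetes.io/ingress.class: nginx
  hosts:
    - host: pgui.example.org
      paths: [""]
  tls:
    - secretName: pgui-tls
      hosts:
        - pgui.example.org
```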

View File

@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ template "postgres-operator-ui.fullname" . }}
spec:
ports:
- port: {{ .Values.service.port }}
targetPort: 8081
protocol: TCP
selector:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
type: {{ .Values.service.type }}

View File

@ -0,0 +1,11 @@
{{ if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "postgres-operator-ui.serviceAccountName" . }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{ end }}

View File

@ -0,0 +1,58 @@
# Default values for postgres-operator-ui.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
# configure ui image
image:
registry: registry.opensource.zalan.do
repository: acid/postgres-operator-ui
tag: v1.4.0
pullPolicy: "IfNotPresent"
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# configure UI pod resources
resources:
limits:
cpu: 300m
memory: 3000Mi
requests:
cpu: 100m
memory: 100Mi
# configure UI ENVs
envs:
# IMPORTANT: While the operator chart and UI chart are independent, this is the interface between
# UI and operator API. Insert the service name of the operator API here!
operatorApiUrl: "http://postgres-operator:8080"
targetNamespace: "default"
# configure UI service
service:
type: "ClusterIP"
port: "8080"
# configure UI ingress. If needed: "enabled: true"
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: ui.example.org
paths: [""]
tls: []
# - secretName: ui-tls
# hosts:
#     - ui.example.org
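
A sketch of installing the chart with these defaults overridden on the command line, assuming Helm 3 and that the operator API service is reachable under its default name:

```bash
helm install postgres-operator-ui ./charts/postgres-operator-ui \
  --set envs.operatorApiUrl="http://postgres-operator:8080" \
  --set envs.targetNamespace="default" \
  --set service.type="ClusterIP"
```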

View File

@ -1,7 +1,7 @@
 apiVersion: v1
 name: postgres-operator
-version: 1.2.0
-appVersion: 1.2.0
+version: 1.4.0
+appVersion: 1.4.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
 keywords:
@ -13,8 +13,6 @@ keywords:
 maintainers:
 - name: Zalando
   email: opensource@zalando.de
-- name: kimxogus
-  email: kgyoo8232@gmail.com
 sources:
 - https://github.com/zalando/postgres-operator
 engine: gotpl

View File

@ -121,6 +121,8 @@ spec:
             type: array
             items:
               type: string
+          master_pod_move_timeout:
+            type: string
           node_readiness_label:
             type: object
             additionalProperties:
@ -138,10 +140,16 @@ spec:
             enum:
             - "ordered_ready"
            - "parallel"
+          pod_priority_class_name:
+            type: string
           pod_role_label:
             type: string
+          pod_service_account_definition:
+            type: string
           pod_service_account_name:
             type: string
+          pod_service_account_role_binding_definition:
+            type: string
           pod_terminate_grace_period:
             type: string
           secret_name_template:
@ -171,6 +179,12 @@ spec:
           default_memory_request:
             type: string
             pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
+          min_cpu_limit:
+            type: string
+            pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
+          min_memory_limit:
+            type: string
+            pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
       timeouts:
         type: object
         properties:
@ -189,16 +203,16 @@ spec:
       load_balancer:
         type: object
         properties:
+          custom_service_annotations:
+            type: object
+            additionalProperties:
+              type: string
           db_hosted_zone:
             type: string
           enable_master_load_balancer:
             type: boolean
           enable_replica_load_balancer:
             type: boolean
-          custom_service_annotations:
-            type: object
-            additionalProperties:
-              type: string
           master_dns_name_format:
             type: string
           replica_dns_name_format:
@ -221,21 +235,23 @@ spec:
       logical_backup:
         type: object
         properties:
-          logical_backup_schedule:
-            type: string
-            pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'
           logical_backup_docker_image:
             type: string
+          logical_backup_s3_access_key_id:
+            type: string
           logical_backup_s3_bucket:
             type: string
           logical_backup_s3_endpoint:
             type: string
-          logical_backup_s3_sse:
+          logical_backup_s3_region:
             type: string
-          logical_backup_s3_access_key_id:
-            type: string
           logical_backup_s3_secret_access_key:
             type: string
+          logical_backup_s3_sse:
+            type: string
+          logical_backup_schedule:
+            type: string
+            pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'
       debug:
         type: object
         properties:
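
The new `min_cpu_limit` and `min_memory_limit` fields reuse the K8s quantity regexes above. A sketch of a configuration fragment that passes this validation (values are illustrative):

```yaml
configuration:
  postgres_pod_resources:
    default_cpu_limit: "1"      # matches ^(\d+m|\d+(\.\d{1,3})?)$
    min_cpu_limit: 250m         # millicore notation also matches
    default_memory_limit: 500Mi # matches the [EPTGMK]i? quantity pattern
    min_memory_limit: 250Mi
```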

View File

@ -94,7 +94,7 @@ spec:
           s3_secret_access_key:
             type: string
           s3_force_path_style:
-            type: string
+            type: boolean
           s3_wal_path:
             type: string
           timestamp:
@ -266,6 +266,10 @@ spec:
             pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
             # Note: the value specified here must not be zero or be higher
             # than the corresponding limit.
+      serviceAnnotations:
+        type: object
+        additionalProperties:
+          type: string
       sidecars:
         type: array
         nullable: true
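
With `serviceAnnotations` now part of the `postgresql` schema, a minimal manifest fragment using it might look like this (annotation value is illustrative):

```yaml
apiVersion: acid.zalan.do/v1
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  serviceAnnotations:
    service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "1800"
```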

View File

@ -1,13 +1,56 @@
 apiVersion: v1
 entries:
   postgres-operator:
+  - apiVersion: v1
+    appVersion: 1.4.0
+    created: "2020-02-20T17:39:25.443276193+01:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
+    digest: b93ccde5581deb8ed0857136b8ce74ca3f1b7240438fa4415f705764a1300bed
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-1.4.0.tgz
+    version: 1.4.0
+  - apiVersion: v1
+    appVersion: 1.3.0
+    created: "2020-02-20T17:39:25.441532163+01:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
+    digest: 7e788fd37daec76a01f6d6f9fe5be5b54f5035e4eba0041e80a760d656537325
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-1.3.0.tgz
+    version: 1.3.0
   - apiVersion: v1
     appVersion: 1.2.0
-    created: "2019-08-13T17:33:32.735021423+02:00"
+    created: "2020-02-20T17:39:25.440278302+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: d10710c7cf19f4e266e7704f5d1e98dcfc61bee3919522326c35c22ca7d2f2bf
-    engine: gotpl
     home: https://github.com/zalando/postgres-operator
     keywords:
     - postgres
@ -26,4 +69,4 @@ entries:
     urls:
     - postgres-operator-1.2.0.tgz
     version: 1.2.0
-generated: "2019-08-13T17:33:32.734335398+02:00"
+generated: "2020-02-20T17:39:25.439168098+01:00"

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,53 @@
{{ if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: postgres-pod
labels:
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
helm.sh/chart: {{ template "postgres-operator.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
# Patroni needs to watch and manage endpoints
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
# Patroni needs to watch pods
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- patch
- update
- watch
# to let Patroni create a headless service
- apiGroups:
- ""
resources:
- services
verbs:
- create
# to run privileged pods
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- privileged
verbs:
- use
{{ end }}
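
Once this role is bound to the `postgres-pod` service account in a watched namespace, the granted verbs can be spot-checked — a sketch assuming the `default` namespace:

```bash
# Patroni's endpoint management permission
kubectl auth can-i update endpoints \
  --as=system:serviceaccount:default:postgres-pod

# headless service creation for Patroni
kubectl auth can-i create services \
  --as=system:serviceaccount:default:postgres-pod
```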

View File

@ -1,5 +1,5 @@
 {{ if .Values.rbac.create }}
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: {{ include "postgres-operator.serviceAccountName" . }}
@ -9,6 +9,7 @@ metadata:
   app.kubernetes.io/managed-by: {{ .Release.Service }}
   app.kubernetes.io/instance: {{ .Release.Name }}
 rules:
+# all verbs allowed for custom operator resources
 - apiGroups:
   - acid.zalan.do
   resources:
@ -16,7 +17,15 @@ rules:
   - postgresqls/status
   - operatorconfigurations
   verbs:
-  - "*"
+  - create
+  - delete
+  - deletecollection
+  - get
+  - list
+  - patch
+  - update
+  - watch
+# to create or get/update CRDs when starting up
 - apiGroups:
   - apiextensions.k8s.io
   resources:
@ -26,12 +35,14 @@ rules:
   - get
   - patch
   - update
+# to read configuration from ConfigMaps
 - apiGroups:
   - ""
   resources:
   - configmaps
   verbs:
   - get
+# to manage endpoints which are also used by Patroni
 - apiGroups:
   - ""
   resources:
@ -43,16 +54,19 @@ rules:
   - get
   - list
   - patch
-  - watch # needed if zalando-postgres-operator account is used for pods as well
+  - update
+  - watch
+# to CRUD secrets for database access
 - apiGroups:
   - ""
   resources:
   - secrets
   verbs:
   - create
-  - update
   - delete
   - get
+  - update
+# to check nodes for node readiness label
 - apiGroups:
   - ""
   resources:
@ -61,6 +75,7 @@ rules:
   - get
   - list
   - watch
+# to read or delete existing PVCs. Creation via StatefulSet
 - apiGroups:
   - ""
   resources:
@ -69,6 +84,7 @@ rules:
   - delete
   - get
   - list
+# to read existing PVs. Creation should be done via dynamic provisioning
 - apiGroups:
   - ""
   resources:
@ -77,6 +93,7 @@ rules:
   - get
   - list
   - update # only for resizing AWS volumes
+# to watch Spilo pods and do rolling updates. Creation via StatefulSet
 - apiGroups:
   - ""
   resources:
@ -85,14 +102,17 @@ rules:
   - delete
   - get
   - list
-  - watch
   - patch
+  - update
+  - watch
+# to resize the filesystem in Spilo pods when increasing volume size
 - apiGroups:
   - ""
   resources:
   - pods/exec
   verbs:
   - create
+# to CRUD services to point to Postgres cluster instances
 - apiGroups:
   - ""
   resources:
@ -102,6 +122,8 @@ rules:
   - delete
   - get
   - patch
+  - update
+# to CRUD the StatefulSet which controls the Postgres cluster instances
 - apiGroups:
   - apps
   resources:
@ -112,12 +134,26 @@ rules:
   - get
   - list
   - patch
+# to CRUD cron jobs for logical backups
+- apiGroups:
+  - batch
+  resources:
+  - cronjobs
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+# to get namespaces operator resources can run in
 - apiGroups:
   - ""
   resources:
   - namespaces
   verbs:
   - get
+# to define PDBs. Update happens via delete/create
 - apiGroups:
   - policy
   resources:
@ -126,6 +162,7 @@ rules:
   - create
   - delete
   - get
+# to create ServiceAccounts in each namespace the operator watches
 - apiGroups:
   - ""
   resources:
@ -133,30 +170,21 @@ rules:
   verbs:
   - get
   - create
+# to create role bindings to the postgres-pod service account
 - apiGroups:
-  - "rbac.authorization.k8s.io"
+  - rbac.authorization.k8s.io
   resources:
   - rolebindings
   verbs:
   - get
   - create
+# to grant privilege to run privileged pods
 - apiGroups:
-  - "rbac.authorization.k8s.io"
+  - extensions
   resources:
-  - clusterroles
-  verbs:
-  - bind
+  - podsecuritypolicies
   resourceNames:
-  - {{ include "postgres-operator.serviceAccountName" . }}
-- apiGroups:
-  - batch
-  resources:
-  - cronjobs # enables logical backups
+  - privileged
   verbs:
-  - create
-  - delete
-  - get
-  - list
-  - patch
-  - update
+  - use
 {{ end }}

View File

@ -14,8 +14,6 @@ roleRef:
   name: {{ include "postgres-operator.serviceAccountName" . }}
 subjects:
 - kind: ServiceAccount
-  # note: the cluster role binding needs to be defined
-  # for every namespace the operator service account lives in.
   name: {{ include "postgres-operator.serviceAccountName" . }}
   namespace: {{ .Release.Namespace }}
 {{ end }}

View File

@ -9,7 +9,6 @@ metadata:
   app.kubernetes.io/managed-by: {{ .Release.Service }}
   app.kubernetes.io/instance: {{ .Release.Name }}
 data:
-  pod_service_account_name: {{ include "postgres-operator.serviceAccountName" . }}
 {{ toYaml .Values.configGeneral | indent 2 }}
 {{ toYaml .Values.configUsers | indent 2 }}
 {{ toYaml .Values.configKubernetes | indent 2 }}

View File

@ -14,7 +14,6 @@ configuration:
 {{ toYaml .Values.configUsers | indent 4 }}
   kubernetes:
     oauth_token_secret_name: {{ template "postgres-operator.fullname" . }}
-    pod_service_account_name: {{ include "postgres-operator.serviceAccountName" . }}
 {{ toYaml .Values.configKubernetes | indent 4 }}
   postgres_pod_resources:
 {{ toYaml .Values.configPostgresPodResources | indent 4 }}

View File

@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
helm.sh/chart: {{ template "postgres-operator.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ template "postgres-operator.fullname" . }}
spec:
type: ClusterIP
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
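
This service fronts the operator's REST API on port 8080. A sketch for poking it from a workstation (the `/clusters` path is an assumption about the operator API, not defined by this chart):

```bash
kubectl port-forward svc/postgres-operator 8080:8080 &
curl http://localhost:8080/clusters
```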

View File

@ -1,7 +1,7 @@
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator
-  tag: v1.2.0
+  tag: v1.4.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@ -24,7 +24,7 @@ configGeneral:
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
   # Spilo docker image
-  docker_image: registry.opensource.zalan.do/acid/spilo-11:1.6-p1
+  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
   # max number of instances in Postgres cluster. -1 = no limit
   min_instances: "-1"
   # min number of instances in Postgres cluster. -1 = no limit
@ -93,6 +93,14 @@ configKubernetes:
   pod_management_policy: "ordered_ready"
   # label assigned to the Postgres pods (and services/endpoints)
   pod_role_label: spilo-role
+  # service account definition as JSON/YAML string to be used by postgres cluster pods
+  # pod_service_account_definition: ""
+  # name of service account to be used by postgres cluster pods
+  pod_service_account_name: "postgres-pod"
+  # role binding definition as JSON/YAML string to be used by pod service account
+  # pod_service_account_role_binding_definition: ""
   # Postgres pods are terminated forcefully after this timeout
   pod_terminate_grace_period: 5m
   # template for database user secrets generated by the operator
@ -108,13 +116,17 @@ configKubernetes:
 # configure resource requests for the Postgres pods
 configPostgresPodResources:
   # CPU limits for the postgres containers
-  default_cpu_limit: "3"
-  # cpu request value for the postgres containers
+  default_cpu_limit: "1"
+  # CPU request value for the postgres containers
   default_cpu_request: 100m
   # memory limits for the postgres containers
-  default_memory_limit: 1Gi
+  default_memory_limit: 500Mi
   # memory request value for the postgres containers
   default_memory_request: 100Mi
+  # hard CPU minimum required to properly run a Postgres cluster
+  min_cpu_limit: 250m
+  # hard memory minimum required to properly run a Postgres cluster
+  min_memory_limit: 250Mi

 # timeouts related to some operator actions
 configTimeouts:
@ -191,6 +203,8 @@ configLogicalBackup:
   logical_backup_s3_access_key_id: ""
   # S3 bucket to store backup results
   logical_backup_s3_bucket: "my-bucket-url"
+  # S3 region of bucket
+  logical_backup_s3_region: ""
   # S3 endpoint url when not using AWS
   logical_backup_s3_endpoint: ""
   # S3 Secret Access Key
@ -248,13 +262,13 @@ serviceAccount:
 priorityClassName: ""

-resources: {}
-  # limits:
-  #   cpu: 100m
-  #   memory: 300Mi
-  # requests:
-  #   cpu: 100m
-  #   memory: 300Mi
+resources:
+  limits:
+    cpu: 500m
+    memory: 500Mi
+  requests:
+    cpu: 100m
+    memory: 250Mi

 # Affinity for pod assignment
 # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
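
Since the chart now ships concrete operator resource values instead of a commented-out block, downsizing for a test environment is a plain values override — a sketch assuming Helm 3:

```bash
helm upgrade --install postgres-operator ./charts/postgres-operator \
  --set resources.limits.cpu=250m \
  --set resources.limits.memory=250Mi
```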

View File

@ -1,7 +1,7 @@
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator
-  tag: v1.2.0
+  tag: v1.4.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@ -24,7 +24,7 @@ configGeneral:
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
   # Spilo docker image
-  docker_image: registry.opensource.zalan.do/acid/spilo-11:1.6-p1
+  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
   # max number of instances in Postgres cluster. -1 = no limit
   min_instances: -1
   # min number of instances in Postgres cluster. -1 = no limit
@ -100,6 +100,14 @@ configKubernetes:
   pod_management_policy: "ordered_ready"
   # label assigned to the Postgres pods (and services/endpoints)
   pod_role_label: spilo-role
+  # service account definition as JSON/YAML string to be used by postgres cluster pods
+  # pod_service_account_definition: ""
+  # name of service account to be used by postgres cluster pods
+  pod_service_account_name: "postgres-pod"
+  # role binding definition as JSON/YAML string to be used by pod service account
+  # pod_service_account_role_binding_definition: ""
   # Postgres pods are terminated forcefully after this timeout
   pod_terminate_grace_period: 5m
   # template for database user secrets generated by the operator
@ -115,13 +123,17 @@ configKubernetes:
 # configure resource requests for the Postgres pods
 configPostgresPodResources:
   # CPU limits for the postgres containers
-  default_cpu_limit: "3"
-  # cpu request value for the postgres containers
+  default_cpu_limit: "1"
+  # CPU request value for the postgres containers
   default_cpu_request: 100m
   # memory limits for the postgres containers
-  default_memory_limit: 1Gi
+  default_memory_limit: 500Mi
   # memory request value for the postgres containers
   default_memory_request: 100Mi
+  # hard CPU minimum required to properly run a Postgres cluster
+  min_cpu_limit: 250m
+  # hard memory minimum required to properly run a Postgres cluster
+  min_memory_limit: 250Mi

 # timeouts related to some operator actions
 configTimeouts:
@ -200,6 +212,8 @@ configLogicalBackup:
   logical_backup_s3_access_key_id: ""
   # S3 bucket to store backup results
   logical_backup_s3_bucket: "my-bucket-url"
+  # S3 region of bucket
+  logical_backup_s3_region: ""
   # S3 endpoint url when not using AWS
   logical_backup_s3_endpoint: ""
   # S3 Secret Access Key
@ -272,13 +286,13 @@ serviceAccount:
 priorityClassName: ""

-resources: {}
-  # limits:
-  #   cpu: 100m
-  #   memory: 300Mi
-  # requests:
-  #   cpu: 100m
-  #   memory: 300Mi
+resources:
+  limits:
+    cpu: 500m
+    memory: 500Mi
+  requests:
+    cpu: 100m
+    memory: 250Mi

 # Affinity for pod assignment
 # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity

View File

@ -66,20 +66,13 @@ pipeline:
 - desc: 'Build and push Docker image'
   cmd: |
     cd ui
-    image_base='registry-write.opensource.zalan.do/acid/postgres-operator-ui'
-    if [[ "${CDP_TARGET_BRANCH}" == 'master' && -z "${CDP_PULL_REQUEST_NUMBER}" ]]
+    IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"}
+    if [[ ${CDP_TARGET_BRANCH} == "master" && ${IS_PR_BUILD} != "true" ]]
     then
-      image="${image_base}"
+      IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-ui
     else
-      image="${image_base}-test"
+      IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-ui-test
     fi
-    image_with_tag="${image}:c${CDP_BUILD_VERSION}"
-
-    if docker pull "${image}"
-    then
-      docker build --cache-from="${image}" -t "${image_with_tag}" .
-    else
-      docker build -t "${image_with_tag}" .
-    fi
-
-    docker push "${image_with_tag}"
+    export IMAGE
+    make docker
+    make push

View File

@ -19,6 +19,7 @@ RUN apt-get update \
     && curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
     && apt-get update \
     && apt-get install --no-install-recommends -y \
+        postgresql-client-12 \
         postgresql-client-11 \
         postgresql-client-10 \
         postgresql-client-9.6 \
@ -28,6 +29,6 @@ RUN apt-get update \
 COPY dump.sh ./

-ENV PG_DIR=/usr/lib/postgresql/
+ENV PG_DIR=/usr/lib/postgresql

 ENTRYPOINT ["/dump.sh"]

View File

@ -6,12 +6,10 @@ set -o nounset
 set -o pipefail
 IFS=$'\n\t'

-# make script trace visible via `kubectl logs`
-set -o xtrace
-
 ALL_DB_SIZE_QUERY="select sum(pg_database_size(datname)::numeric) from pg_database;"
 PG_BIN=$PG_DIR/$PG_VERSION/bin
 DUMP_SIZE_COEFF=5
+ERRORCOUNT=0

 TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
 K8S_API_URL=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/api/v1
@ -42,9 +40,10 @@ function aws_upload {
   [[ ! -z "$EXPECTED_SIZE" ]] && args+=("--expected-size=$EXPECTED_SIZE")
   [[ ! -z "$LOGICAL_BACKUP_S3_ENDPOINT" ]] && args+=("--endpoint-url=$LOGICAL_BACKUP_S3_ENDPOINT")
-  [[ ! "$LOGICAL_BACKUP_S3_SSE" == "" ]] && args+=("--sse=$LOGICAL_BACKUP_S3_SSE")
+  [[ ! -z "$LOGICAL_BACKUP_S3_REGION" ]] && args+=("--region=$LOGICAL_BACKUP_S3_REGION")
+  [[ ! -z "$LOGICAL_BACKUP_S3_SSE" ]] && args+=("--sse=$LOGICAL_BACKUP_S3_SSE")

-  aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}" --debug
+  aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}"
 }

 function get_pods {
@ -93,4 +92,9 @@ for search in "${search_strategy[@]}"; do
 done

+set -x
 dump | compress | aws_upload $(($(estimate_size) / DUMP_SIZE_COEFF))
+[[ ${PIPESTATUS[0]} != 0 || ${PIPESTATUS[1]} != 0 || ${PIPESTATUS[2]} != 0 ]] && (( ERRORCOUNT += 1 ))
+set +x
+
+exit $ERRORCOUNT
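
The added `PIPESTATUS` check is what lets the cron job exit non-zero when any stage of the backup pipeline fails, not just the final `aws_upload`. A standalone sketch of the pattern:

```bash
#!/bin/bash
# Capture per-stage exit codes of a pipeline; $? alone would only
# reflect the last command (cat), hiding the failure of the first stage.
ERRORCOUNT=0
false | cat | cat
[[ ${PIPESTATUS[0]} != 0 || ${PIPESTATUS[1]} != 0 || ${PIPESTATUS[2]} != 0 ]] && (( ERRORCOUNT += 1 ))
echo "errors: $ERRORCOUNT"  # prints "errors: 1"
exit $ERRORCOUNT
```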

View File

@ -11,11 +11,11 @@ switchover (planned failover) of the master to the Pod with new minor version.
 The switch should usually take less than 5 seconds, still clients have to
 reconnect.

-Major version upgrades are supported via [cloning](user.md#clone-directly). The
-new cluster manifest must have a higher `version` string than the source cluster
-and will be created from a basebackup. Depending of the cluster size, downtime
-in this case can be significant as writes to the database should be stopped and
-all WAL files should be archived first before cloning is started.
+Major version upgrades are supported via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster).
+The new cluster manifest must have a higher `version` string than the source
+cluster and will be created from a basebackup. Depending on the cluster size,
+downtime in this case can be significant as writes to the database should be
+stopped and all WAL files should be archived first before cloning is started.

 Note, that simply changing the version string in the `postgresql` manifest does
 not work at present and leads to errors. Neither Patroni nor Postgres Operator
@ -47,6 +47,12 @@ patching the CRD manifest:
 zk8 patch crd postgresqls.acid.zalan.do -p '{"spec":{"validation": null}}'
 ```

+## Non-default cluster domain
+
+If your cluster uses a DNS domain other than the default `cluster.local`, this
+needs to be set in the operator configuration (`cluster_domain` variable). This
+is used by the operator to connect to the clusters after creation.
+
 ## Namespaces

 ### Select the namespace to deploy to
@ -89,36 +95,13 @@ lacks access rights to any of them (except K8s system namespaces like
 'list pods' execute at the cluster scope and fail at the first violation of
 access rights.

-The watched namespace also needs to have a (possibly different) service account
-in the case database pods need to talk to the K8s API (e.g. when using
-K8s-native configuration of Patroni). The operator checks that the
-`pod_service_account_name` exists in the target namespace, and, if not, deploys
-there the `pod_service_account_definition` from the operator
-[`Config`](../pkg/util/config/config.go) with the default value of:
-
-```yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: operator
-```
-
-In this definition, the operator overwrites the account's name to match
-`pod_service_account_name` and the `default` namespace to match the target
-namespace. The operator performs **no** further syncing of this account.
-
-## Non-default cluster domain
-
-If your cluster uses a DNS domain other than the default `cluster.local`, this
-needs to be set in the operator configuration (`cluster_domain` variable). This
-is used by the operator to connect to the clusters after creation.
-
 ## Role-based access control for the operator

 The manifest [`operator-service-account-rbac.yaml`](../manifests/operator-service-account-rbac.yaml)
 defines the service account, cluster roles and bindings needed for the operator
-to function under access control restrictions. To deploy the operator with this
-RBAC policy use:
+to function under access control restrictions. The file also includes a cluster
+role `postgres-pod` with privileges for Patroni to watch and manage pods and
+endpoints. To deploy the operator with these RBAC policies use:

 ```bash
 kubectl create -f manifests/operatorconfiguration.crd.yaml
@ -128,14 +111,14 @@ kubectl create -f manifests/postgres-operator.yaml
 kubectl create -f manifests/minimal-postgres-manifest.yaml
 ```

-### Service account and cluster roles
+### Namespaced service account and role binding

-Note that the service account is named `zalando-postgres-operator`. You may have
-to change the `service_account_name` in the operator ConfigMap and
-`serviceAccountName` in the `postgres-operator` deployment appropriately. This
-is done intentionally to avoid breaking those setups that already work with the
-default `operator` account. In the future the operator should ideally be run
-under the `zalando-postgres-operator` service account.
+For each namespace the operator watches it creates (or reads) a service account
+and role binding to be used by the Postgres Pods. The service account is bound
+to the `postgres-pod` cluster role. The name and definitions of these resources
+can be [configured](reference/operator_parameters.md#kubernetes-resources).
+Note, that the operator performs **no** further syncing of namespaced service
+accounts and role bindings.

 ### Give K8s users access to create/list `postgresqls`
@ -377,6 +360,17 @@ cluster manifest. In the case any of these variables are omitted from the
 manifest, the operator configuration settings `enable_master_load_balancer` and
 `enable_replica_load_balancer` apply. Note that the operator settings affect
 all Postgresql services running in all namespaces watched by the operator.
+
+If load balancing is enabled two default annotations will be applied to its
+services:
+
+- `external-dns.alpha.kubernetes.io/hostname` with the value defined by the
+  operator configs `master_dns_name_format` and `replica_dns_name_format`.
+  This value can't be overwritten; if it needs to change, adjust the DNS
+  format parameters in the operator config; and
+- `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` with
+  a default value of "3600". This value can be overwritten with the operator
+  config parameter `custom_service_annotations` or the cluster parameter
+  `serviceAnnotations`.

 To limit the range of IP addresses that can reach a load balancer, specify the
 desired ranges in the `allowedSourceRanges` field (applies to both master and
@ -487,37 +481,71 @@ A secret can be pre-provisioned in different ways:

 ## Setting up the Postgres Operator UI

-With the v1.2 release the Postgres Operator is shipped with a browser-based
+Since the v1.2 release the Postgres Operator is shipped with a browser-based
 configuration user interface (UI) that simplifies managing Postgres clusters
-with the operator. The UI runs with Node.js and comes with it's own Docker
-image.
+with the operator.

-Run NPM to continuously compile `tags/js` code. Basically, it creates an
-`app.js` file in: `static/build/app.js`
+### Building the UI image

-```
-(cd ui/app && npm start)
-```
+The UI runs with Node.js and comes with its own Docker image. However,
+installing Node.js to build the operator UI is not required. It is handled
+via Docker containers when running:

-To build the Docker image open a shell and change to the `ui` folder. Then run:
-
 ```bash
-docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.2.0 .
+make docker
 ```

-Apply all manifests for the `ui/manifests` folder to deploy the Postgres
-Operator UI on K8s. For local tests you don't need the Ingress resource.
+### Configure endpoints and options
+
+The UI talks to the K8s API server as well as the Postgres Operator [REST API](developer.md#debugging-the-operator).
+K8s API server URLs are loaded from the machine's kubeconfig environment by
+default. Alternatively, a list can also be passed when starting the Python
+application with the `--cluster` option.
+
+The Operator API endpoint can be configured via the `OPERATOR_API_URL`
+environment variables in the [deployment manifest](../ui/manifests/deployment.yaml#L40).
+You can also expose the operator API through a [service](../manifests/api-service.yaml).
+Some displayed options can be disabled from UI using simple flags under the
+`OPERATOR_UI_CONFIG` field in the deployment.
+
+### Deploy the UI on K8s
+
+Now, apply all manifests from the `ui/manifests` folder to deploy the Postgres
+Operator UI on K8s. Replace the image tag in the deployment manifest if you
+want to test the image you've built with `make docker`. Make sure the pods for
+the operator and the UI are both running.

 ```bash
-kubectl apply -f ui/manifests
+sed -e "s/\(image\:.*\:\).*$/\1$TAG/" manifests/deployment.yaml | kubectl apply -f manifests/
+kubectl get all -l application=postgres-operator-ui
 ```

-Make sure the pods for the operator and the UI are both running. For local
-testing you need to apply proxying and port forwarding so that the UI can talk
-to the K8s and Postgres Operator REST API. You can use the provided
-`run_local.sh` script for this. Make sure it uses the correct URL to your K8s
-API server, e.g. for minikube it would be `https://192.168.99.100:8443`.
+### Local testing
+
+For local testing you need to apply K8s proxying and operator pod port
+forwarding so that the UI can talk to the K8s and Postgres Operator REST API.
+The Ingress resource is not needed. You can use the provided `run_local.sh`
+script for this. Make sure that:
+
+* Python dependencies are installed on your machine
+* the K8s API server URL is set for kubectl commands, e.g. for minikube it
+  would usually be `https://192.168.99.100:8443`
+* the pod label selectors for port forwarding are correct
+
+When testing with minikube you have to build the image in its docker
+environment (running `make docker` doesn't do it for you). From the `ui`
+directory execute:

 ```bash
+# compile and build operator UI
+make docker
+
+# build the image in minikube's docker env
+eval $(minikube docker-env)
+docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.3.0 .
+
+# apply UI manifests next to a running Postgres Operator
+kubectl apply -f manifests/
+
+# install python dependencies to run the UI locally
+pip3 install -r requirements.txt
 ./run_local.sh
 ```
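
To complement the per-cluster `serviceAnnotations` shown earlier, a sketch of setting the operator-wide default via the CRD-based configuration (the timeout value is illustrative):

```yaml
configuration:
  load_balancer:
    custom_service_annotations:
      service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "600"
```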

View File

@ -31,9 +31,13 @@ status page.
 ![pgui-waiting-for-master](diagrams/pgui-waiting-for-master.png "Waiting for master pod")

 Usually, the startup should only take up to 1 minute. If you feel the process
-got stuck click on the "Logs" button to inspect the operator logs. From the
-"Status" field in the top menu you can also retrieve the logs and queue of each
-worker the operator is using. The number of concurrent workers can be
+got stuck click on the "Logs" button to inspect the operator logs. If the logs
+look fine, but the UI seems to be stuck, check if you have configured the same
+[cluster name label](../ui/manifests/deployment.yaml#L45) as for the
+[operator](../manifests/configmap.yaml#L13).
+
+From the "Status" field in the top menu you can also retrieve the logs and queue
+of each worker the operator is using. The number of concurrent workers can be
 [configured](reference/operator_parameters.md#general).

 ![pgui-operator-logs](diagrams/pgui-operator-logs.png "Checking operator logs")

View File

@ -53,6 +53,7 @@ kubectl create -f manifests/operatorconfiguration.crd.yaml # registers the CRD
 kubectl create -f manifests/postgresql-operator-default-configuration.yaml # configuration
 kubectl create -f manifests/operator-service-account-rbac.yaml # identity and permissions
 kubectl create -f manifests/postgres-operator.yaml # deployment
+kubectl create -f manifests/api-service.yaml # operator API to be used by UI
 ```

 There is a [Kustomization](https://github.com/kubernetes-sigs/kustomize)
@ -106,7 +107,7 @@ kubectl create -f https://operatorhub.io/install/postgres-operator.yaml
 This installs the operator in the `operators` namespace. More information can be
 found on [operatorhub.io](https://operatorhub.io/operator/postgres-operator).

-## Create a Postgres cluster
+## Check if Postgres Operator is running

 Starting the operator may take a few seconds. Check if the operator pod is
 running before applying a Postgres cluster manifest.
@ -117,7 +118,61 @@ kubectl get pod -l name=postgres-operator
 # if you've created the operator using helm chart
 kubectl get pod -l app.kubernetes.io/name=postgres-operator
+```
+
+If the operator doesn't get into `Running` state, either check the latest K8s
+events of the deployment or pod with `kubectl describe` or inspect the operator
+logs:
+
+```bash
+kubectl logs "$(kubectl get pod -l name=postgres-operator --output='name')"
+```
+
+## Deploy the operator UI
+
+In the following paragraphs we describe how to access and manage PostgreSQL
+clusters from the command line with kubectl. But it can also be done from the
+browser-based [Postgres Operator UI](operator-ui.md). Before deploying the UI
+make sure the operator is running and its REST API is reachable through a
+[K8s service](../manifests/api-service.yaml). The URL to this API must be
+configured in the [deployment manifest](../ui/manifests/deployment.yaml#L43)
+of the UI.
+
+To deploy the UI simply apply all its manifests files or use the UI helm chart:
+
+```bash
+# manual deployment
+kubectl apply -f ui/manifests/
+
+# or helm chart
+helm install postgres-operator-ui ./charts/postgres-operator-ui
+```
+
+Like with the operator, check if the UI pod gets into `Running` state:
+
+```bash
+# if you've created the operator using yaml manifests
+kubectl get pod -l name=postgres-operator-ui
+
+# if you've created the operator using helm chart
+kubectl get pod -l app.kubernetes.io/name=postgres-operator-ui
+```
+
+You can now access the web interface by port forwarding the UI pod (mind the
+label selector) and enter `localhost:8081` in your browser:
+
+```bash
+kubectl port-forward "$(kubectl get pod -l name=postgres-operator-ui --output='name')" 8081
+```
+
+Available options are explained in detail in the [UI docs](operator-ui.md).
+
+## Create a Postgres cluster
+
+If the operator pod is running it listens to new events regarding `postgresql`
+resources. Now, it's time to submit your first Postgres cluster manifest.
+
+```bash
 # create a Postgres cluster
 kubectl create -f manifests/minimal-postgres-manifest.yaml
 ```
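
After submitting the manifest, a sketch for watching the cluster come up (the `postgresql` resource kind and the `cluster-name` pod label are both provided by the operator):

```bash
kubectl get postgresql
kubectl get pods -l cluster-name=acid-minimal-cluster --watch
```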

View File

@ -122,6 +122,11 @@ These parameters are grouped directly under the `spec` key in the manifest.
   A map of key value pairs that gets attached as [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
   to each pod created for the database.

+* **serviceAnnotations**
+  A map of key value pairs that gets attached as [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+  to the services created for the database cluster. Check the
+  [administrator docs](https://github.com/zalando/postgres-operator/blob/master/docs/administrator.md#load-balancers-and-allowed-ip-ranges)
+  for more information regarding default values and overwrite rules.
+
 * **enableShmVolume**
   Start a database pod without limitations on shm memory. By default Docker

View File

@ -110,8 +110,10 @@ Those are top-level keys, containing both leaf keys and groups.
* **min_instances** * **min_instances**
operator will run at least the number of instances for any given Postgres operator will run at least the number of instances for any given Postgres
cluster equal to the value of this parameter. When `-1` is specified, no cluster equal to the value of this parameter. Standby clusters can still run
limits are applied. The default is `-1`. with `numberOfInstances: 1` as this is the [recommended setup](../user.md#setting-up-a-standby-cluster).
When `-1` is specified for `min_instances`, no limits are applied. The default
is `-1`.
* **resync_period** * **resync_period**
period between consecutive sync requests. The default is `30m`. period between consecutive sync requests. The default is `30m`.
@ -152,21 +154,22 @@ configuration they are grouped under the `kubernetes` key.
service account used by Patroni running on individual Pods to communicate service account used by Patroni running on individual Pods to communicate
with the operator. Required even if native Kubernetes support in Patroni is with the operator. Required even if native Kubernetes support in Patroni is
not used, because Patroni keeps pod labels in sync with the instance role. not used, because Patroni keeps pod labels in sync with the instance role.
The default is `operator`. The default is `postgres-pod`.
* **pod_service_account_definition** * **pod_service_account_definition**
The operator tries to create the pod Service Account in the namespace that On Postgres cluster creation the operator tries to create the service account
doesn't define such an account using the YAML definition provided by this for the Postgres pods if it does not exist in the namespace. The internal
option. If not defined, a simple definition that contains only the name will default service account definition (defines only the name) can be overwritten
be used. The default is empty. with this parameter. Make sure to provide a valid YAML or JSON string. The
default is empty.
* **pod_service_account_role_binding_definition** * **pod_service_account_role_binding_definition**
This definition must bind pod service account to a role with permission This definition must bind the pod service account to a role with permission
sufficient for the pods to start and for Patroni to access K8s endpoints; sufficient for the pods to start and for Patroni to access K8s endpoints;
service account on its own lacks any such rights starting with K8s v1.8. If service account on its own lacks any such rights starting with K8s v1.8. If
not explicitly defined by the user, a simple definition that binds the not explicitly defined by the user, a simple definition that binds the
account to the operator's own 'zalando-postgres-operator' cluster role will account to the 'postgres-pod' [cluster role](../../manifests/operator-service-account-rbac.yaml#L198)
be used. The default is empty. will be used. The default is empty.
* **pod_terminate_grace_period** * **pod_terminate_grace_period**
Postgres pods are [terminated forcefully](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods) Postgres pods are [terminated forcefully](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods)
@@ -318,11 +321,19 @@ CRD-based configuration.

* **default_cpu_limit**
  CPU limits for the Postgres containers, unless overridden by cluster-specific
  settings. The default is `1`.

* **default_memory_limit**
  memory limits for the Postgres containers, unless overridden by cluster-specific
  settings. The default is `500Mi`.

* **min_cpu_limit**
  hard CPU minimum we consider to be required to properly run Postgres clusters
  with Patroni on Kubernetes. The default is `250m`.

* **min_memory_limit**
  hard memory minimum we consider to be required to properly run Postgres
  clusters with Patroni on Kubernetes. The default is `250Mi`.
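Both minima can likewise be changed in the ConfigMap-based configuration. A
minimal sketch, again assuming a ConfigMap named `postgres-operator`:

```bash
# raise the enforced minimum limits for all clusters managed by this operator
kubectl patch configmap postgres-operator \
  -p '{"data": {"min_cpu_limit": "500m", "min_memory_limit": "500Mi"}}'

# restart the operator pod so it reloads its configuration
kubectl rollout restart deployment postgres-operator
```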
## Operator timeouts
@@ -380,8 +391,9 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.

  `false`.

* **custom_service_annotations**
  This key/value map provides a list of annotations that get attached to each
  service of a cluster created by the operator. If the annotation key is also
  provided by the cluster definition, the manifest value is used. Optional.

* **master_dns_name_format** defines the DNS name string template for the
@@ -453,8 +465,11 @@ grouped under the `logical_backup` key.

  S3 bucket to store backup results. The bucket has to be present and
  accessible by Postgres pods. Default: empty.

* **logical_backup_s3_region**
  Specifies the region of the bucket, which is required by some non-AWS S3
  storage services. The default is empty.

* **logical_backup_s3_endpoint**
  When using non-AWS S3 storage, the endpoint can be set as an ENV variable.
  The default is empty.

* **logical_backup_s3_sse**
  Specifies the server-side encryption that the S3 storage is using. If empty string

@@ -579,4 +594,4 @@ scalyr sidecar. In the CRD-based configuration they are grouped under the

  CPU limit value for the Scalyr sidecar. The default is `1`.

* **scalyr_memory_limit**
  Memory limit value for the Scalyr sidecar. The default is `500Mi`.
@@ -65,7 +65,7 @@ our test cluster.

```bash
# get name of master pod of acid-minimal-cluster
export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master)

# set up port forward
kubectl port-forward $PGMASTER 6432:5432
```
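With the port forward in place, you can connect from the local machine. A
minimal sketch, assuming the default `postgres` user and the secret name
produced by the operator's default template:

```bash
# fetch the generated password of the postgres user and connect via the forwarded port
export PGPASSWORD=$(kubectl get secret postgres.acid-minimal-cluster.credentials \
  -o 'jsonpath={.data.password}' | base64 -d)
psql -U postgres -h localhost -p 6432
```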
@@ -232,11 +232,11 @@ spec:

      memory: 300Mi
```

The minimum limits to properly run the `postgresql` resource are configured to
`250m` for `cpu` and `250Mi` for `memory`. If a lower value is set in the
manifest, the operator will raise the limits to the configured minimum values.
If no resources are defined in the manifest they will be obtained from the
configured [default requests](reference/operator_parameters.md#kubernetes-resource-requests).
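You can verify the resulting limits directly on the pod. A quick check,
assuming the `acid-minimal-cluster` example:

```bash
# inspect the limits the operator actually applied to the Spilo container
kubectl get pod -l spilo-role=master,cluster-name=acid-minimal-cluster \
  -o jsonpath='{.items[0].spec.containers[0].resources.limits}'
```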
## Use taints and tolerations for dedicated PostgreSQL nodes
@@ -254,29 +254,22 @@ spec:

## How to clone an existing PostgreSQL cluster

You can spin up a new cluster as a clone of the existing one, using a `clone`
section in the spec. There are two options here:

* Clone from an S3 bucket (recommended)
* Clone directly from a source cluster

Note that cloning can also be used for [major version upgrades](administrator.md#minor-and-major-version-upgrade)
of PostgreSQL.
### Clone from S3

Cloning from S3 has the advantage that there is no impact on your production
database. A new Postgres cluster is created by restoring the data of another
source cluster. If you create it in the same Kubernetes environment, use a
different name.

```yaml
spec:
  clone:
```

@@ -287,7 +280,8 @@ spec:
Here `cluster` is a name of a source cluster that is going to be cloned. A new
cluster will be cloned from S3, using the latest backup before the `timestamp`.
Note that a time zone is required for `timestamp` in the format of +00:00,
which is UTC. The `uid` field is also mandatory. The operator will use it to
find a correct key inside an S3 bucket. You can find this field in the metadata
of the source cluster:

@@ -299,9 +293,6 @@ metadata:

```yaml
  uid: efd12e58-5786-11e8-b5a7-06148230260c
```
For non-AWS S3, the following settings can be set to support cloning from other
S3 implementations:

@@ -317,14 +308,35 @@ spec:

```yaml
    s3_force_path_style: true
```
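Putting the pieces together, a complete S3 clone request could look like the
sketch below. The manifest is hypothetical: the cluster names, the timestamp
and the volume size are placeholders to adapt, and the `uid` reuses the value
from the metadata example above.

```bash
# hypothetical clone manifest; adjust names, timestamp and uid to your setup
cat <<'EOF' | kubectl apply -f -
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-batman-clone
spec:
  teamId: "acid"
  numberOfInstances: 2
  volume:
    size: 5Gi
  postgresql:
    version: "11"
  clone:
    cluster: "acid-batman"
    timestamp: "2020-02-04T12:49:03+00:00"
    uid: "efd12e58-5786-11e8-b5a7-06148230260c"
EOF
```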
### Clone directly

Another way to get a fresh copy of your source DB cluster is via basebackup. To
use this feature simply leave out the timestamp field from the clone section.
The operator will connect to the service of the source cluster by name. If the
cluster is called test, then the connection string will look like host=test
port=5432, which means that you can clone only from clusters within the same
namespace.

```yaml
spec:
  clone:
    cluster: "acid-batman"
```

Be aware that on a busy source database this can result in an elevated load!
## Setting up a standby cluster

Standby cluster is a [Patroni feature](https://github.com/zalando/patroni/blob/master/docs/replica_bootstrap.rst#standby-cluster)
that first clones a database, and keeps replicating changes afterwards. As the
replication happens by means of archived WAL files (stored on S3 or the
equivalent of other cloud providers), the standby cluster can exist in a
different location than its source database. Unlike cloning, the PostgreSQL
version between source and target cluster has to be the same.

To start a cluster as standby, add the following `standby` section in the YAML
file and specify the S3 bucket path. An empty path will result in an error and
no statefulset will be created.

@@ -332,20 +344,65 @@ spec:

```yaml
spec:
  standby:
    s3_wal_path: "s3 bucket path to the master"
```

At the moment, the operator only allows streaming from the WAL archive of the
master. Thus, it is recommended to deploy standby clusters with only [one pod](../manifests/standby-manifest.yaml#L10).
You can raise the instance count when detaching. Note that the same pod role
labels as for normal clusters are used: the standby leader is labeled as
`master`.
### Providing credentials of source cluster

A standby cluster is replicating the data (including users and passwords) from
the source database and is read-only. The system and application users (like
standby, postgres etc.) all have a password that does not match the credentials
stored in the secrets which are created by the operator. One solution is to
create the secrets beforehand and paste in the credentials of the source
cluster. Otherwise, you will see errors in the Postgres logs saying users
cannot log in, and the operator logs will complain about not being able to sync
resources.

When you only run a standby leader, you can safely ignore this, as it will be
sorted out once the cluster is detached from the source. It is also harmless if
you don't plan to detach. But if you also created standby replicas, fix the
credentials right away: WAL files will pile up on the standby leader if no
connection can be established to the standby replica(s). You can also edit the
secrets after their creation. Find them by:

```bash
kubectl get secrets --all-namespaces | grep <standby-cluster-name>
```
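A minimal sketch for aligning one of these secrets with the source cluster; the
secret name is hypothetical and the placeholder password must come from the
source database:

```bash
# base64-encode the source cluster's password and patch it into the standby secret
SOURCE_PW_B64=$(echo -n '<password-from-source-cluster>' | base64)
kubectl patch secret standby.acid-standby-cluster.credentials \
  -p "{\"data\": {\"password\": \"${SOURCE_PW_B64}\"}}"
```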
### Promote the standby

One big advantage of standby clusters is that they can be promoted to a proper
database cluster. This means it will stop replicating changes from the source,
and start accepting writes itself. This mechanism makes it possible to move
databases from one place to another with minimal downtime. Currently, the
operator does not support promoting a standby cluster. It has to be done
manually using `patronictl edit-config` inside the postgres container of the
standby leader pod. Remove the following lines from the YAML structure and the
leader promotion happens immediately. Before doing so, make sure that the
standby is not behind the source database.

```yaml
standby_cluster:
  create_replica_methods:
    - bootstrap_standby_with_wale
    - basebackup_fast_xlog
  restore_command: envdir "/home/postgres/etc/wal-e.d/env-standby" /scripts/restore_command.sh
    "%f" "%p"
```

Finally, remove the `standby` section from the postgres cluster manifest.
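The edit itself boils down to one command. A sketch, assuming the standby
leader pod is named `acid-standby-cluster-0`:

```bash
# edit the Patroni cluster configuration in place; deleting the standby_cluster
# section triggers the promotion
kubectl exec -it acid-standby-cluster-0 -- patronictl edit-config
```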
### Turn a normal cluster into a standby

There is no way to transform a non-standby cluster to a standby cluster through
the operator. Adding the `standby` section to the manifest of a running
Postgres cluster will have no effect. But, as explained in the previous
paragraph, it can be done manually through `patronictl edit-config`, this time
by adding the `standby_cluster` section to the Patroni configuration. However,
the transformed standby cluster will not be doing any streaming. It will be in
standby mode and allow read-only transactions only.

## Sidecar Support
@@ -44,3 +44,4 @@ The current tests are all bundled in [`test_e2e.py`](tests/test_e2e.py):

* taint-based eviction of Postgres pods
* invoking logical backup cron job
* uniqueness of master pod
* custom service annotations
@@ -10,9 +10,9 @@ from kubernetes import client, config

class EndToEndTestCase(unittest.TestCase):
    '''
    Test interaction of the operator with multiple K8s components.
    '''

    # `kind` pods may get stuck in the `Terminating` phase for a few minutes; hence the high test timeout
    TEST_TIMEOUT_SEC = 600

@@ -20,14 +20,14 @@ class EndToEndTestCase(unittest.TestCase):

    @classmethod
    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def setUpClass(cls):
        '''
        Deploy operator to a "kind" cluster created by run.sh using examples from /manifests.
        This operator deployment is to be shared among all tests.
        run.sh deletes the 'kind' cluster after a successful run along with all operator-related entities.
        In the case of test failure the cluster will stay to enable manual examination;
        the next invocation of "make test" will re-create it.
        '''
        # set a single K8s wrapper for all tests
        k8s = cls.k8s = K8s()

@@ -55,11 +55,113 @@ class EndToEndTestCase(unittest.TestCase):

        k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml")
        k8s.wait_for_pod_start('spilo-role=master')
    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_enable_load_balancer(self):
        '''
        Test if services are updated when enabling/disabling load balancers
        '''
        k8s = self.k8s
        cluster_label = 'cluster-name=acid-minimal-cluster'

        # enable load balancer services
        pg_patch_enable_lbs = {
            "spec": {
                "enableMasterLoadBalancer": True,
                "enableReplicaLoadBalancer": True
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs)
        # wait for service recreation
        time.sleep(60)

        master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master')
        self.assertEqual(master_svc_type, 'LoadBalancer',
                         "Expected LoadBalancer service type for master, found {}".format(master_svc_type))

        repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica')
        self.assertEqual(repl_svc_type, 'LoadBalancer',
                         "Expected LoadBalancer service type for replica, found {}".format(repl_svc_type))

        # disable load balancer services again
        pg_patch_disable_lbs = {
            "spec": {
                "enableMasterLoadBalancer": False,
                "enableReplicaLoadBalancer": False
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs)
        # wait for service recreation
        time.sleep(60)

        master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master')
        self.assertEqual(master_svc_type, 'ClusterIP',
                         "Expected ClusterIP service type for master, found {}".format(master_svc_type))

        repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica')
        self.assertEqual(repl_svc_type, 'ClusterIP',
                         "Expected ClusterIP service type for replica, found {}".format(repl_svc_type))
    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_min_resource_limits(self):
        '''
        Lower resource limits below configured minimum and let operator fix it
        '''
        k8s = self.k8s
        cluster_label = 'cluster-name=acid-minimal-cluster'
        _, failover_targets = k8s.get_pg_nodes(cluster_label)

        # configure minimum boundaries for CPU and memory limits
        minCPULimit = '500m'
        minMemoryLimit = '500Mi'
        patch_min_resource_limits = {
            "configuration": {
                "postgres_pod_resources": {
                    "min_cpu_limit": minCPULimit,
                    "min_memory_limit": minMemoryLimit
                }
            }
        }
        k8s.update_config(patch_min_resource_limits)

        # lower resource limits below minimum
        pg_patch_resources = {
            "spec": {
                "resources": {
                    "requests": {
                        "cpu": "10m",
                        "memory": "50Mi"
                    },
                    "limits": {
                        "cpu": "200m",
                        "memory": "200Mi"
                    }
                }
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
        k8s.wait_for_master_failover(failover_targets)

        pods = k8s.api.core_v1.list_namespaced_pod(
            'default', label_selector='spilo-role=master,' + cluster_label).items
        self.assert_master_is_unique()
        masterPod = pods[0]

        self.assertEqual(masterPod.spec.containers[0].resources.limits['cpu'], minCPULimit,
                         "Expected CPU limit {}, found {}"
                         .format(minCPULimit, masterPod.spec.containers[0].resources.limits['cpu']))
        self.assertEqual(masterPod.spec.containers[0].resources.limits['memory'], minMemoryLimit,
                         "Expected memory limit {}, found {}"
                         .format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory']))
    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_multi_namespace_support(self):
        '''
        Create a customized Postgres cluster in a non-default namespace.
        '''
        k8s = self.k8s

        with open("manifests/complete-postgres-manifest.yaml", 'r+') as f:

@@ -69,14 +171,13 @@ class EndToEndTestCase(unittest.TestCase):

        k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
        k8s.wait_for_pod_start("spilo-role=master", self.namespace)
        self.assert_master_is_unique(self.namespace, "acid-test-cluster")

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_scaling(self):
        '''
        Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime.
        '''
        k8s = self.k8s
        labels = "cluster-name=acid-minimal-cluster"

@@ -90,9 +191,9 @@ class EndToEndTestCase(unittest.TestCase):

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_taint_based_eviction(self):
        '''
        Add taint "postgres=:NoExecute" to node with master. This must cause a failover.
        '''
        k8s = self.k8s
        cluster_label = 'cluster-name=acid-minimal-cluster'

@@ -123,7 +224,7 @@ class EndToEndTestCase(unittest.TestCase):

        # patch node and test if master is failing over to one of the expected nodes
        k8s.api.core_v1.patch_node(current_master_node, body)
        k8s.wait_for_master_failover(failover_targets)
        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

        new_master_node, new_replica_nodes = k8s.get_pg_nodes(cluster_label)
        self.assertNotEqual(current_master_node, new_master_node,
@@ -142,7 +243,7 @@ class EndToEndTestCase(unittest.TestCase):

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_logical_backup_cron_job(self):
        '''
        Ensure we can (a) create the cron job at user request for a specific PG cluster
                      (b) update the cluster-wide image for the logical backup pod
                      (c) delete the job at user request

@@ -150,7 +251,7 @@ class EndToEndTestCase(unittest.TestCase):

        Limitations:
        (a) Does not run the actual batch job because there is no S3 mock to upload backups to
        (b) Assumes 'acid-minimal-cluster' exists as defined in setUp
        '''
        k8s = self.k8s

@@ -206,11 +307,55 @@ class EndToEndTestCase(unittest.TestCase):

        self.assertEqual(0, len(jobs),
                         "Expected 0 logical backup jobs, found {}".format(len(jobs)))
    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_service_annotations(self):
        '''
        Create a Postgres cluster with service annotations and check them.
        '''
        k8s = self.k8s
        patch_custom_service_annotations = {
            "configuration": {
                "load_balancer": {
                    "custom_service_annotations": "foo:bar",
                }
            }
        }
        k8s.update_config(patch_custom_service_annotations)

        pg_patch_custom_annotations = {
            "spec": {
                "serviceAnnotations": {
                    "annotation.key": "value"
                }
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations)

        # both the operator-wide and the cluster-specific annotations
        # should appear on the services of the patched cluster
        annotations = {
            "annotation.key": "value",
            "foo": "bar",
        }
        self.assertTrue(k8s.check_service_annotations(
            "cluster-name=acid-minimal-cluster,spilo-role=master", annotations))
        self.assertTrue(k8s.check_service_annotations(
            "cluster-name=acid-minimal-cluster,spilo-role=replica", annotations))

        # clean up
        unpatch_custom_service_annotations = {
            "configuration": {
                "load_balancer": {
                    "custom_service_annotations": "",
                }
            }
        }
        k8s.update_config(unpatch_custom_service_annotations)
    def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"):
        '''
        Check that there is a single pod in the k8s cluster with the label "spilo-role=master".
        To be called manually after operations that affect pods.
        '''
        k8s = self.k8s
        labels = 'spilo-role=master,cluster-name=' + clusterName

@@ -236,9 +381,9 @@ class K8sApi:

class K8s:
    '''
    Wraps around the K8s API client and helper methods.
    '''

    RETRY_TIMEOUT_SEC = 5
@@ -271,6 +416,23 @@ class K8s:

            pod_phase = pods[0].status.phase
            time.sleep(self.RETRY_TIMEOUT_SEC)

    def get_service_type(self, svc_labels, namespace='default'):
        svc_type = ''
        svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items
        for svc in svcs:
            svc_type = svc.spec.type
        return svc_type

    def check_service_annotations(self, svc_labels, annotations, namespace='default'):
        svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items
        for svc in svcs:
            if len(svc.metadata.annotations) != len(annotations):
                return False
            for key in svc.metadata.annotations:
                if svc.metadata.annotations[key] != annotations[key]:
                    return False
        return True

    def wait_for_pg_to_scale(self, number_of_instances, namespace='default'):

        body = {
@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: postgres-operator
spec:
type: ClusterIP
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
name: postgres-operator
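This service fronts the operator's REST API on port 8080. A quick way to poke
at it from the local machine, assuming the cluster listing endpoint exposed by
the operator:

```bash
# forward the API service locally and list the clusters the operator knows about
kubectl port-forward svc/postgres-operator 8080:8080 &
curl http://localhost:8080/clusters
```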
@@ -5,11 +5,7 @@ metadata:

# labels:
#   environment: demo
spec:
  dockerImage: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
  teamId: "acid"
  volume:
    size: 1Gi

@@ -25,25 +21,31 @@ spec:

  - 127.0.0.1/32
  databases:
    foo: zalando
  postgresql:
    version: "11"
    parameters: # Expert section
      shared_buffers: "32MB"
      max_connections: "10"
      log_statement: "all"

  enableShmVolume: true
#  spiloFSGroup: 103
#  podAnnotations:
#    annotation.key: value
#  serviceAnnotations:
#    annotation.key: value
#  podPriorityClassName: "spilo-pod-priority"
#  tolerations:
#  - key: postgres
#    operator: Exists
#    effect: NoSchedule
  resources:
    requests:
      cpu: 10m
      memory: 100Mi
    limits:
      cpu: 500m
      memory: 500Mi
  patroni:
    initdb:
      encoding: "UTF8"

@@ -63,6 +65,7 @@ spec:

    loop_wait: &loop_wait 10
    retry_timeout: 10
    maximum_lag_on_failover: 33554432

# restore a Postgres DB with point-in-time-recovery
# with a non-empty timestamp, clone from an S3 bucket using the latest backup before the timestamp
# with an empty/absent timestamp, clone from an existing alive cluster using pg_basebackup

@@ -75,9 +78,15 @@ spec:

# run periodic backups with k8s cron jobs
#  enableLogicalBackup: true
#  logicalBackupSchedule: "30 00 * * *"

#  maintenanceWindows:
#  - 01:00-06:00 #UTC
#  - Sat:00:00-04:00

  initContainers:
  - name: date
    image: busybox
    command: [ "/bin/date" ]
#  sidecars:
#  - name: "telegraf-sidecar"
#    image: "telegraf:latest"
@@ -15,11 +15,11 @@ data:

  # custom_pod_annotations: "keya:valuea,keyb:valueb"
  db_hosted_zone: db.example.com
  debug_logging: "true"
  # default_cpu_limit: "1"
  # default_cpu_request: 100m
  # default_memory_limit: 500Mi
  # default_memory_request: 100Mi
  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
  # enable_admin_role_for_users: "true"
  # enable_crd_validation: "true"
  # enable_database_access: "true"

@@ -40,6 +40,7 @@ data:

  # logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
  # logical_backup_s3_access_key_id: ""
  # logical_backup_s3_bucket: "my-bucket-url"
  # logical_backup_s3_region: ""
  # logical_backup_s3_endpoint: ""
  # logical_backup_s3_secret_access_key: ""
  # logical_backup_s3_sse: "AES256"

@@ -48,6 +49,8 @@ data:

  # master_pod_move_timeout: 10m
  # max_instances: "-1"
  # min_instances: "-1"
  # min_cpu_limit: 250m
  # min_memory_limit: 250Mi
  # node_readiness_label: ""
  # oauth_token_secret_name: postgresql-operator
  # pam_configuration: |

@@ -60,7 +63,9 @@ data:

  pod_label_wait_timeout: 10m
  pod_management_policy: "ordered_ready"
  pod_role_label: spilo-role
  # pod_service_account_definition: ""
  pod_service_account_name: "postgres-pod"
  # pod_service_account_role_binding_definition: ""
  pod_terminate_grace_period: 5m
  # postgres_superuser_teams: "postgres_superusers"
  # protected_role_names: "admin"
@@ -5,3 +5,4 @@ resources:

- postgresql-operator-default-configuration.yaml
- operator-service-account-rbac.yaml
- postgres-operator.yaml
- api-service.yaml
@@ -1,15 +1,16 @@

apiVersion: v1
kind: ServiceAccount
metadata:
  name: postgres-operator
  namespace: default

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: postgres-operator
rules:
# all verbs allowed for custom operator resources
- apiGroups:
  - acid.zalan.do
  resources:

@@ -17,7 +18,15 @@ rules:

  - postgresqls/status
  - operatorconfigurations
  verbs:
  - create
  - delete
  - deletecollection
  - get
  - list
  - patch
  - update
  - watch
# to create or get/update CRDs when starting up
- apiGroups:
  - apiextensions.k8s.io
  resources:

@@ -27,12 +36,14 @@ rules:

  - get
  - patch
  - update
# to read configuration from ConfigMaps
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
# to manage endpoints which are also used by Patroni
- apiGroups:
  - ""
  resources:

@@ -44,16 +55,19 @@ rules:

  - get
  - list
  - patch
  - update
  - watch
# to CRUD secrets for database access
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - create
  - delete
  - get
  - update
# to check nodes for node readiness label
- apiGroups:
  - ""
  resources:

@@ -62,6 +76,7 @@ rules:

  - get
  - list
  - watch
# to read or delete existing PVCs. Creation via StatefulSet
- apiGroups:
  - ""
  resources:

@@ -70,6 +85,7 @@ rules:

  - delete
  - get
  - list
# to read existing PVs. Creation should be done via dynamic provisioning
- apiGroups:
  - ""
  resources:

@@ -78,6 +94,7 @@ rules:

  - get
  - list
  - update # only for resizing AWS volumes
# to watch Spilo pods and do rolling updates. Creation via StatefulSet
- apiGroups:
  - ""
  resources:

@@ -86,14 +103,17 @@ rules:

  - delete
  - get
  - list
  - patch
  - update
  - watch
# to resize the filesystem in Spilo pods when increasing volume size
- apiGroups:
  - ""
  resources:
  - pods/exec
  verbs:
  - create
# to CRUD services to point to Postgres cluster instances
- apiGroups:
  - ""
  resources:

@@ -103,6 +123,8 @@ rules:

  - delete
  - get
  - patch
  - update
# to CRUD the StatefulSet which controls the Postgres cluster instances
- apiGroups:
  - apps
  resources:

@@ -113,12 +135,26 @@ rules:

  - get
  - list
  - patch
# to CRUD cron jobs for logical backups
- apiGroups:
  - batch
  resources:
  - cronjobs
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
# to get namespaces operator resources can run in
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
# to define PDBs. Update happens via delete/create
- apiGroups:
  - policy
  resources:

@@ -127,6 +163,7 @@ rules:

  - create
  - delete
  - get
# to create ServiceAccounts in each namespace the operator watches
- apiGroups:
  - ""
  resources:

@@ -134,44 +171,82 @@ rules:

  verbs:
  - get
  - create
# to create role bindings to the postgres-pod service account
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - rolebindings
  verbs:
  - get
  - create
# to grant privilege to run privileged pods
- apiGroups:
  - extensions
  resources:
  - podsecuritypolicies
  resourceNames:
  - privileged
  verbs:
  - use

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: postgres-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: postgres-operator
subjects:
- kind: ServiceAccount
  name: postgres-operator
  namespace: default

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: postgres-pod
rules:
# Patroni needs to watch and manage endpoints
- apiGroups:
  - ""
  resources:
  - endpoints
  verbs:
  - create
  - delete
  - deletecollection
  - get
  - list
  - patch
  - update
  - watch
# Patroni needs to watch pods
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list
  - patch
  - update
  - watch
# to let Patroni create a headless service
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - create
# to run privileged pods
- apiGroups:
  - extensions
  resources:
  - podsecuritypolicies
  resourceNames:
  - privileged
  verbs:
  - use
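A quick sanity check of the new roles after applying the manifest;
`kubectl auth can-i` can impersonate the service accounts, assuming both live
in the `default` namespace:

```bash
# the operator account may manage statefulsets, the pod account may patch endpoints
kubectl auth can-i create statefulsets \
  --as=system:serviceaccount:default:postgres-operator
kubectl auth can-i patch endpoints \
  --as=system:serviceaccount:default:postgres-pod
```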
@@ -97,6 +97,8 @@ spec:

                type: array
                items:
                  type: string
              master_pod_move_timeout:
                type: string
              node_readiness_label:
                type: object
                additionalProperties:

@@ -114,10 +116,16 @@ spec:

                enum:
                - "ordered_ready"
                - "parallel"
              pod_priority_class_name:
                type: string
              pod_role_label:
                type: string
              pod_service_account_definition:
                type: string
              pod_service_account_name:
                type: string
              pod_service_account_role_binding_definition:
                type: string
              pod_terminate_grace_period:
                type: string
              secret_name_template:

@@ -147,6 +155,12 @@ spec:

              default_memory_request:
                type: string
                pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
              min_cpu_limit:
                type: string
                pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
              min_memory_limit:
                type: string
                pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
          timeouts:
            type: object
            properties:

@@ -165,16 +179,16 @@ spec:

          load_balancer:
            type: object
            properties:
              custom_service_annotations:
                type: object
                additionalProperties:
                  type: string
              db_hosted_zone:
                type: string
              enable_master_load_balancer:
                type: boolean
              enable_replica_load_balancer:
                type: boolean
              master_dns_name_format:
                type: string
              replica_dns_name_format:
                type: string

@@ -197,21 +211,23 @@ spec:

          logical_backup:
            type: object
            properties:
              logical_backup_docker_image:
                type: string
              logical_backup_s3_bucket:
                type: string
              logical_backup_s3_endpoint:
                type: string
              logical_backup_s3_region:
                type: string
              logical_backup_s3_access_key_id:
                type: string
              logical_backup_s3_secret_access_key:
                type: string
              logical_backup_s3_sse:
                type: string
              logical_backup_schedule:
                type: string
                pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'
          debug:
            type: object
            properties:
@@ -12,17 +12,17 @@ spec:

      labels:
        name: postgres-operator
    spec:
      serviceAccountName: postgres-operator
      containers:
      - name: postgres-operator
        image: registry.opensource.zalan.do/acid/postgres-operator:v1.4.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: 100m
            memory: 250Mi
          limits:
            cpu: 500m
            memory: 500Mi
        securityContext:
          runAsUser: 1000
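After changing the image or resources, the deployment can be re-applied and
verified. A short sketch, assuming the manifest sits at its usual path in this
repo:

```bash
kubectl apply -f manifests/postgres-operator.yaml
# the pod should come up with the new image and the postgres-operator service account
kubectl get pods -l name=postgres-operator \
  -o jsonpath='{.items[0].spec.containers[0].image}{"\n"}{.items[0].spec.serviceAccountName}'
```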
@@ -5,7 +5,7 @@ metadata:

configuration:
  # enable_crd_validation: true
  etcd_host: ""
  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
  # enable_shm_volume: true
  max_instances: -1
  min_instances: -1

@@ -34,6 +34,7 @@ configuration:

    # inherited_labels:
    # - application
    # - environment
    master_pod_move_timeout: 20m
    # node_readiness_label:
    #   status: ready
    oauth_token_secret_name: postgresql-operator

@@ -41,8 +42,11 @@ configuration:

    pod_antiaffinity_topology_key: "kubernetes.io/hostname"
    # pod_environment_configmap: ""
    pod_management_policy: "ordered_ready"
    # pod_priority_class_name: ""
    pod_role_label: spilo-role
    # pod_service_account_definition: ""
    pod_service_account_name: postgres-pod
    # pod_service_account_role_binding_definition: ""
    pod_terminate_grace_period: 5m
    secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
    # spilo_fsgroup: 103

@@ -51,10 +55,12 @@ configuration:

    #   postgres: "key:postgres,operator:Exists,effect:NoSchedule"
    watched_namespace: "*"
  postgres_pod_resources:
    default_cpu_limit: "1"
    default_cpu_request: 100m
    default_memory_limit: 500Mi
    default_memory_request: 100Mi
    # min_cpu_limit: 250m
    # min_memory_limit: 250Mi
  timeouts:
    pod_label_wait_timeout: 10m
    pod_deletion_wait_timeout: 10m

@@ -80,10 +86,11 @@ configuration:

    # wal_s3_bucket: ""
  logical_backup:
    logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
    # logical_backup_s3_access_key_id: ""
    logical_backup_s3_bucket: "my-bucket-url"
    # logical_backup_s3_endpoint: ""
    # logical_backup_s3_region: ""
    # logical_backup_s3_secret_access_key: ""
    logical_backup_s3_sse: "AES256"
    logical_backup_schedule: "30 00 * * *"
  debug:

@@ -104,7 +111,7 @@ configuration:

    log_statement: all
    # teams_api_url: ""
  logging_rest_api:
    api_port: 8080
    cluster_history_entries: 1000
    ring_log_lines: 100
  scalyr:

@@ -112,6 +119,6 @@ configuration:

    scalyr_cpu_limit: "1"
    scalyr_cpu_request: 100m
    # scalyr_image: ""
    scalyr_memory_limit: 500Mi
    scalyr_memory_request: 50Mi
    # scalyr_server_url: ""
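With the CRD-based configuration, the values the operator actually runs with
can be inspected at any time. This assumes the default object name used by this
repo's manifests:

```bash
# show the operator configuration the running operator is using
kubectl get operatorconfigurations postgresql-operator-default-configuration -o yaml
```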
@@ -58,7 +58,7 @@ spec:

              s3_secret_access_key:
                type: string
              s3_force_path_style:
                type: boolean
              s3_wal_path:
                type: string
              timestamp:

@@ -230,6 +230,10 @@ spec:

                pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                # Note: the value specified here must not be zero or be higher
                # than the corresponding limit.
          serviceAnnotations:
            type: object
            additionalProperties:
              type: string
          sidecars:
            type: array
            nullable: true
@@ -13,7 +13,3 @@ spec:

  # Make this a standby cluster and provide the s3 bucket path of source cluster for continuous streaming.
  standby:
    s3_wal_path: "s3://path/to/bucket/containing/wal/of/source/cluster/"
@@ -11,7 +11,14 @@ rules:

  - postgresqls
  - postgresqls/status
  verbs:
  - create
  - delete
  - deletecollection
  - get
  - list
  - patch
  - update
  - watch

---
apiVersion: rbac.authorization.k8s.io/v1

@@ -48,4 +55,3 @@ rules:

  - get
  - list
  - watch
@@ -160,7 +160,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{

				Type: "string",
			},
			"s3_force_path_style": {
				Type: "boolean",
			},
			"s3_wal_path": {
				Type: "string",

@@ -383,6 +383,14 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{

					},
				},
			},
			"serviceAnnotations": {
				Type: "object",
				AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{
					Schema: &apiextv1beta1.JSONSchemaProps{
						Type: "string",
					},
				},
			},
			"sidecars": {
				Type: "array",
				Items: &apiextv1beta1.JSONSchemaPropsOrArray{

@@ -717,6 +725,9 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation

					},
				},
			},
			"master_pod_move_timeout": {
				Type: "string",
			},
			"node_readiness_label": {
				Type: "object",
				AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{

@@ -748,12 +759,21 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation

					},
				},
			},
			"pod_priority_class_name": {
				Type: "string",
			},
			"pod_role_label": {
				Type: "string",
			},
			"pod_service_account_definition": {
				Type: "string",
			},
			"pod_service_account_name": {
				Type: "string",
			},
			"pod_service_account_role_binding_definition": {
				Type: "string",
			},
			"pod_terminate_grace_period": {
				Type: "string",
			},

@@ -798,6 +818,14 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation

				Type:    "string",
				Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
			},
			"min_cpu_limit": {
				Type:    "string",
				Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
			},
			"min_memory_limit": {
				Type:    "string",
				Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
			},
		},
	},
	"timeouts": {

@@ -826,6 +854,14 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation

	"load_balancer": {
		Type: "object",
		Properties: map[string]apiextv1beta1.JSONSchemaProps{
			"custom_service_annotations": {
				Type: "object",
				AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{
					Schema: &apiextv1beta1.JSONSchemaProps{
						Type: "string",
					},
				},
			},
			"db_hosted_zone": {
				Type: "string",
			},

@@ -835,14 +871,6 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation

			"enable_replica_load_balancer": {
				Type: "boolean",
			},
			"master_dns_name_format": {
				Type: "string",
			},

@@ -877,28 +905,31 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation

	"logical_backup": {
		Type: "object",
		Properties: map[string]apiextv1beta1.JSONSchemaProps{
			"logical_backup_docker_image": {
				Type: "string",
			},
			"logical_backup_s3_bucket": {
				Type: "string",
			},
			"logical_backup_s3_endpoint": {
				Type: "string",
			},
			"logical_backup_s3_region": {
				Type: "string",
			},
			"logical_backup_s3_access_key_id": {
				Type: "string",
			},
			"logical_backup_s3_secret_access_key": {
				Type: "string",
			},
			"logical_backup_s3_sse": {
				Type: "string",
			},
			"logical_backup_schedule": {
				Type:    "string",
				Pattern: "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$",
			},
		},
	},
	"debug": {
@@ -67,7 +67,7 @@ type KubernetesMetaConfiguration struct {

	// TODO: use namespacedname
	PodEnvironmentConfigMap    string   `json:"pod_environment_configmap,omitempty"`
	PodPriorityClassName       string   `json:"pod_priority_class_name,omitempty"`
	MasterPodMoveTimeout       Duration `json:"master_pod_move_timeout,omitempty"`
	EnablePodAntiAffinity      bool     `json:"enable_pod_antiaffinity,omitempty"`
	PodAntiAffinityTopologyKey string   `json:"pod_antiaffinity_topology_key,omitempty"`
	PodManagementPolicy        string   `json:"pod_management_policy,omitempty"`

@@ -79,6 +79,8 @@ type PostgresPodResourcesDefaults struct {

	DefaultMemoryRequest string `json:"default_memory_request,omitempty"`
	DefaultCPULimit      string `json:"default_cpu_limit,omitempty"`
	DefaultMemoryLimit   string `json:"default_memory_limit,omitempty"`
	MinCPULimit          string `json:"min_cpu_limit,omitempty"`
	MinMemoryLimit       string `json:"min_memory_limit,omitempty"`
}

// OperatorTimeouts defines the timeout of ResourceCheck, PodWait, ReadyWait

@@ -118,7 +120,7 @@ type OperatorDebugConfiguration struct {

	EnableDBAccess bool `json:"enable_database_access,omitempty"`
}

// TeamsAPIConfiguration defines the configuration of TeamsAPI
type TeamsAPIConfiguration struct {
	EnableTeamsAPI bool   `json:"enable_teams_api,omitempty"`
	TeamsAPIUrl    string `json:"teams_api_url,omitempty"`

@@ -150,6 +152,18 @@ type ScalyrConfiguration struct {

	ScalyrMemoryLimit string `json:"scalyr_memory_limit,omitempty"`
}

// OperatorLogicalBackupConfiguration defines configuration for logical backup
type OperatorLogicalBackupConfiguration struct {
	Schedule          string `json:"logical_backup_schedule,omitempty"`
	DockerImage       string `json:"logical_backup_docker_image,omitempty"`
	S3Bucket          string `json:"logical_backup_s3_bucket,omitempty"`
	S3Region          string `json:"logical_backup_s3_region,omitempty"`
	S3Endpoint        string `json:"logical_backup_s3_endpoint,omitempty"`
	S3AccessKeyID     string `json:"logical_backup_s3_access_key_id,omitempty"`
	S3SecretAccessKey string `json:"logical_backup_s3_secret_access_key,omitempty"`
	S3SSE             string `json:"logical_backup_s3_sse,omitempty"`
}

// OperatorConfigurationData defines the operation config
type OperatorConfigurationData struct {
	EnableCRDValidation *bool `json:"enable_crd_validation,omitempty"`

@@ -176,24 +190,5 @@ type OperatorConfigurationData struct {

	LogicalBackup OperatorLogicalBackupConfiguration `json:"logical_backup"`
}

// Duration shortens this frequently used name
type Duration time.Duration
@@ -60,6 +60,7 @@ type PostgresSpec struct {

	LogicalBackupSchedule string              `json:"logicalBackupSchedule,omitempty"`
	StandbyCluster        *StandbyDescription `json:"standby"`
	PodAnnotations        map[string]string   `json:"podAnnotations"`
	ServiceAnnotations    map[string]string   `json:"serviceAnnotations"`

	// deprecated json tags
	InitContainersOld []v1.Container `json:"init_containers,omitempty"`
@ -13,127 +13,139 @@ import (
)

var parseTimeTests = []struct {
about string
in string
out metav1.Time
err error
}{
{"parse common time with minutes", "16:08", mustParseTime("16:08"), nil},
{"parse time with zeroed minutes", "11:00", mustParseTime("11:00"), nil},
{"parse corner case last minute of the day", "23:59", mustParseTime("23:59"), nil},
{"expect error as hour is out of range", "26:09", metav1.Now(), errors.New(`parsing time "26:09": hour out of range`)},
{"expect error as minute is out of range", "23:69", metav1.Now(), errors.New(`parsing time "23:69": minute out of range`)},
}

var parseWeekdayTests = []struct {
about string
in string
out time.Weekday
err error
}{
{"parse common weekday", "Wed", time.Wednesday, nil},
{"expect error as weekday is invalid", "Sunday", time.Weekday(0), errors.New("incorrect weekday")},
{"expect error as weekday is empty", "", time.Weekday(0), errors.New("incorrect weekday")},
}

var clusterNames = []struct {
about string
in string
inTeam string
clusterName string
err error
}{
{"common team and cluster name", "acid-test", "acid", "test", nil},
{"cluster name with hyphen", "test-my-name", "test", "my-name", nil},
{"cluster and team name with hyphen", "my-team-another-test", "my-team", "another-test", nil},
{"expect error as cluster name is just hyphens", "------strange-team-cluster", "-----", "strange-team-cluster",
errors.New(`name must confirm to DNS-1035, regex used for validation is "^[a-z]([-a-z0-9]*[a-z0-9])?$"`)},
{"expect error as cluster name is too long", "fooobar-fooobarfooobarfooobarfooobarfooobarfooobarfooobarfooobar", "fooobar", "",
errors.New("name cannot be longer than 58 characters")},
{"expect error as cluster name does not match {TEAM}-{NAME} format", "acid-test", "test", "", errors.New("name must match {TEAM}-{NAME} format")},
{"expect error as team and cluster name are empty", "-test", "", "", errors.New("team name is empty")},
{"expect error as cluster name is empty and team name is a hyphen", "-test", "-", "", errors.New("name must match {TEAM}-{NAME} format")},
{"expect error as the full cluster name is empty and team name is a hyphen", "", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '', team name '-'")},
{"expect error as cluster and team name are hyphens", "-", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '-', team name '-'")},
// the user may specify the team part of the full cluster name differently from the team name returned by the Teams API;
// if the actual Teams API name is long enough, this will fail the check
{"expect error as team name does not match", "foo-bar", "qwerty", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name 'foo-bar', team name 'qwerty'")},
}

var cloneClusterDescriptions = []struct {
about string
in *CloneDescription
err error
}{
{"cluster name invalid but EndTimestamp is not empty", &CloneDescription{"foo+bar", "", "NotEmpty", "", "", "", "", nil}, nil},
{"expect error as cluster name does not match DNS-1035", &CloneDescription{"foo+bar", "", "", "", "", "", "", nil},
errors.New(`clone cluster name must confirm to DNS-1035, regex used for validation is "^[a-z]([-a-z0-9]*[a-z0-9])?$"`)},
{"expect error as cluster name is too long", &CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", "", "", "", "", "", nil},
errors.New("clone cluster name must be no longer than 63 characters")},
{"common cluster name", &CloneDescription{"foobar", "", "", "", "", "", "", nil}, nil},
}
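The about field added to every table above is what the t.Run conversions further down build on: each table entry becomes an individually named, addressable subtest. A small sketch of the payoff (the -run pattern is an illustrative example; t.Run replaces spaces with underscores):

	for _, tt := range clusterNames {
		t.Run(tt.about, func(t *testing.T) {
			// failures now report the case name, and a single case can be
			// rerun with: go test -run 'TestClusterName/cluster_name_with_hyphen'
		})
	}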
var maintenanceWindows = []struct {
about string
in []byte
out MaintenanceWindow
err error
}{{"regular scenario",
[]byte(`"Tue:10:00-20:00"`),
MaintenanceWindow{
Everyday: false,
Weekday: time.Tuesday,
StartTime: mustParseTime("10:00"),
EndTime: mustParseTime("20:00"),
}, nil},
{"starts and ends at the same time",
[]byte(`"Mon:10:00-10:00"`),
MaintenanceWindow{
Everyday: false,
Weekday: time.Monday,
StartTime: mustParseTime("10:00"),
EndTime: mustParseTime("10:00"),
}, nil},
{"starts and ends at 00:00 on Sunday",
[]byte(`"Sun:00:00-00:00"`),
MaintenanceWindow{
Everyday: false,
Weekday: time.Sunday,
StartTime: mustParseTime("00:00"),
EndTime: mustParseTime("00:00"),
}, nil},
{"without day indication should default to everyday",
[]byte(`"01:00-10:00"`),
MaintenanceWindow{
Everyday: true,
Weekday: time.Sunday,
StartTime: mustParseTime("01:00"),
EndTime: mustParseTime("10:00"),
}, nil},
{"expect error as 'From' is later than 'To'", []byte(`"Mon:12:00-11:00"`), MaintenanceWindow{}, errors.New(`'From' time must be prior to the 'To' time`)},
{"expect error as 'From' is later than 'To' with the 00:00 corner case", []byte(`"Mon:10:00-00:00"`), MaintenanceWindow{}, errors.New(`'From' time must be prior to the 'To' time`)},
{"expect error as 'From' time is not valid", []byte(`"Wed:33:00-00:00"`), MaintenanceWindow{}, errors.New(`could not parse start time: parsing time "33:00": hour out of range`)},
{"expect error as 'To' time is not valid", []byte(`"Wed:00:00-26:00"`), MaintenanceWindow{}, errors.New(`could not parse end time: parsing time "26:00": hour out of range`)},
{"expect error as weekday is not valid", []byte(`"Sunday:00:00-00:00"`), MaintenanceWindow{}, errors.New(`could not parse weekday: incorrect weekday`)},
{"expect error as weekday is empty", []byte(`":00:00-10:00"`), MaintenanceWindow{}, errors.New(`could not parse weekday: incorrect weekday`)},
{"expect error as the maintenance window includes seconds", []byte(`"Mon:00:00:00-10:00:00"`), MaintenanceWindow{}, errors.New(`incorrect maintenance window format`)},
{"expect error as the 'To' time includes seconds", []byte(`"Mon:00:00-00:00:00"`), MaintenanceWindow{}, errors.New("could not parse end time: incorrect time format")},
{"expect error as the 'To' time is missing", []byte(`"Mon:00:00"`), MaintenanceWindow{}, errors.New("incorrect maintenance window format")}}
var postgresStatus = []struct {
about string
in []byte
out PostgresStatus
err error
}{
{"cluster running", []byte(`{"PostgresClusterStatus":"Running"}`),
PostgresStatus{PostgresClusterStatus: ClusterStatusRunning}, nil},
{"cluster status undefined", []byte(`{"PostgresClusterStatus":""}`),
PostgresStatus{PostgresClusterStatus: ClusterStatusUnknown}, nil},
{"cluster running without full JSON format", []byte(`"Running"`),
PostgresStatus{PostgresClusterStatus: ClusterStatusRunning}, nil},
{"cluster status empty", []byte(`""`),
PostgresStatus{PostgresClusterStatus: ClusterStatusUnknown}, nil}}
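These cases pin down that the status field unmarshals from both encodings the operator has used: the legacy bare string and the newer object with a PostgresClusterStatus key, with empty values mapping to ClusterStatusUnknown. In miniature:

	var ps PostgresStatus
	_ = ps.UnmarshalJSON([]byte(`"Running"`))                           // legacy plain-string form
	_ = ps.UnmarshalJSON([]byte(`{"PostgresClusterStatus":"Running"}`)) // /status subresource form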
var tmp postgresqlCopy
var unmarshalCluster = []struct {
about string
in []byte
out Postgresql
marshal []byte
err error
}{
{
about: "example with simple status field",
in: []byte(`{
"kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`),
@ -147,12 +159,14 @@ var unmarshalCluster = []struct {
},
Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid},
// This error message can vary between Go versions, so compute it for the current version.
Error: json.Unmarshal([]byte(`{
"kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(),
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`),
err: nil},
{
about: "example with /status subresource",
in: []byte(`{
"kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`),
@ -166,13 +180,14 @@ var unmarshalCluster = []struct {
},
Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid},
// This error message can vary between Go versions, so compute it for the current version.
Error: json.Unmarshal([]byte(`{
"kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(),
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
err: nil},
{
about: "example with detailed input manifest and deprecated pod_priority_class_name -> podPriorityClassName",
in: []byte(`{
"kind": "Postgresql",
"apiVersion": "acid.zalan.do/v1",
@ -321,8 +336,8 @@ var unmarshalCluster = []struct {
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
err: nil},
{
about: "example with teamId set in input",
in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "teapot-testcluster1"}, "spec": {"teamId": "acid"}}`),
out: Postgresql{
TypeMeta: metav1.TypeMeta{
@ -338,8 +353,8 @@ var unmarshalCluster = []struct {
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null} ,"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
err: nil},
{
about: "example with clone",
in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "clone": {"cluster": "team-batman"}}}`),
out: Postgresql{
TypeMeta: metav1.TypeMeta{
@ -360,8 +375,8 @@ var unmarshalCluster = []struct {
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{"cluster":"team-batman"}},"status":{"PostgresClusterStatus":""}}`),
err: nil},
{
about: "standby example",
in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "standby": {"s3_wal_path": "s3://custom/path/to/bucket/"}}}`),
out: Postgresql{
TypeMeta: metav1.TypeMeta{
@ -382,24 +397,28 @@ var unmarshalCluster = []struct {
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"standby":{"s3_wal_path":"s3://custom/path/to/bucket/"}},"status":{"PostgresClusterStatus":""}}`),
err: nil},
// erroneous examples
{
about: "expect error on malformed JSON",
in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1"`),
out: Postgresql{},
marshal: []byte{},
err: errors.New("unexpected end of JSON input")},
{
about: "expect error on JSON with a malformed field value",
in: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
out: Postgresql{},
marshal: []byte{},
err: errors.New("invalid character 'q' looking for beginning of value"),
},
}

var postgresqlList = []struct {
about string
in []byte
out PostgresqlList
err error
}{
{"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"9.6"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
PostgresqlList{
TypeMeta: metav1.TypeMeta{
Kind: "List",
@ -433,20 +452,88 @@ var postgresqlList = []struct {
}},
},
nil},
{"expect error on malformed JSON", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace"`),
PostgresqlList{},
errors.New("unexpected end of JSON input")}}

var podAnnotations = []struct {
about string
in []byte
annotations map[string]string
err error
}{{
about: "common annotations",
in: []byte(`{
"kind": "Postgresql",
"apiVersion": "acid.zalan.do/v1",
"metadata": {
"name": "acid-testcluster1"
},
"spec": {
"podAnnotations": {
"foo": "bar"
},
"teamId": "acid",
"clone": {
"cluster": "team-batman"
}
}
}`),
annotations: map[string]string{"foo": "bar"},
err: nil},
}
var serviceAnnotations = []struct {
about string
in []byte
annotations map[string]string
err error
}{
{
about: "common single annotation",
in: []byte(`{
"kind": "Postgresql",
"apiVersion": "acid.zalan.do/v1",
"metadata": {
"name": "acid-testcluster1"
},
"spec": {
"serviceAnnotations": {
"foo": "bar"
},
"teamId": "acid",
"clone": {
"cluster": "team-batman"
}
}
}`),
annotations: map[string]string{"foo": "bar"},
err: nil,
},
{
about: "common two annotations",
in: []byte(`{
"kind": "Postgresql",
"apiVersion": "acid.zalan.do/v1",
"metadata": {
"name": "acid-testcluster1"
},
"spec": {
"serviceAnnotations": {
"foo": "bar",
"post": "gres"
},
"teamId": "acid",
"clone": {
"cluster": "team-batman"
}
}
}`),
annotations: map[string]string{"foo": "bar", "post": "gres"},
err: nil,
},
}
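Both fixtures assert the same round trip: whatever is set under spec.serviceAnnotations in the manifest must come out unchanged in Spec.ServiceAnnotations after unmarshaling. Schematically (manifest stands for one of the JSON byte slices above):

	var cluster Postgresql
	if err := cluster.UnmarshalJSON(manifest); err == nil {
		_ = cluster.Spec.ServiceAnnotations // e.g. map[string]string{"foo": "bar", "post": "gres"}
	}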
func mustParseTime(s string) metav1.Time {
v, err := time.Parse("15:04", s)
if err != nil {
@ -458,12 +545,13 @@ func mustParseTime(s string) metav1.Time {
func TestParseTime(t *testing.T) {
for _, tt := range parseTimeTests {
t.Run(tt.about, func(t *testing.T) {
aTime, err := parseTime(tt.in)
if err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("ParseTime expected error: %v, got: %v", tt.err, err)
}
return
} else if tt.err != nil {
t.Errorf("Expected error: %v", tt.err)
}
@ -471,17 +559,19 @@ func TestParseTime(t *testing.T) {
if aTime != tt.out {
t.Errorf("Expected time: %v, got: %v", tt.out, aTime)
}
})
}
}
func TestWeekdayTime(t *testing.T) {
for _, tt := range parseWeekdayTests {
t.Run(tt.about, func(t *testing.T) {
aTime, err := parseWeekday(tt.in)
if err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("ParseWeekday expected error: %v, got: %v", tt.err, err)
}
return
} else if tt.err != nil {
t.Errorf("Expected error: %v", tt.err)
}
@ -489,18 +579,20 @@ func TestWeekdayTime(t *testing.T) {
if aTime != tt.out {
t.Errorf("Expected weekday: %v, got: %v", tt.out, aTime)
}
})
}
}
func TestPodAnnotations(t *testing.T) {
for _, tt := range podAnnotations {
t.Run(tt.about, func(t *testing.T) {
var cluster Postgresql
err := cluster.UnmarshalJSON(tt.in)
if err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("Unable to unmarshal cluster with podAnnotations: expected %v got %v", tt.err, err)
}
return
}
for k, v := range cluster.Spec.PodAnnotations {
found, expected := v, tt.annotations[k]
@ -508,28 +600,53 @@ func TestClusterAnnotations(t *testing.T) {
t.Errorf("Didn't find correct value for key %v in for podAnnotations: Expected %v found %v", k, expected, found) t.Errorf("Didn't find correct value for key %v in for podAnnotations: Expected %v found %v", k, expected, found)
} }
} }
})
}
}
func TestServiceAnnotations(t *testing.T) {
for _, tt := range serviceAnnotations {
t.Run(tt.about, func(t *testing.T) {
var cluster Postgresql
err := cluster.UnmarshalJSON(tt.in)
if err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("Unable to marshal cluster with serviceAnnotations: expected %v got %v", tt.err, err)
}
return
}
for k, v := range cluster.Spec.ServiceAnnotations {
found, expected := v, tt.annotations[k]
if found != expected {
t.Errorf("Didn't find correct value for key %v in for serviceAnnotations: Expected %v found %v", k, expected, found)
}
}
})
} }
} }
func TestClusterName(t *testing.T) {
for _, tt := range clusterNames {
t.Run(tt.about, func(t *testing.T) {
name, err := extractClusterName(tt.in, tt.inTeam)
if err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("extractClusterName expected error: %v, got: %v", tt.err, err)
}
return
} else if tt.err != nil {
t.Errorf("Expected error: %v", tt.err)
}
if name != tt.clusterName {
t.Errorf("Expected clusterName: %q, got: %q", tt.clusterName, name)
}
})
}
}
func TestCloneClusterDescription(t *testing.T) {
for _, tt := range cloneClusterDescriptions {
t.Run(tt.about, func(t *testing.T) {
if err := validateCloneClusterDescription(tt.in); err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("testCloneClusterDescription expected error: %v, got: %v", tt.err, err)
@ -537,18 +654,20 @@ func TestCloneClusterDescription(t *testing.T) {
} else if tt.err != nil {
t.Errorf("Expected error: %v", tt.err)
}
})
}
}
func TestUnmarshalMaintenanceWindow(t *testing.T) {
for _, tt := range maintenanceWindows {
t.Run(tt.about, func(t *testing.T) {
var m MaintenanceWindow
err := m.UnmarshalJSON(tt.in)
if err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("MaintenanceWindow unmarshal expected error: %v, got %v", tt.err, err)
}
return
} else if tt.err != nil {
t.Errorf("Expected error: %v", tt.err)
}
@ -556,13 +675,15 @@ func TestUnmarshalMaintenanceWindow(t *testing.T) {
if !reflect.DeepEqual(m, tt.out) {
t.Errorf("Expected maintenance window: %#v, got: %#v", tt.out, m)
}
})
}
}
func TestMarshalMaintenanceWindow(t *testing.T) {
for _, tt := range maintenanceWindows {
t.Run(tt.about, func(t *testing.T) {
if tt.err != nil {
return
}

s, err := tt.out.MarshalJSON()
@ -573,37 +694,40 @@ func TestMarshalMaintenanceWindow(t *testing.T) {
if !bytes.Equal(s, tt.in) {
t.Errorf("Expected Marshal: %q, got: %q", string(tt.in), string(s))
}
})
}
}
func TestUnmarshalPostgresStatus(t *testing.T) {
for _, tt := range postgresStatus {
t.Run(tt.about, func(t *testing.T) {
var ps PostgresStatus
err := ps.UnmarshalJSON(tt.in)
if err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("CR status unmarshal expected error: %v, got %v", tt.err, err)
}
return
}

if !reflect.DeepEqual(ps, tt.out) {
t.Errorf("Expected status: %#v, got: %#v", tt.out, ps)
}
})
}
}
func TestPostgresUnmarshal(t *testing.T) {
for _, tt := range unmarshalCluster {
t.Run(tt.about, func(t *testing.T) {
var cluster Postgresql
err := cluster.UnmarshalJSON(tt.in)
if err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("Unmarshal expected error: %v, got: %v", tt.err, err)
}
return
} else if tt.err != nil {
t.Errorf("Expected error: %v", tt.err)
}
@ -611,13 +735,16 @@ func TestPostgresUnmarshal(t *testing.T) {
if !reflect.DeepEqual(cluster, tt.out) {
t.Errorf("Expected Postgresql: %#v, got %#v", tt.out, cluster)
}
})
}
}
func TestMarshal(t *testing.T) {
for _, tt := range unmarshalCluster {
t.Run(tt.about, func(t *testing.T) {
if tt.err != nil {
return
}

// Unmarshal and marshal example to capture api changes
@ -627,7 +754,7 @@ func TestMarshal(t *testing.T) {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("Backwards compatibility unmarshal expected error: %v, got: %v", tt.err, err)
}
return
}

expected, err := json.Marshal(cluster)
if err != nil {
@ -641,11 +768,14 @@ func TestMarshal(t *testing.T) {
if !bytes.Equal(m, expected) {
t.Errorf("Marshal Postgresql \nexpected: %q, \ngot: %q", string(expected), string(m))
}
})
}
}
func TestPostgresMeta(t *testing.T) {
for _, tt := range unmarshalCluster {
t.Run(tt.about, func(t *testing.T) {
if a := tt.out.GetObjectKind(); a != &tt.out.TypeMeta {
t.Errorf("GetObjectKindMeta \nexpected: %v, \ngot: %v", tt.out.TypeMeta, a)
}
@ -653,13 +783,15 @@ func TestPostgresMeta(t *testing.T) {
if a := tt.out.GetObjectMeta(); reflect.DeepEqual(a, tt.out.ObjectMeta) {
t.Errorf("GetObjectMeta \nexpected: %v, \ngot: %v", tt.out.ObjectMeta, a)
}
})
}
}
func TestPostgresListMeta(t *testing.T) {
for _, tt := range postgresqlList {
t.Run(tt.about, func(t *testing.T) {
if tt.err != nil {
return
}

if a := tt.out.GetObjectKind(); a != &tt.out.TypeMeta {
@ -671,17 +803,19 @@ func TestPostgresListMeta(t *testing.T) {
}

return
})
}
func TestPostgresqlClone(t *testing.T) {
for _, tt := range unmarshalCluster {
t.Run(tt.about, func(t *testing.T) {
cp := &tt.out
cp.Error = ""
clone := cp.Clone()
if !reflect.DeepEqual(clone, cp) {
t.Errorf("TestPostgresqlClone expected: \n%#v\n, got \n%#v", cp, clone)
}
})
}
}
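A subtlety running through all of these conversions: inside the t.Run closure, continue is no longer legal (the body is a function literal, not a loop body), so every early exit becomes return, which ends only the current subtest. Distilled, inside a test function:

	for _, tt := range parseTimeTests { // before: continue moved on to the next case
		if tt.err != nil {
			continue
		}
	}

	for _, tt := range parseTimeTests { // after: return leaves only this subtest
		t.Run(tt.about, func(t *testing.T) {
			if tt.err != nil {
				return
			}
		})
	}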

View File

@ -1,7 +1,7 @@
// +build !ignore_autogenerated

/*
Copyright 2020 Compose, Zalando SE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -300,34 +300,6 @@ func (in *OperatorConfigurationList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OperatorConfigurationUsers) DeepCopyInto(out *OperatorConfigurationUsers) {
*out = *in
if in.ProtectedRoles != nil {
in, out := &in.ProtectedRoles, &out.ProtectedRoles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.TeamAPIRoleConfiguration != nil {
in, out := &in.TeamAPIRoleConfiguration, &out.TeamAPIRoleConfiguration
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigurationUsers.
func (in *OperatorConfigurationUsers) DeepCopy() *OperatorConfigurationUsers {
if in == nil {
return nil
}
out := new(OperatorConfigurationUsers)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OperatorDebugConfiguration) DeepCopyInto(out *OperatorDebugConfiguration) {
*out = *in
@ -542,6 +514,13 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
(*out)[key] = val
}
}
if in.ServiceAnnotations != nil {
in, out := &in.ServiceAnnotations, &out.ServiceAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.InitContainersOld != nil {
in, out := &in.InitContainersOld, &out.InitContainersOld
*out = make([]corev1.Container, len(*in))

View File

@ -29,7 +29,7 @@ import (
"github.com/zalando/postgres-operator/pkg/util/patroni" "github.com/zalando/postgres-operator/pkg/util/patroni"
"github.com/zalando/postgres-operator/pkg/util/teams" "github.com/zalando/postgres-operator/pkg/util/teams"
"github.com/zalando/postgres-operator/pkg/util/users" "github.com/zalando/postgres-operator/pkg/util/users"
rbacv1beta1 "k8s.io/api/rbac/v1beta1" rbacv1 "k8s.io/api/rbac/v1"
) )
var ( var (
@ -45,7 +45,7 @@ type Config struct {
RestConfig *rest.Config
InfrastructureRoles map[string]spec.PgUser // inherited from the controller
PodServiceAccount *v1.ServiceAccount
PodServiceAccountRoleBinding *rbacv1.RoleBinding
}

type kubeResources struct {
@ -227,8 +227,8 @@ func (c *Cluster) Create() error {
c.setStatus(acidv1.ClusterStatusCreating)

if err = c.enforceMinResourceLimits(&c.Spec); err != nil {
return fmt.Errorf("could not enforce minimum resource limits: %v", err)
}

for _, role := range []PostgresRole{Master, Replica} {
@ -495,38 +495,38 @@ func compareResourcesAssumeFirstNotNil(a *v1.ResourceRequirements, b *v1.Resourc
}

func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
var (
isSmaller bool
err error
)

// setting limits too low can cause unnecessary evictions / OOM kills
minCPULimit := c.OpConfig.MinCPULimit
minMemoryLimit := c.OpConfig.MinMemoryLimit

cpuLimit := spec.Resources.ResourceLimits.CPU
if cpuLimit != "" {
isSmaller, err = util.IsSmallerQuantity(cpuLimit, minCPULimit)
if err != nil {
return fmt.Errorf("could not compare defined CPU limit %s with configured minimum value %s: %v", cpuLimit, minCPULimit, err)
}
if isSmaller {
c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit)
spec.Resources.ResourceLimits.CPU = minCPULimit
}
}

memoryLimit := spec.Resources.ResourceLimits.Memory
if memoryLimit != "" {
isSmaller, err = util.IsSmallerQuantity(memoryLimit, minMemoryLimit)
if err != nil {
return fmt.Errorf("could not compare defined memory limit %s with configured minimum value %s: %v", memoryLimit, minMemoryLimit, err)
}
if isSmaller {
c.logger.Warningf("defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit)
spec.Resources.ResourceLimits.Memory = minMemoryLimit
}
}
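With the hard-coded 256m/256Mi constants gone, the minimums come from operator configuration, and an undersized limit is now raised with a warning instead of rejecting the manifest. A sketch of exercising the new behavior (values are illustrative; MinCPULimit and MinMemoryLimit are assumed to be populated from the operator's min_cpu_limit and min_memory_limit options):

	c.OpConfig.MinCPULimit = "250m"
	c.OpConfig.MinMemoryLimit = "250Mi"
	spec.Resources.ResourceLimits.CPU = "100m" // below the configured minimum
	if err := c.enforceMinResourceLimits(&spec); err == nil {
		// spec.Resources.ResourceLimits.CPU was bumped to "250m", with a warning logged
	}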
@ -543,7 +543,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
c.mu.Lock()
defer c.mu.Unlock()

oldStatus := c.Status
c.setStatus(acidv1.ClusterStatusUpdating)
c.setSpec(newSpec)
@ -555,22 +554,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
}
}()
if err := c.validateResources(&newSpec.Spec); err != nil {
err = fmt.Errorf("insufficient resource limits specified: %v", err)
// cancel update only when (already too low) pod resources were edited
// if cluster was successfully running before the update, continue but log a warning
isCPULimitSmaller, err2 := util.IsSmallerQuantity(newSpec.Spec.Resources.ResourceLimits.CPU, oldSpec.Spec.Resources.ResourceLimits.CPU)
isMemoryLimitSmaller, err3 := util.IsSmallerQuantity(newSpec.Spec.Resources.ResourceLimits.Memory, oldSpec.Spec.Resources.ResourceLimits.Memory)
if oldStatus.Running() && !isCPULimitSmaller && !isMemoryLimitSmaller && err2 == nil && err3 == nil {
c.logger.Warning(err)
} else {
updateFailed = true
return err
}
}
if oldSpec.Spec.PgVersion != newSpec.Spec.PgVersion { // PG versions comparison
c.logger.Warningf("postgresql version change(%q -> %q) has no effect", oldSpec.Spec.PgVersion, newSpec.Spec.PgVersion)
//we need that hack to generate statefulset with the old version
@ -616,6 +599,12 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// Statefulset
func() {
if err := c.enforceMinResourceLimits(&c.Spec); err != nil {
c.logger.Errorf("could not sync resources: %v", err)
updateFailed = true
return
}
oldSs, err := c.generateStatefulSet(&oldSpec.Spec)
if err != nil {
c.logger.Errorf("could not generate old statefulset spec: %v", err)
@ -623,6 +612,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
return
}
// update newSpec for the later comparison with oldSpec
c.enforceMinResourceLimits(&newSpec.Spec)
newSs, err := c.generateStatefulSet(&newSpec.Spec)
if err != nil {
c.logger.Errorf("could not generate new statefulset spec: %v", err)

View File

@ -355,6 +355,12 @@ func TestPodAnnotations(t *testing.T) {
database: map[string]string{"foo": "bar"},
merged: map[string]string{"foo": "bar"},
},
{
subTest: "Both Annotations",
operator: map[string]string{"foo": "bar"},
database: map[string]string{"post": "gres"},
merged: map[string]string{"foo": "bar", "post": "gres"},
},
{
subTest: "Database Config overrides Operator Config Annotations",
operator: map[string]string{"foo": "bar", "global": "foo"},
@ -382,3 +388,319 @@ func TestPodAnnotations(t *testing.T) {
}
}
}
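The new "Both Annotations" case pins down the merge rule for pod annotations: operator-level and cluster-level maps are unioned, and on key conflicts the cluster manifest wins. The rule boils down to an ordered map merge; a self-contained sketch (the helper name is illustrative):

	// union of two annotation maps; the right-hand map wins on conflicts
	func mergeAnnotations(operator, database map[string]string) map[string]string {
		merged := make(map[string]string, len(operator)+len(database))
		for k, v := range operator {
			merged[k] = v
		}
		for k, v := range database { // applied last, so it overrides
			merged[k] = v
		}
		return merged
	}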
func TestServiceAnnotations(t *testing.T) {
enabled := true
disabled := false
tests := []struct {
about string
role PostgresRole
enableMasterLoadBalancerSpec *bool
enableMasterLoadBalancerOC bool
enableReplicaLoadBalancerSpec *bool
enableReplicaLoadBalancerOC bool
operatorAnnotations map[string]string
clusterAnnotations map[string]string
expect map[string]string
}{
//MASTER
{
about: "Master with no annotations and EnableMasterLoadBalancer disabled on spec and OperatorConfig",
role: "master",
enableMasterLoadBalancerSpec: &disabled,
enableMasterLoadBalancerOC: false,
operatorAnnotations: make(map[string]string),
clusterAnnotations: make(map[string]string),
expect: make(map[string]string),
},
{
about: "Master with no annotations and EnableMasterLoadBalancer enabled on spec",
role: "master",
enableMasterLoadBalancerSpec: &enabled,
enableMasterLoadBalancerOC: false,
operatorAnnotations: make(map[string]string),
clusterAnnotations: make(map[string]string),
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
},
},
{
about: "Master with no annotations and EnableMasterLoadBalancer enabled only on operator config",
role: "master",
enableMasterLoadBalancerSpec: &disabled,
enableMasterLoadBalancerOC: true,
operatorAnnotations: make(map[string]string),
clusterAnnotations: make(map[string]string),
expect: make(map[string]string),
},
{
about: "Master with no annotations and EnableMasterLoadBalancer defined only on operator config",
role: "master",
enableMasterLoadBalancerOC: true,
operatorAnnotations: make(map[string]string),
clusterAnnotations: make(map[string]string),
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
},
},
{
about: "Master with cluster annotations and load balancer enabled",
role: "master",
enableMasterLoadBalancerOC: true,
operatorAnnotations: make(map[string]string),
clusterAnnotations: map[string]string{"foo": "bar"},
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
"foo": "bar",
},
},
{
about: "Master with cluster annotations and load balancer disabled",
role: "master",
enableMasterLoadBalancerSpec: &disabled,
enableMasterLoadBalancerOC: true,
operatorAnnotations: make(map[string]string),
clusterAnnotations: map[string]string{"foo": "bar"},
expect: map[string]string{"foo": "bar"},
},
{
about: "Master with operator annotations and load balancer enabled",
role: "master",
enableMasterLoadBalancerOC: true,
operatorAnnotations: map[string]string{"foo": "bar"},
clusterAnnotations: make(map[string]string),
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
"foo": "bar",
},
},
{
about: "Master with operator annotations override default annotations",
role: "master",
enableMasterLoadBalancerOC: true,
operatorAnnotations: map[string]string{
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
},
clusterAnnotations: make(map[string]string),
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
},
},
{
about: "Master with cluster annotations override default annotations",
role: "master",
enableMasterLoadBalancerOC: true,
operatorAnnotations: make(map[string]string),
clusterAnnotations: map[string]string{
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
},
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
},
},
{
about: "Master with cluster annotations do not override external-dns annotations",
role: "master",
enableMasterLoadBalancerOC: true,
operatorAnnotations: make(map[string]string),
clusterAnnotations: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
},
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
},
},
{
about: "Master with operator annotations do not override external-dns annotations",
role: "master",
enableMasterLoadBalancerOC: true,
clusterAnnotations: make(map[string]string),
operatorAnnotations: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
},
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
},
},
// REPLICA
{
about: "Replica with no annotations and EnableReplicaLoadBalancer disabled on spec and OperatorConfig",
role: "replica",
enableReplicaLoadBalancerSpec: &disabled,
enableReplicaLoadBalancerOC: false,
operatorAnnotations: make(map[string]string),
clusterAnnotations: make(map[string]string),
expect: make(map[string]string),
},
{
about: "Replica with no annotations and EnableReplicaLoadBalancer enabled on spec",
role: "replica",
enableReplicaLoadBalancerSpec: &enabled,
enableReplicaLoadBalancerOC: false,
operatorAnnotations: make(map[string]string),
clusterAnnotations: make(map[string]string),
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
},
},
{
about: "Replica with no annotations and EnableReplicaLoadBalancer enabled only on operator config",
role: "replica",
enableReplicaLoadBalancerSpec: &disabled,
enableReplicaLoadBalancerOC: true,
operatorAnnotations: make(map[string]string),
clusterAnnotations: make(map[string]string),
expect: make(map[string]string),
},
{
about: "Replica with no annotations and EnableReplicaLoadBalancer defined only on operator config",
role: "replica",
enableReplicaLoadBalancerOC: true,
operatorAnnotations: make(map[string]string),
clusterAnnotations: make(map[string]string),
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
},
},
{
about: "Replica with cluster annotations and load balancer enabled",
role: "replica",
enableReplicaLoadBalancerOC: true,
operatorAnnotations: make(map[string]string),
clusterAnnotations: map[string]string{"foo": "bar"},
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
"foo": "bar",
},
},
{
about: "Replica with cluster annotations and load balancer disabled",
role: "replica",
enableReplicaLoadBalancerSpec: &disabled,
enableReplicaLoadBalancerOC: true,
operatorAnnotations: make(map[string]string),
clusterAnnotations: map[string]string{"foo": "bar"},
expect: map[string]string{"foo": "bar"},
},
{
about: "Replica with operator annotations and load balancer enabled",
role: "replica",
enableReplicaLoadBalancerOC: true,
operatorAnnotations: map[string]string{"foo": "bar"},
clusterAnnotations: make(map[string]string),
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
"foo": "bar",
},
},
{
about: "Replica with operator annotations override default annotations",
role: "replica",
enableReplicaLoadBalancerOC: true,
operatorAnnotations: map[string]string{
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
},
clusterAnnotations: make(map[string]string),
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
},
},
{
about: "Replica with cluster annotations override default annotations",
role: "replica",
enableReplicaLoadBalancerOC: true,
operatorAnnotations: make(map[string]string),
clusterAnnotations: map[string]string{
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
},
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
},
},
{
about: "Replica with cluster annotations do not override external-dns annotations",
role: "replica",
enableReplicaLoadBalancerOC: true,
operatorAnnotations: make(map[string]string),
clusterAnnotations: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
},
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
},
},
{
about: "Replica with operator annotations do not override external-dns annotations",
role: "replica",
enableReplicaLoadBalancerOC: true,
clusterAnnotations: make(map[string]string),
operatorAnnotations: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
},
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
},
},
// COMMON
{
about: "cluster annotations append to operator annotations",
role: "replica",
enableReplicaLoadBalancerOC: false,
operatorAnnotations: map[string]string{"foo": "bar"},
clusterAnnotations: map[string]string{"post": "gres"},
expect: map[string]string{"foo": "bar", "post": "gres"},
},
{
about: "cluster annotations override operator annotations",
role: "replica",
enableReplicaLoadBalancerOC: false,
operatorAnnotations: map[string]string{"foo": "bar", "post": "gres"},
clusterAnnotations: map[string]string{"post": "greSQL"},
expect: map[string]string{"foo": "bar", "post": "greSQL"},
},
}
for _, tt := range tests {
t.Run(tt.about, func(t *testing.T) {
cl.OpConfig.CustomServiceAnnotations = tt.operatorAnnotations
cl.OpConfig.EnableMasterLoadBalancer = tt.enableMasterLoadBalancerOC
cl.OpConfig.EnableReplicaLoadBalancer = tt.enableReplicaLoadBalancerOC
cl.OpConfig.MasterDNSNameFormat = "{cluster}.{team}.{hostedzone}"
cl.OpConfig.ReplicaDNSNameFormat = "{cluster}-repl.{team}.{hostedzone}"
cl.OpConfig.DbHostedZone = "db.example.com"
cl.Postgresql.Spec.ClusterName = "test"
cl.Postgresql.Spec.TeamID = "acid"
cl.Postgresql.Spec.ServiceAnnotations = tt.clusterAnnotations
cl.Postgresql.Spec.EnableMasterLoadBalancer = tt.enableMasterLoadBalancerSpec
cl.Postgresql.Spec.EnableReplicaLoadBalancer = tt.enableReplicaLoadBalancerSpec
got := cl.generateServiceAnnotations(tt.role, &cl.Postgresql.Spec)
if len(tt.expect) != len(got) {
t.Errorf("expected %d annotation(s), got %d", len(tt.expect), len(got))
return
}
for k, v := range got {
if tt.expect[k] != v {
t.Errorf("expected annotation '%v' with value '%v', got value '%v'", k, tt.expect[k], v)
}
}
})
}
}
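
The enableReplicaLoadBalancerSpec/enableReplicaLoadBalancerOC pairs in the table above all exercise the same precedence rule: a load-balancer toggle set in the cluster manifest wins over the operator-wide default. A minimal, self-contained sketch of that rule (assumed semantics for illustration, not code from this commit):

package main

import "fmt"

// enableLoadBalancer mirrors the precedence the test fields above exercise:
// a per-cluster setting, when present, overrides the operator-wide default.
func enableLoadBalancer(spec *bool, operatorDefault bool) bool {
	if spec != nil {
		return *spec // the cluster manifest wins whenever it sets the flag
	}
	return operatorDefault // otherwise the operator configuration applies
}

func main() {
	disabled := false
	fmt.Println(enableLoadBalancer(&disabled, true)) // false: spec overrides the config
	fmt.Println(enableLoadBalancer(nil, true))       // true: config default applies
}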

View File

@ -1048,10 +1048,13 @@ func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
 	cur := spec.NumberOfInstances
 	newcur := cur

-	/* Limit the max number of pods to one, if this is standby-cluster */
 	if spec.StandbyCluster != nil {
-		c.logger.Info("Standby cluster can have maximum of 1 pod")
-		max = 1
+		if newcur == 1 {
+			min = newcur
+			max = newcur
+		} else {
+			c.logger.Warningf("operator only supports standby clusters with 1 pod")
+		}
 	}
 	if max >= 0 && newcur > max {
 		newcur = max
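
Aside: the standby branch above now pins the pod count only when the manifest already requests exactly one pod, and otherwise just warns. A runnable sketch of the clamping as a whole (the lower-bound handling outside the shown hunk is inferred and marked as an assumption):

package main

import "fmt"

// clampInstances sketches the assumed semantics of getNumberOfInstances for
// standby clusters: pin min and max to 1 only when one pod is requested,
// otherwise warn and fall through to the regular min/max bounds.
func clampInstances(cur, min, max int32, standby bool) int32 {
	if standby {
		if cur == 1 {
			min, max = cur, cur
		} else {
			fmt.Println("warning: operator only supports standby clusters with 1 pod")
		}
	}
	newcur := cur
	if max >= 0 && newcur > max {
		newcur = max
	}
	if min >= 0 && newcur < min { // assumed lower-bound handling
		newcur = min
	}
	return newcur
}

func main() {
	fmt.Println(clampInstances(1, -1, -1, true)) // 1: standby pinned to one pod
	fmt.Println(clampInstances(3, -1, -1, true)) // 3: warned, request left as is
}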
@ -1229,14 +1232,6 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *ac
 }

 func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) *v1.Service {
-	var dnsName string
-
-	if role == Master {
-		dnsName = c.masterDNSName()
-	} else {
-		dnsName = c.replicaDNSName()
-	}
-
 	serviceSpec := v1.ServiceSpec{
 		Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
 		Type:  v1.ServiceTypeClusterIP,
@ -1246,8 +1241,6 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
 		serviceSpec.Selector = c.roleLabelsSet(false, role)
 	}

-	var annotations map[string]string
-
 	if c.shouldCreateLoadBalancerForService(role, spec) {
 		// spec.AllowedSourceRanges evaluates to the empty slice of zero length
@ -1261,18 +1254,6 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges) c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges)
serviceSpec.Type = v1.ServiceTypeLoadBalancer serviceSpec.Type = v1.ServiceTypeLoadBalancer
annotations = map[string]string{
constants.ZalandoDNSNameAnnotation: dnsName,
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
}
if len(c.OpConfig.CustomServiceAnnotations) != 0 {
c.logger.Debugf("There are custom annotations defined, creating them.")
for customAnnotationKey, customAnnotationValue := range c.OpConfig.CustomServiceAnnotations {
annotations[customAnnotationKey] = customAnnotationValue
}
}
} else if role == Replica { } else if role == Replica {
// before PR #258, the replica service was only created if allocated a LB // before PR #258, the replica service was only created if allocated a LB
// now we always create the service but warn if the LB is absent // now we always create the service but warn if the LB is absent
@ -1284,7 +1265,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
 			Name:        c.serviceName(role),
 			Namespace:   c.Namespace,
 			Labels:      c.roleLabelsSet(true, role),
-			Annotations: annotations,
+			Annotations: c.generateServiceAnnotations(role, spec),
 		},
 		Spec: serviceSpec,
 	}
@ -1292,6 +1273,42 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
	return service
}
func (c *Cluster) generateServiceAnnotations(role PostgresRole, spec *acidv1.PostgresSpec) map[string]string {
annotations := make(map[string]string)
for k, v := range c.OpConfig.CustomServiceAnnotations {
annotations[k] = v
}
if spec != nil && spec.ServiceAnnotations != nil {
for k, v := range spec.ServiceAnnotations {
annotations[k] = v
}
}
if c.shouldCreateLoadBalancerForService(role, spec) {
var dnsName string
if role == Master {
dnsName = c.masterDNSName()
} else {
dnsName = c.replicaDNSName()
}
// Set the ELB timeout annotation with the default value only if it does
// not already have a custom value
if _, ok := annotations[constants.ElbTimeoutAnnotationName]; !ok {
annotations[constants.ElbTimeoutAnnotationName] = constants.ElbTimeoutAnnotationValue
}
// External DNS name annotation is not customizable
annotations[constants.ZalandoDNSNameAnnotation] = dnsName
}
if len(annotations) == 0 {
return nil
}
return annotations
}
func (c *Cluster) generateEndpoint(role PostgresRole, subsets []v1.EndpointSubset) *v1.Endpoints {
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{
@ -1483,7 +1500,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
 	)

 	labels := map[string]string{
-		"version":     c.Name,
+		c.OpConfig.ClusterNameLabel: c.Name,
 		"application": "spilo-logical-backup",
 	}

 	podAffinityTerm := v1.PodAffinityTerm{
@ -1588,6 +1605,10 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
Name: "LOGICAL_BACKUP_S3_BUCKET", Name: "LOGICAL_BACKUP_S3_BUCKET",
Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket, Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket,
}, },
{
Name: "LOGICAL_BACKUP_S3_REGION",
Value: c.OpConfig.LogicalBackup.LogicalBackupS3Region,
},
{ {
Name: "LOGICAL_BACKUP_S3_ENDPOINT", Name: "LOGICAL_BACKUP_S3_ENDPOINT",
Value: c.OpConfig.LogicalBackup.LogicalBackupS3Endpoint, Value: c.OpConfig.LogicalBackup.LogicalBackupS3Endpoint,

View File

@ -366,6 +366,11 @@ func (c *Cluster) createService(role PostgresRole) (*v1.Service, error) {
 }

 func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error {
+	var (
+		svc *v1.Service
+		err error
+	)
+
 	c.setProcessName("updating %v service", role)

 	if c.Services[role] == nil {
@ -373,70 +378,6 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
 	}
 	serviceName := util.NameFromMeta(c.Services[role].ObjectMeta)
-	endpointName := util.NameFromMeta(c.Endpoints[role].ObjectMeta)
-	// TODO: check if it possible to change the service type with a patch in future versions of Kubernetes
-	if newService.Spec.Type != c.Services[role].Spec.Type {
-		// service type has changed, need to replace the service completely.
-		// we cannot use just patch the current service, since it may contain attributes incompatible with the new type.
-		var (
-			currentEndpoint *v1.Endpoints
-			err             error
-		)
-
-		if role == Master {
-			// for the master service we need to re-create the endpoint as well. Get the up-to-date version of
-			// the addresses stored in it before the service is deleted (deletion of the service removes the endpoint)
-			currentEndpoint, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{})
-			if err != nil {
-				return fmt.Errorf("could not get current cluster %s endpoints: %v", role, err)
-			}
-		}
-
-		err = c.KubeClient.Services(serviceName.Namespace).Delete(serviceName.Name, c.deleteOptions)
-		if err != nil {
-			return fmt.Errorf("could not delete service %q: %v", serviceName, err)
-		}
-
-		// wait until the service is truly deleted
-		c.logger.Debugf("waiting for service to be deleted")
-
-		err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
-			func() (bool, error) {
-				_, err2 := c.KubeClient.Services(serviceName.Namespace).Get(serviceName.Name, metav1.GetOptions{})
-				if err2 == nil {
-					return false, nil
-				}
-				if k8sutil.ResourceNotFound(err2) {
-					return true, nil
-				}
-				return false, err2
-			})
-		if err != nil {
-			return fmt.Errorf("could not delete service %q: %v", serviceName, err)
-		}
-
-		// make sure we clear the stored service and endpoint status if the subsequent create fails.
-		c.Services[role] = nil
-		c.Endpoints[role] = nil
-
-		if role == Master {
-			// create the new endpoint using the addresses obtained from the previous one
-			endpointSpec := c.generateEndpoint(role, currentEndpoint.Subsets)
-			ep, err := c.KubeClient.Endpoints(endpointSpec.Namespace).Create(endpointSpec)
-			if err != nil {
-				return fmt.Errorf("could not create endpoint %q: %v", endpointName, err)
-			}
-			c.Endpoints[role] = ep
-		}
-
-		svc, err := c.KubeClient.Services(serviceName.Namespace).Create(newService)
-		if err != nil {
-			return fmt.Errorf("could not create service %q: %v", serviceName, err)
-		}
-		c.Services[role] = svc
-
-		return nil
-	}

 	// update the service annotation in order to propagate ELB notation.
 	if len(newService.ObjectMeta.Annotations) > 0 {
@ -454,19 +395,31 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
 		}
 	}

+	// now, patch the service spec, but when disabling LoadBalancers do update instead
+	// patch does not work because of LoadBalancerSourceRanges field (even if set to nil)
+	oldServiceType := c.Services[role].Spec.Type
+	newServiceType := newService.Spec.Type
+	if newServiceType == "ClusterIP" && newServiceType != oldServiceType {
+		newService.ResourceVersion = c.Services[role].ResourceVersion
+		newService.Spec.ClusterIP = c.Services[role].Spec.ClusterIP
+		svc, err = c.KubeClient.Services(serviceName.Namespace).Update(newService)
+		if err != nil {
+			return fmt.Errorf("could not update service %q: %v", serviceName, err)
+		}
+	} else {
 		patchData, err := specPatch(newService.Spec)
 		if err != nil {
 			return fmt.Errorf("could not form patch for the service %q: %v", serviceName, err)
 		}

-		// update the service spec
-		svc, err := c.KubeClient.Services(serviceName.Namespace).Patch(
+		svc, err = c.KubeClient.Services(serviceName.Namespace).Patch(
 			serviceName.Name,
 			types.MergePatchType,
 			patchData, "")
 		if err != nil {
 			return fmt.Errorf("could not patch service %q: %v", serviceName, err)
 		}
+	}

 	c.Services[role] = svc
 	return nil
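
The update-instead-of-patch branch exists because a JSON merge patch cannot clear LoadBalancerSourceRanges: the field carries an omitempty tag, so a nil slice simply vanishes from the serialized patch and the API server keeps the old value. A short demonstration, assuming specPatch marshals the spec with encoding/json as those tags suggest:

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// A spec being downgraded to ClusterIP: the source ranges are left nil.
	spec := v1.ServiceSpec{Type: v1.ServiceTypeClusterIP}

	patch, err := json.Marshal(map[string]interface{}{"spec": spec})
	if err != nil {
		panic(err)
	}

	// Prints {"spec":{"type":"ClusterIP"}}: there is no
	// "loadBalancerSourceRanges" key at all, so a MergePatchType request
	// leaves the live value untouched.
	fmt.Println(string(patch))
}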

View File

@ -23,7 +23,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 	c.mu.Lock()
 	defer c.mu.Unlock()

-	oldStatus := c.Status
 	c.setSpec(newSpec)

 	defer func() {
@ -35,16 +34,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		}
 	}()

-	if err = c.validateResources(&c.Spec); err != nil {
-		err = fmt.Errorf("insufficient resource limits specified: %v", err)
-
-		if oldStatus.Running() {
-			c.logger.Warning(err)
-			err = nil
-		} else {
-			return err
-		}
-	}
-
 	if err = c.initUsers(); err != nil {
 		err = fmt.Errorf("could not init users: %v", err)
 		return err
@ -76,6 +65,11 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		return err
 	}

+	if err = c.enforceMinResourceLimits(&c.Spec); err != nil {
+		err = fmt.Errorf("could not enforce minimum resource limits: %v", err)
+		return err
+	}
+
 	c.logger.Debugf("syncing statefulsets")
 	if err = c.syncStatefulSet(); err != nil {
 		if !k8sutil.ResourceAlreadyExists(err) {
@ -122,7 +116,7 @@ func (c *Cluster) syncServices() error {
c.logger.Debugf("syncing %s service", role) c.logger.Debugf("syncing %s service", role)
if err := c.syncEndpoint(role); err != nil { if err := c.syncEndpoint(role); err != nil {
return fmt.Errorf("could not sync %s endpont: %v", role, err) return fmt.Errorf("could not sync %s endpoint: %v", role, err)
} }
if err := c.syncService(role); err != nil { if err := c.syncService(role); err != nil {

View File

@ -7,7 +7,7 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1" rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/kubernetes/scheme"
@ -57,7 +57,7 @@ type Controller struct {
 	workerLogs map[uint32]ringlog.RingLogger

 	PodServiceAccount            *v1.ServiceAccount
-	PodServiceAccountRoleBinding *rbacv1beta1.RoleBinding
+	PodServiceAccountRoleBinding *rbacv1.RoleBinding
 }

 // NewController creates a new controller
@ -161,10 +161,11 @@ func (c *Controller) initPodServiceAccount() {
 	if c.opConfig.PodServiceAccountDefinition == "" {
 		c.opConfig.PodServiceAccountDefinition = `
-		{ "apiVersion": "v1",
+		{
+			"apiVersion": "v1",
 			"kind": "ServiceAccount",
 			"metadata": {
-				"name": "operator"
+				"name": "postgres-pod"
 			}
 		}`
 	}
@ -175,13 +176,13 @@ func (c *Controller) initPodServiceAccount() {
 	switch {
 	case err != nil:
-		panic(fmt.Errorf("Unable to parse pod service account definition from the operator config map: %v", err))
+		panic(fmt.Errorf("Unable to parse pod service account definition from the operator configuration: %v", err))
 	case groupVersionKind.Kind != "ServiceAccount":
-		panic(fmt.Errorf("pod service account definition in the operator config map defines another type of resource: %v", groupVersionKind.Kind))
+		panic(fmt.Errorf("pod service account definition in the operator configuration defines another type of resource: %v", groupVersionKind.Kind))
 	default:
 		c.PodServiceAccount = obj.(*v1.ServiceAccount)
 		if c.PodServiceAccount.Name != c.opConfig.PodServiceAccountName {
-			c.logger.Warnf("in the operator config map, the pod service account name %v does not match the name %v given in the account definition; using the former for consistency", c.opConfig.PodServiceAccountName, c.PodServiceAccount.Name)
+			c.logger.Warnf("in the operator configuration, the pod service account name %v does not match the name %v given in the account definition; using the former for consistency", c.opConfig.PodServiceAccountName, c.PodServiceAccount.Name)
 			c.PodServiceAccount.Name = c.opConfig.PodServiceAccountName
 		}

 		c.PodServiceAccount.Namespace = ""
@ -198,7 +199,7 @@ func (c *Controller) initRoleBinding() {
 	if c.opConfig.PodServiceAccountRoleBindingDefinition == "" {
 		c.opConfig.PodServiceAccountRoleBindingDefinition = fmt.Sprintf(`
 		{
-			"apiVersion": "rbac.authorization.k8s.io/v1beta1",
+			"apiVersion": "rbac.authorization.k8s.io/v1",
 			"kind": "RoleBinding",
 			"metadata": {
 				"name": "%s"
@ -223,11 +224,11 @@ func (c *Controller) initRoleBinding() {
 	switch {
 	case err != nil:
-		panic(fmt.Errorf("Unable to parse the definition of the role binding for the pod service account definition from the operator config map: %v", err))
+		panic(fmt.Errorf("unable to parse the role binding definition from the operator configuration: %v", err))
 	case groupVersionKind.Kind != "RoleBinding":
-		panic(fmt.Errorf("role binding definition in the operator config map defines another type of resource: %v", groupVersionKind.Kind))
+		panic(fmt.Errorf("role binding definition in the operator configuration defines another type of resource: %v", groupVersionKind.Kind))
 	default:
-		c.PodServiceAccountRoleBinding = obj.(*rbacv1beta1.RoleBinding)
+		c.PodServiceAccountRoleBinding = obj.(*rbacv1.RoleBinding)
 		c.PodServiceAccountRoleBinding.Namespace = ""
 		c.logger.Info("successfully parsed")

View File

@ -66,7 +66,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.NodeReadinessLabel = fromCRD.Kubernetes.NodeReadinessLabel
 	result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName
 	result.PodManagementPolicy = fromCRD.Kubernetes.PodManagementPolicy
-	result.MasterPodMoveTimeout = fromCRD.Kubernetes.MasterPodMoveTimeout
+	result.MasterPodMoveTimeout = time.Duration(fromCRD.Kubernetes.MasterPodMoveTimeout)
 	result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity
 	result.PodAntiAffinityTopologyKey = fromCRD.Kubernetes.PodAntiAffinityTopologyKey
@ -75,6 +75,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest
 	result.DefaultCPULimit = fromCRD.PostgresPodResources.DefaultCPULimit
 	result.DefaultMemoryLimit = fromCRD.PostgresPodResources.DefaultMemoryLimit
+	result.MinCPULimit = fromCRD.PostgresPodResources.MinCPULimit
+	result.MinMemoryLimit = fromCRD.PostgresPodResources.MinMemoryLimit

 	// timeout config
 	result.ResourceCheckInterval = time.Duration(fromCRD.Timeouts.ResourceCheckInterval)
@ -104,6 +106,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.LogicalBackupSchedule = fromCRD.LogicalBackup.Schedule
 	result.LogicalBackupDockerImage = fromCRD.LogicalBackup.DockerImage
 	result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket
+	result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region
 	result.LogicalBackupS3Endpoint = fromCRD.LogicalBackup.S3Endpoint
 	result.LogicalBackupS3AccessKeyID = fromCRD.LogicalBackup.S3AccessKeyID
 	result.LogicalBackupS3SecretAccessKey = fromCRD.LogicalBackup.S3SecretAccessKey

View File

@ -505,11 +505,11 @@ func (c *Controller) submitRBACCredentials(event ClusterEvent) error {
 	namespace := event.NewSpec.GetNamespace()

 	if err := c.createPodServiceAccount(namespace); err != nil {
-		return fmt.Errorf("could not create pod service account %v : %v", c.opConfig.PodServiceAccountName, err)
+		return fmt.Errorf("could not create pod service account %q : %v", c.opConfig.PodServiceAccountName, err)
 	}

 	if err := c.createRoleBindings(namespace); err != nil {
-		return fmt.Errorf("could not create role binding %v : %v", c.PodServiceAccountRoleBinding.Name, err)
+		return fmt.Errorf("could not create role binding %q : %v", c.PodServiceAccountRoleBinding.Name, err)
 	}
 	return nil
 }
@ -520,16 +520,16 @@ func (c *Controller) createPodServiceAccount(namespace string) error {
 	_, err := c.KubeClient.ServiceAccounts(namespace).Get(podServiceAccountName, metav1.GetOptions{})
 	if k8sutil.ResourceNotFound(err) {
-		c.logger.Infof(fmt.Sprintf("creating pod service account in the namespace %v", namespace))
+		c.logger.Infof(fmt.Sprintf("creating pod service account %q in the %q namespace", podServiceAccountName, namespace))

 		// get a separate copy of service account
 		// to prevent a race condition when setting a namespace for many clusters
 		sa := *c.PodServiceAccount
 		if _, err = c.KubeClient.ServiceAccounts(namespace).Create(&sa); err != nil {
-			return fmt.Errorf("cannot deploy the pod service account %v defined in the config map to the %v namespace: %v", podServiceAccountName, namespace, err)
+			return fmt.Errorf("cannot deploy the pod service account %q defined in the configuration to the %q namespace: %v", podServiceAccountName, namespace, err)
 		}

-		c.logger.Infof("successfully deployed the pod service account %v to the %v namespace", podServiceAccountName, namespace)
+		c.logger.Infof("successfully deployed the pod service account %q to the %q namespace", podServiceAccountName, namespace)
 	} else if k8sutil.ResourceAlreadyExists(err) {
 		return nil
 	}
@ -545,14 +545,14 @@ func (c *Controller) createRoleBindings(namespace string) error {
 	_, err := c.KubeClient.RoleBindings(namespace).Get(podServiceAccountRoleBindingName, metav1.GetOptions{})
 	if k8sutil.ResourceNotFound(err) {
-		c.logger.Infof("Creating the role binding %v in the namespace %v", podServiceAccountRoleBindingName, namespace)
+		c.logger.Infof("Creating the role binding %q in the %q namespace", podServiceAccountRoleBindingName, namespace)

 		// get a separate copy of role binding
 		// to prevent a race condition when setting a namespace for many clusters
 		rb := *c.PodServiceAccountRoleBinding
 		_, err = c.KubeClient.RoleBindings(namespace).Create(&rb)
 		if err != nil {
-			return fmt.Errorf("cannot bind the pod service account %q defined in the config map to the cluster role in the %q namespace: %v", podServiceAccountName, namespace, err)
+			return fmt.Errorf("cannot bind the pod service account %q defined in the configuration to the cluster role in the %q namespace: %v", podServiceAccountName, namespace, err)
 		}

 		c.logger.Infof("successfully deployed the role binding for the pod service account %q to the %q namespace", podServiceAccountName, namespace)

View File

@ -1,5 +1,5 @@
 /*
-Copyright 2019 Compose, Zalando SE
+Copyright 2020 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

(the same 2019 → 2020 copyright bump is applied to 23 more files)

View File

@ -37,8 +37,10 @@ type Resources struct {
 	PodToleration           map[string]string `name:"toleration" default:""`
 	DefaultCPURequest       string            `name:"default_cpu_request" default:"100m"`
 	DefaultMemoryRequest    string            `name:"default_memory_request" default:"100Mi"`
-	DefaultCPULimit         string            `name:"default_cpu_limit" default:"3"`
-	DefaultMemoryLimit      string            `name:"default_memory_limit" default:"1Gi"`
+	DefaultCPULimit         string            `name:"default_cpu_limit" default:"1"`
+	DefaultMemoryLimit      string            `name:"default_memory_limit" default:"500Mi"`
+	MinCPULimit             string            `name:"min_cpu_limit" default:"250m"`
+	MinMemoryLimit          string            `name:"min_memory_limit" default:"250Mi"`
 	PodEnvironmentConfigMap string            `name:"pod_environment_configmap" default:""`
 	NodeReadinessLabel      map[string]string `name:"node_readiness_label" default:""`
 	MaxInstances            int32             `name:"max_instances" default:"-1"`
@ -66,14 +68,15 @@ type Scalyr struct {
 	ScalyrCPURequest    string `name:"scalyr_cpu_request" default:"100m"`
 	ScalyrMemoryRequest string `name:"scalyr_memory_request" default:"50Mi"`
 	ScalyrCPULimit      string `name:"scalyr_cpu_limit" default:"1"`
-	ScalyrMemoryLimit   string `name:"scalyr_memory_limit" default:"1Gi"`
+	ScalyrMemoryLimit   string `name:"scalyr_memory_limit" default:"500Mi"`
 }

-// LogicalBackup defines configration for logical backup
+// LogicalBackup defines configuration for logical backup
 type LogicalBackup struct {
 	LogicalBackupSchedule          string `name:"logical_backup_schedule" default:"30 00 * * *"`
 	LogicalBackupDockerImage       string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup"`
 	LogicalBackupS3Bucket          string `name:"logical_backup_s3_bucket" default:""`
+	LogicalBackupS3Region          string `name:"logical_backup_s3_region" default:""`
 	LogicalBackupS3Endpoint        string `name:"logical_backup_s3_endpoint" default:""`
 	LogicalBackupS3AccessKeyID     string `name:"logical_backup_s3_access_key_id" default:""`
 	LogicalBackupS3SecretAccessKey string `name:"logical_backup_s3_secret_access_key" default:""`
@ -90,10 +93,9 @@ type Config struct {
 	WatchedNamespace string            `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
 	EtcdHost         string            `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
-	DockerImage      string            `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-11:1.6-p1"`
+	DockerImage      string            `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p2"`
 	Sidecars         map[string]string `name:"sidecar_docker_images"`
-	// default name `operator` enables backward compatibility with the older ServiceAccountName field
-	PodServiceAccountName string `name:"pod_service_account_name" default:"operator"`
+	PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
 	// value of this string must be valid JSON or YAML; see initPodServiceAccount
 	PodServiceAccountDefinition            string `name:"pod_service_account_definition" default:""`
 	PodServiceAccountRoleBindingDefinition string `name:"pod_service_account_role_binding_definition" default:""`
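
The new min_cpu_limit/min_memory_limit options feed the enforceMinResourceLimits call added to Sync earlier in this diff. A sketch of the assumed comparison, built on the Kubernetes quantity parser rather than the operator's exact code:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// enforceMin bumps a configured limit up to the operator-wide minimum; the
// semantics are assumed from the option names and the Sync error message.
func enforceMin(limit, min string) string {
	if resource.MustParse(limit).Cmp(resource.MustParse(min)) < 0 {
		return min
	}
	return limit
}

func main() {
	fmt.Println(enforceMin("100m", "250m")) // "250m": raised to the minimum
	fmt.Println(enforceMin("1", "250m"))    // "1": already above the minimum
}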

View File

@ -9,7 +9,6 @@ import (
 	batchv1beta1 "k8s.io/api/batch/v1beta1"
 	clientbatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1"

-	"github.com/zalando/postgres-operator/pkg/util/constants"
 	v1 "k8s.io/api/core/v1"
 	policybeta1 "k8s.io/api/policy/v1beta1"
 	apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
@ -19,7 +18,7 @@ import (
 	appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
 	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
 	policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
-	rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
+	rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
@ -40,7 +39,7 @@ type KubernetesClient struct {
 	corev1.NamespacesGetter
 	corev1.ServiceAccountsGetter
 	appsv1.StatefulSetsGetter
-	rbacv1beta1.RoleBindingsGetter
+	rbacv1.RoleBindingsGetter
 	policyv1beta1.PodDisruptionBudgetsGetter
 	apiextbeta1.CustomResourceDefinitionsGetter
 	clientbatchv1beta1.CronJobsGetter
@ -104,7 +103,7 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) {
 	kubeClient.StatefulSetsGetter = client.AppsV1()
 	kubeClient.PodDisruptionBudgetsGetter = client.PolicyV1beta1()
 	kubeClient.RESTClient = client.CoreV1().RESTClient()
-	kubeClient.RoleBindingsGetter = client.RbacV1beta1()
+	kubeClient.RoleBindingsGetter = client.RbacV1()
 	kubeClient.CronJobsGetter = client.BatchV1beta1()

 	apiextClient, err := apiextclient.NewForConfig(cfg)
@ -136,21 +135,37 @@ func SameService(cur, new *v1.Service) (match bool, reason string) {
 		}
 	}

-	oldDNSAnnotation := cur.Annotations[constants.ZalandoDNSNameAnnotation]
-	newDNSAnnotation := new.Annotations[constants.ZalandoDNSNameAnnotation]
-	oldELBAnnotation := cur.Annotations[constants.ElbTimeoutAnnotationName]
-	newELBAnnotation := new.Annotations[constants.ElbTimeoutAnnotationName]
-
-	if oldDNSAnnotation != newDNSAnnotation {
-		return false, fmt.Sprintf("new service's %q annotation value %q doesn't match the current one %q",
-			constants.ZalandoDNSNameAnnotation, newDNSAnnotation, oldDNSAnnotation)
-	}
-	if oldELBAnnotation != newELBAnnotation {
-		return false, fmt.Sprintf("new service's %q annotation value %q doesn't match the current one %q",
-			constants.ElbTimeoutAnnotationName, oldELBAnnotation, newELBAnnotation)
-	}
-
-	return true, ""
+	match = true
+
+	reasonPrefix := "new service's annotations doesn't match the current one:"
+	for ann := range cur.Annotations {
+		if _, ok := new.Annotations[ann]; !ok {
+			match = false
+			if len(reason) == 0 {
+				reason = reasonPrefix
+			}
+			reason += fmt.Sprintf(" Removed '%s'.", ann)
+		}
+	}
+
+	for ann := range new.Annotations {
+		v, ok := cur.Annotations[ann]
+		if !ok {
+			if len(reason) == 0 {
+				reason = reasonPrefix
+			}
+			reason += fmt.Sprintf(" Added '%s' with value '%s'.", ann, new.Annotations[ann])
+			match = false
+		} else if v != new.Annotations[ann] {
+			if len(reason) == 0 {
+				reason = reasonPrefix
+			}
+			reason += fmt.Sprintf(" '%s' changed from '%s' to '%s'.", ann, v, new.Annotations[ann])
+			match = false
+		}
+	}
+
+	return match, reason
 }

 // SamePDB compares the PodDisruptionBudgets
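
For illustration, a minimal use of the rewritten SameService; the import path is assumed from the repository layout, and the earlier type and source-range checks pass here because the two specs are otherwise identical:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"

	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
)

func main() {
	cur := &v1.Service{}
	cur.Annotations = map[string]string{"foo": "bar"}

	desired := &v1.Service{}
	desired.Annotations = map[string]string{"foo": "baz"}

	match, reason := k8sutil.SameService(cur, desired)
	fmt.Println(match)  // false
	fmt.Println(reason) // new service's annotations doesn't match the current one: 'foo' changed from 'bar' to 'baz'.
}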

View File

@ -0,0 +1,311 @@
package k8sutil
import (
"strings"
"testing"
"github.com/zalando/postgres-operator/pkg/util/constants"
v1 "k8s.io/api/core/v1"
)
func newsService(ann map[string]string, svcT v1.ServiceType, lbSr []string) *v1.Service {
svc := &v1.Service{
Spec: v1.ServiceSpec{
Type: svcT,
LoadBalancerSourceRanges: lbSr,
},
}
svc.Annotations = ann
return svc
}
func TestSameService(t *testing.T) {
tests := []struct {
about string
current *v1.Service
new *v1.Service
reason string
match bool
}{
{
about: "two equal services",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeClusterIP,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeClusterIP,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: true,
},
{
about: "services differ on service type",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeClusterIP,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's type "LoadBalancer" doesn't match the current one "ClusterIP"`,
},
{
about: "services differ on lb source ranges",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"185.249.56.0/22"}),
match: false,
reason: `new service's LoadBalancerSourceRange doesn't match the current one`,
},
{
about: "new service doesn't have lb source ranges",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{}),
match: false,
reason: `new service's LoadBalancerSourceRange doesn't match the current one`,
},
{
about: "services differ on DNS annotation",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "new_clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's annotations doesn't match the current one: 'external-dns.alpha.kubernetes.io/hostname' changed from 'clstr.acid.zalan.do' to 'new_clstr.acid.zalan.do'.`,
},
{
about: "services differ on AWS ELB annotation",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: "1800",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's annotations doesn't match the current one: 'service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout' changed from '3600' to '1800'.`,
},
{
about: "service changes existing annotation",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "bar",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "baz",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's annotations doesn't match the current one: 'foo' changed from 'bar' to 'baz'.`,
},
{
about: "service changes multiple existing annotations",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "bar",
"bar": "foo",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "baz",
"bar": "fooz",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
// Test just the prefix to avoid flakiness and map sorting
reason: `new service's annotations doesn't match the current one:`,
},
{
about: "service adds a new custom annotation",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "bar",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's annotations doesn't match the current one: Added 'foo' with value 'bar'.`,
},
{
about: "service removes a custom annotation",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "bar",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's annotations doesn't match the current one: Removed 'foo'.`,
},
{
about: "service removes a custom annotation and adds a new one",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "bar",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"bar": "foo",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's annotations doesn't match the current one: Removed 'foo'. Added 'bar' with value 'foo'.`,
},
{
about: "service removes a custom annotation, adds a new one and change another",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "bar",
"zalan": "do",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"bar": "foo",
"zalan": "do.com",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
// Test just the prefix to avoid flakiness and map sorting
reason: `new service's annotations doesn't match the current one: Removed 'foo'.`,
},
{
about: "service add annotations",
current: newsService(
map[string]string{},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
// Test just the prefix to avoid flakiness and map sorting
reason: `new service's annotations doesn't match the current one: Added `,
},
}
for _, tt := range tests {
t.Run(tt.about, func(t *testing.T) {
match, reason := SameService(tt.current, tt.new)
if match && !tt.match {
t.Errorf("expected services to do not match: '%q' and '%q'", tt.current, tt.new)
return
}
if !match && tt.match {
t.Errorf("expected services to be the same: '%q' and '%q'", tt.current, tt.new)
return
}
if !match && !tt.match {
if !strings.HasPrefix(reason, tt.reason) {
t.Errorf("expected reason prefix '%s', found '%s'", tt.reason, reason)
return
}
}
})
}
}

View File

@ -1,24 +1,17 @@
 .PHONY: clean test appjs docker push mock

-BINARY       ?= postgres-operator-ui
-BUILD_FLAGS  ?= -v
-CGO_ENABLED  ?= 0
-ifeq ($(RACE),1)
-	BUILD_FLAGS += -race -a
-	CGO_ENABLED=1
-endif
-
-LOCAL_BUILD_FLAGS ?= $(BUILD_FLAGS)
-LDFLAGS      ?= -X=main.version=$(VERSION)
-
-IMAGE        ?= registry.opensource.zalan.do/acid/$(BINARY)
+IMAGE        ?= registry.opensource.zalan.do/acid/postgres-operator-ui
 VERSION      ?= $(shell git describe --tags --always --dirty)
 TAG          ?= $(VERSION)
 GITHEAD      = $(shell git rev-parse --short HEAD)
 GITURL       = $(shell git config --get remote.origin.url)
-GITSTATU     = $(shell git status --porcelain || echo 'no changes')
+GITSTATUS    = $(shell git status --porcelain || echo 'no changes')
 TTYFLAGS     = $(shell test -t 0 && echo '-it')

+ifdef CDP_PULL_REQUEST_NUMBER
+	CDP_TAG := -${CDP_BUILD_VERSION}
+endif
+
 default: docker

 clean:
@ -32,11 +25,15 @@ appjs:
 	docker run $(TTYFLAGS) -u $$(id -u) -v $$(pwd):/workdir -w /workdir/app node:10.1.0-alpine npm run build

 docker: appjs
-	docker build --build-arg "VERSION=$(VERSION)" -t "$(IMAGE):$(TAG)" .
-	@echo 'Docker image $(IMAGE):$(TAG) can now be used.'
+	echo `(env)`
+	echo "Tag ${TAG}"
+	echo "Version ${VERSION}"
+	echo "CDP tag ${CDP_TAG}"
+	echo "git describe $(shell git describe --tags --always --dirty)"
+	docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)" -f Dockerfile .

-push: docker
-	docker push "$(IMAGE):$(TAG)"
+push:
+	docker push "$(IMAGE):$(TAG)$(CDP_TAG)"

 mock:
 	docker run -it -p 8080:8080 "$(IMAGE):$(TAG)" --mock

View File

@ -1,6 +1,6 @@
 {
   "name": "postgres-operator-ui",
-  "version": "1.0.0",
+  "version": "1.3.0",
   "description": "PostgreSQL Operator UI",
   "main": "src/app.js",
   "config": {

View File

@@ -408,7 +408,7 @@ new
             ref='cpuLimit'
             type='number'
             placeholder='{ cpu.state.limit.initialValue }'
-            min='1'
+            min='250'
             required
             value='{ cpu.state.limit.state }'
             onchange='{ cpu.state.limit.edit }'
@@ -434,7 +434,7 @@ new
             onkeyup='{ memory.state.request.edit }'
           )
           .input-group-addon
-            .input-units Gi
+            .input-units Mi
         .input-group
           .input-group-addon.resource-type Limit
@@ -442,14 +442,14 @@ new
             ref='memoryLimit'
             type='number'
             placeholder='{ memory.state.limit.initialValue }'
-            min='1'
+            min='250'
             required
             value='{ memory.state.limit.state }'
             onchange='{ memory.state.limit.edit }'
             onkeyup='{ memory.state.limit.edit }'
           )
           .input-group-addon
-            .input-units Gi
+            .input-units Mi
       .col-lg-3
         help-general(config='{ opts.config }')
@@ -519,10 +519,10 @@ new
           resources:
             requests:
               cpu: {{ cpu.state.request.state }}m
-              memory: {{ memory.state.request.state }}Gi
+              memory: {{ memory.state.request.state }}Mi
             limits:
               cpu: {{ cpu.state.limit.state }}m
-              memory: {{ memory.state.limit.state }}Gi{{#if restoring}}
+              memory: {{ memory.state.limit.state }}Mi{{#if restoring}}
           clone:
             cluster: "{{ backup.state.name.state }}"
@@ -786,8 +786,8 @@ new
       return instance
     }

-    this.cpu = DynamicResource({ request: 100, limit: 1000 })
-    this.memory = DynamicResource({ request: 1, limit: 1 })
+    this.cpu = DynamicResource({ request: 100, limit: 500 })
+    this.memory = DynamicResource({ request: 100, limit: 500 })

     this.backup = DynamicSet({
       type: () => 'empty',

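Taken together, these form changes switch the UI's memory inputs from Gi to Mi: the generated manifest now emits Mi units, the defaults move from 1Gi to a 100Mi request and 500Mi limit, and the number inputs enforce a floor of 250 (milli-cores for CPU, Mi for memory).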
View File

@@ -76,6 +76,9 @@ postgresql
     .alert.alert-danger(if='{ progress.requestStatus !== "OK" }') Create request failed
     .alert.alert-success(if='{ progress.requestStatus === "OK" }') Create request successful ({ new Date(progress.createdTimestamp).toLocaleString() })

+    .alert.alert-info(if='{ !progress.postgresql }') PostgreSQL cluster manifest pending
+    .alert.alert-success(if='{ progress.postgresql }') PostgreSQL cluster manifest created
+
     .alert.alert-info(if='{ !progress.statefulSet }') StatefulSet pending
     .alert.alert-success(if='{ progress.statefulSet }') StatefulSet created

View File

@@ -45,12 +45,14 @@ postgresqls
   thead
     tr
       th(style='width: 120px') Team
+      th(style='width: 130px') Namespace
+      th Name
       th(style='width: 50px') Pods
       th(style='width: 140px') CPU
       th(style='width: 130px') Memory
       th(style='width: 100px') Size
-      th(style='width: 130px') Namespace
-      th Name
+      th(style='width: 120px') Cost/Month
+      th(style='width: 120px')
   tbody
     tr(
@@ -58,19 +60,21 @@ postgresqls
       hidden='{ !namespaced_name.toLowerCase().includes(filter.state.toLowerCase()) }'
     )
       td { team }
-      td { nodes }
-      td { cpu } / { cpu_limit }
-      td { memory } / { memory_limit }
-      td { volume_size }
       td(style='white-space: pre')
         | { namespace }
       td
        a(
           href='/#/status/{ cluster_path(this) }'
         )
           | { name }
+      td { nodes }
+      td { cpu } / { cpu_limit }
+      td { memory } / { memory_limit }
+      td { volume_size }
+      td { calcCosts(nodes, cpu, memory, volume_size) }$
+      td
       .btn-group.pull-right(
         aria-label='Cluster { qname } actions'
@@ -124,12 +128,14 @@ postgresqls
   thead
     tr
       th(style='width: 120px') Team
+      th(style='width: 130px') Namespace
+      th Name
       th(style='width: 50px') Pods
       th(style='width: 140px') CPU
       th(style='width: 130px') Memory
       th(style='width: 100px') Size
-      th(style='width: 130px') Namespace
-      th Name
+      th(style='width: 120px') Cost/Month
+      th(style='width: 120px')
   tbody
     tr(
@@ -137,20 +143,20 @@ postgresqls
       hidden='{ !namespaced_name.toLowerCase().includes(filter.state.toLowerCase()) }'
     )
       td { team }
-      td { nodes }
-      td { cpu } / { cpu_limit }
-      td { memory } / { memory_limit }
-      td { volume_size }
       td(style='white-space: pre')
         | { namespace }
       td
         a(
           href='/#/status/{ cluster_path(this) }'
         )
           | { name }
+      td { nodes }
+      td { cpu } / { cpu_limit }
+      td { memory } / { memory_limit }
+      td { volume_size }
+      td { calcCosts(nodes, cpu, memory, volume_size) }$
+      td
       .btn-group.pull-right(
         aria-label='Cluster { qname } actions'
@@ -223,6 +229,45 @@ postgresqls
       + '/' + encodeURI(cluster.name)
     )

+    const calcCosts = this.calcCosts = (nodes, cpu, memory, disk) => {
+      const costs = nodes * (toCores(cpu) * opts.config.cost_core + toMemory(memory) * opts.config.cost_memory + toDisk(disk) * opts.config.cost_ebs)
+      return costs.toFixed(2)
+    }
+
+    const toDisk = this.toDisk = value => {
+      if (value.endsWith("Gi")) {
+        value = value.substring(0, value.length - 2)
+        value = Number(value)
+        return value
+      }
+
+      return value
+    }
+
+    const toMemory = this.toMemory = value => {
+      if (value.endsWith("Mi")) {
+        value = value.substring(0, value.length - 2)
+        value = Number(value) / 1000.
+        return value
+      }
+      else if (value.endsWith("Gi")) {
+        value = value.substring(0, value.length - 2)
+        value = Number(value)
+        return value
+      }
+
+      return value
+    }
+
+    const toCores = this.toCores = value => {
+      if (value.endsWith("m")) {
+        value = value.substring(0, value.length - 1)
+        value = Number(value) / 1000.
+        return value
+      }
+
+      return value
+    }
+
     this.on('mount', () =>
       jQuery
         .get('/postgresqls')

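For reference, a minimal Python sketch of the estimate the new calcCosts helper produces; the prices mirror the defaults added in this commit, the cluster numbers are made up, and the Mi-to-GB division by 1000 follows the JavaScript above:

# Monthly cost ~= nodes * (cores * cost_core + memory_gb * cost_memory + disk_gb * cost_ebs)
COST_EBS = 0.119                    # $ per GB-month of EBS storage
COST_CORE = 30.5 * 24 * 0.0575      # ~42.09 $ per core-month
COST_MEMORY = 30.5 * 24 * 0.014375  # ~10.52 $ per GB-month

def to_cores(value):
    # "500m" -> 0.5 cores; bare numbers pass through unchanged
    return float(value[:-1]) / 1000.0 if value.endswith("m") else float(value)

def to_memory(value):
    # "500Mi" -> 0.5, "2Gi" -> 2.0 (dividing Mi by 1000, like the UI code)
    if value.endswith("Mi"):
        return float(value[:-2]) / 1000.0
    if value.endswith("Gi"):
        return float(value[:-2])
    return float(value)

def to_disk(value):
    # "10Gi" -> 10.0
    return float(value[:-2]) if value.endswith("Gi") else float(value)

def calc_costs(nodes, cpu, memory, disk):
    per_node = (to_cores(cpu) * COST_CORE
                + to_memory(memory) * COST_MEMORY
                + to_disk(disk) * COST_EBS)
    return round(nodes * per_node, 2)

# Two pods with 500m CPU, 500Mi memory and a 10Gi volume each:
print(calc_costs(2, "500m", "500Mi", "10Gi"))  # -> 54.99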
View File

@@ -4,23 +4,23 @@ metadata:
   name: "postgres-operator-ui"
   namespace: "default"
   labels:
-    application: "postgres-operator-ui"
+    name: "postgres-operator-ui"
     team: "acid"
 spec:
   replicas: 1
   selector:
     matchLabels:
-      application: "postgres-operator-ui"
+      name: "postgres-operator-ui"
   template:
     metadata:
       labels:
-        application: "postgres-operator-ui"
+        name: "postgres-operator-ui"
         team: "acid"
     spec:
       serviceAccountName: postgres-operator-ui
       containers:
         - name: "service"
-          image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.2.0
+          image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.3.0
           ports:
             - containerPort: 8081
               protocol: "TCP"
@@ -32,8 +32,8 @@ spec:
             timeoutSeconds: 1
           resources:
             limits:
-              cpu: "300m"
-              memory: "3000Mi"
+              cpu: "200m"
+              memory: "200Mi"
             requests:
               cpu: "100m"
               memory: "100Mi"
@@ -41,7 +41,9 @@ spec:
             - name: "APP_URL"
               value: "http://localhost:8081"
             - name: "OPERATOR_API_URL"
-              value: "http://localhost:8080"
+              value: "http://postgres-operator:8080"
+            - name: "OPERATOR_CLUSTER_NAME_LABEL"
+              value: "cluster-name"
             - name: "TARGET_NAMESPACE"
               value: "default"
            - name: "TEAMS"
@@ -60,9 +62,14 @@ spec:
                 "replica_load_balancer_visible": true,
                 "resources_visible": true,
                 "users_visible": true,
+                "cost_ebs": 0.119,
+                "cost_core": 0.0575,
+                "cost_memory": 0.014375,
                 "postgresql_versions": [
+                  "12",
                   "11",
                   "10",
-                  "9.6"
+                  "9.6",
+                  "9.5"
                 ]
               }

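The cost_ebs, cost_core and cost_memory keys injected here are the per-unit prices consumed by the new Cost/Month column in the cluster list; they match the defaults that operator_ui falls back to (see below).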
View File

@@ -5,7 +5,7 @@ metadata:
   namespace: default
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: postgres-operator-ui
@@ -61,7 +61,5 @@ roleRef:
   name: postgres-operator-ui
 subjects:
 - kind: ServiceAccount
-  # note: the cluster role binding needs to be defined
-  # for every namespace the operator-ui service account lives in.
   name: postgres-operator-ui
   namespace: default

View File

@@ -76,6 +76,7 @@ ACCESS_TOKEN_URL = getenv('ACCESS_TOKEN_URL')
 TOKENINFO_URL = getenv('OAUTH2_TOKEN_INFO_URL')
 OPERATOR_API_URL = getenv('OPERATOR_API_URL', 'http://postgres-operator')
+OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name')
 OPERATOR_UI_CONFIG = getenv('OPERATOR_UI_CONFIG', '{}')
 OPERATOR_UI_MAINTENANCE_CHECK = getenv('OPERATOR_UI_MAINTENANCE_CHECK', '{}')
 READ_ONLY_MODE = getenv('READ_ONLY_MODE', False) in [True, 'true']
@@ -84,6 +85,13 @@ SUPERUSER_TEAM = getenv('SUPERUSER_TEAM', 'acid')
 TARGET_NAMESPACE = getenv('TARGET_NAMESPACE')
 GOOGLE_ANALYTICS = getenv('GOOGLE_ANALYTICS', False)

+# storage pricing, e.g. https://aws.amazon.com/ebs/pricing/
+COST_EBS = float(getenv('COST_EBS', 0.119))  # GB per month
+
+# compute costs, e.g. https://www.ec2instances.info/?region=eu-central-1&selected=m5.2xlarge
+COST_CORE = 30.5 * 24 * float(getenv('COST_CORE', 0.0575))  # price per core-hour, m5.2xlarge / 8
+COST_MEMORY = 30.5 * 24 * float(getenv('COST_MEMORY', 0.014375))  # price per GB-hour, m5.2xlarge / 32
+
 WALE_S3_ENDPOINT = getenv(
     'WALE_S3_ENDPOINT',
     'https+path://s3-eu-central-1.amazonaws.com:443',
@@ -293,6 +301,9 @@ DEFAULT_UI_CONFIG = {
     'dns_format_string': '{0}.{1}.{2}',
     'pgui_link': '',
     'static_network_whitelist': {},
+    'cost_ebs': COST_EBS,
+    'cost_core': COST_CORE,
+    'cost_memory': COST_MEMORY
 }
@@ -1003,6 +1014,7 @@ def main(port, secret_key, debug, clusters: list):
     logger.info(f'App URL: {APP_URL}')
     logger.info(f'Authorize URL: {AUTHORIZE_URL}')
     logger.info(f'Operator API URL: {OPERATOR_API_URL}')
+    logger.info(f'Operator cluster name label: {OPERATOR_CLUSTER_NAME_LABEL}')
     logger.info(f'Readonly mode: {"enabled" if READ_ONLY_MODE else "disabled"}')  # noqa
     logger.info(f'Spilo S3 backup bucket: {SPILO_S3_BACKUP_BUCKET}')
     logger.info(f'Spilo S3 backup prefix: {SPILO_S3_BACKUP_PREFIX}')

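Spelled out, the conversion assumes a 30.5-day month: 30.5 × 24 × $0.0575 ≈ $42.09 per core-month and 30.5 × 24 × $0.014375 ≈ $10.52 per GB of memory per month, while COST_EBS is already a per-GB-month price.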
View File

@@ -3,7 +3,7 @@ from datetime import datetime, timezone
 from furl import furl
 from json import dumps
 from logging import getLogger
-from os import environ
+from os import environ, getenv
 from requests import Session
 from urllib.parse import urljoin
 from uuid import UUID
@@ -16,6 +16,8 @@ logger = getLogger(__name__)
 session = Session()

+OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name')
+
 def request(cluster, path, **kwargs):
     if 'timeout' not in kwargs:
@@ -137,7 +139,7 @@ def read_pods(cluster, namespace, spilo_cluster):
         cluster=cluster,
         resource_type='pods',
         namespace=namespace,
-        label_selector={'cluster-name': spilo_cluster},
+        label_selector={OPERATOR_CLUSTER_NAME_LABEL: spilo_cluster},
     )

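A short sketch of what this indirection enables: an operator deployment configured with a non-default cluster name label can point the UI at the same key through the environment. The cluster name below is made up.

from os import getenv

# Falls back to the operator's default label key.
OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name')

def pod_label_selector(spilo_cluster):
    # The Kubernetes API renders {key: value} as the selector "key=value".
    return {OPERATOR_CLUSTER_NAME_LABEL: spilo_cluster}

print(pod_label_selector('acid-demo'))  # {'cluster-name': 'acid-demo'}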
View File

@@ -1,14 +1,15 @@
 Flask-OAuthlib==0.9.5
-Flask==1.0.2
-backoff==1.5.0
-boto3==1.5.14
-boto==2.48.0
+Flask==1.1.1
+backoff==1.8.1
+boto3==1.10.4
+boto==2.49.0
 click==6.7
-furl==1.0.1
+furl==1.0.2
 gevent==1.2.2
 jq==0.1.6
 json_delta>=2.0
 kubernetes==3.0.0
-requests==2.20.1
+requests==2.22.0
 stups-tokens>=1.1.19
 wal_e==1.1.0
+werkzeug==0.16.1

Some files were not shown because too many files have changed in this diff.