merge with master and resolve conflicts

commit c991651069
@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -0,0 +1,21 @@
apiVersion: v1
name: postgres-operator-ui
version: 0.1.0
appVersion: 1.3.0
home: https://github.com/zalando/postgres-operator
description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
keywords:
- postgres
- operator
- ui
- cloud-native
- patroni
- spilo
maintainers:
- name: Zalando
  email: opensource@zalando.de
- name: siku4
  email: sk@sik-net.de
sources:
- https://github.com/zalando/postgres-operator
engine: gotpl
@@ -0,0 +1,3 @@
To verify that postgres-operator has started, run:

  kubectl --namespace={{ .Release.Namespace }} get pods -l "app.kubernetes.io/name={{ template "postgres-operator-ui.name" . }}"
@@ -0,0 +1,39 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "postgres-operator-ui.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "postgres-operator-ui.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create a service account name.
*/}}
{{- define "postgres-operator-ui.serviceAccountName" -}}
{{ default (include "postgres-operator-ui.fullname" .) .Values.serviceAccount.name }}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "postgres-operator-ui.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -0,0 +1,52 @@
{{ if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "postgres-operator-ui.serviceAccountName" . }}
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
    helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups:
  - acid.zalan.do
  resources:
  - postgresqls
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
- apiGroups:
  - apps
  resources:
  - statefulsets
  verbs:
  - get
  - list
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
  - list
{{ end }}
@@ -0,0 +1,19 @@
{{ if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "postgres-operator-ui.serviceAccountName" . }}
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
    helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "postgres-operator-ui.serviceAccountName" . }}
subjects:
- kind: ServiceAccount
  name: {{ include "postgres-operator-ui.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
{{ end }}
@@ -0,0 +1,69 @@
apiVersion: "apps/v1"
kind: "Deployment"
metadata:
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
    helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
  name: {{ template "postgres-operator-ui.fullname" . }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
        team: "acid" # Parameterize?
    spec:
      serviceAccountName: {{ include "postgres-operator-ui.serviceAccountName" . }}
      containers:
        - name: "service"
          image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - containerPort: 8081
              protocol: "TCP"
          readinessProbe:
            httpGet:
              path: "/health"
              port: 8081
            initialDelaySeconds: 5
            timeoutSeconds: 1
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          env:
            - name: "APP_URL"
              value: "http://localhost:8081"
            - name: "OPERATOR_API_URL"
              value: {{ .Values.envs.operatorApiUrl }}
            - name: "TARGET_NAMESPACE"
              value: {{ .Values.envs.targetNamespace }}
            - name: "TEAMS"
              value: |-
                [
                  "acid"
                ]
            - name: "OPERATOR_UI_CONFIG"
              value: |-
                {
                  "docs_link":"https://postgres-operator.readthedocs.io/en/latest/",
                  "dns_format_string": "{1}-{0}.{2}",
                  "databases_visible": true,
                  "master_load_balancer_visible": true,
                  "nat_gateways_visible": false,
                  "replica_load_balancer_visible": true,
                  "resources_visible": true,
                  "users_visible": true,
                  "postgresql_versions": [
                    "12",
                    "11",
                    "10",
                    "9.6",
                    "9.5"
                  ]
                }
@@ -0,0 +1,44 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "postgres-operator-ui.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
    helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . | quote }}
      {{- end }}
      secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
        {{- range .paths }}
          - path: {{ . }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
        {{- end }}
  {{- end }}
{{- end }}
@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
    helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
  name: {{ template "postgres-operator-ui.fullname" . }}
spec:
  ports:
    - port: {{ .Values.service.port }}
      targetPort: 8081
      protocol: TCP
  selector:
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
  type: {{ .Values.service.type }}
@@ -0,0 +1,11 @@
{{ if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "postgres-operator-ui.serviceAccountName" . }}
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
    helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
{{ end }}
@@ -0,0 +1,58 @@
# Default values for postgres-operator-ui.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

# configure ui image
image:
  registry: registry.opensource.zalan.do
  repository: acid/postgres-operator-ui
  tag: v1.2.0
  pullPolicy: "IfNotPresent"

rbac:
  # Specifies whether RBAC resources should be created
  create: true

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

# configure UI pod resources
resources:
  limits:
    cpu: 300m
    memory: 3000Mi
  requests:
    cpu: 100m
    memory: 100Mi

# configure UI ENVs
envs:
  # IMPORTANT: While operator chart and UI chart are independent, this is the interface between
  # UI and operator API. Insert the service name of the operator API here!
  operatorApiUrl: "http://postgres-operator:8080"
  targetNamespace: "default"

# configure UI service
service:
  type: "ClusterIP"
  port: "8080"

# configure UI ingress. If needed: "enabled: true"
ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: ui.example.org
      paths: [""]
  tls: []
  #  - secretName: ui-tls
  #    hosts:
  #      - ui.example.org
@@ -179,6 +179,12 @@ spec:
              default_memory_request:
                type: string
                pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
              min_cpu_limit:
                type: string
                pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
              min_memory_limit:
                type: string
                pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
          timeouts:
            type: object
            properties:
@@ -237,6 +243,8 @@ spec:
                type: string
              logical_backup_s3_endpoint:
                type: string
              logical_backup_s3_region:
                type: string
              logical_backup_s3_secret_access_key:
                type: string
              logical_backup_s3_sse:
@@ -266,6 +266,10 @@ spec:
                pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                # Note: the value specified here must not be zero or be higher
                # than the corresponding limit.
          serviceAnnotations:
            type: object
            additionalProperties:
              type: string
          sidecars:
            type: array
            nullable: true
@@ -0,0 +1,53 @@
{{ if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: postgres-pod
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
    helm.sh/chart: {{ template "postgres-operator.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
rules:
# Patroni needs to watch and manage endpoints
- apiGroups:
  - ""
  resources:
  - endpoints
  verbs:
  - create
  - delete
  - deletecollection
  - get
  - list
  - patch
  - update
  - watch
# Patroni needs to watch pods
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list
  - patch
  - update
  - watch
# to let Patroni create a headless service
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - create
# to run privileged pods
- apiGroups:
  - extensions
  resources:
  - podsecuritypolicies
  resourceNames:
  - privileged
  verbs:
  - use
{{ end }}
@@ -1,5 +1,5 @@
{{ if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "postgres-operator.serviceAccountName" . }}
@@ -9,6 +9,7 @@ metadata:
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
rules:
# all verbs allowed for custom operator resources
- apiGroups:
  - acid.zalan.do
  resources:
@@ -16,7 +17,15 @@ rules:
  - postgresqls/status
  - operatorconfigurations
  verbs:
  - "*"
  - create
  - delete
  - deletecollection
  - get
  - list
  - patch
  - update
  - watch
# to create or get/update CRDs when starting up
- apiGroups:
  - apiextensions.k8s.io
  resources:
@@ -26,12 +35,14 @@ rules:
  - get
  - patch
  - update
# to read configuration from ConfigMaps
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
# to manage endpoints which are also used by Patroni
- apiGroups:
  - ""
  resources:
@@ -43,7 +54,9 @@ rules:
  - get
  - list
  - patch
  - watch # needed if zalando-postgres-operator account is used for pods as well
  - update
  - watch
# to CRUD secrets for database access
- apiGroups:
  - ""
  resources:
@@ -53,6 +66,7 @@ rules:
  - update
  - delete
  - get
# to check nodes for node readiness label
- apiGroups:
  - ""
  resources:
@@ -61,6 +75,7 @@ rules:
  - get
  - list
  - watch
# to read or delete existing PVCs. Creation via StatefulSet
- apiGroups:
  - ""
  resources:
@@ -69,6 +84,7 @@ rules:
  - delete
  - get
  - list
# to read existing PVs. Creation should be done via dynamic provisioning
- apiGroups:
  - ""
  resources:
@@ -77,6 +93,7 @@ rules:
  - get
  - list
  - update # only for resizing AWS volumes
# to watch Spilo pods and do rolling updates. Creation via StatefulSet
- apiGroups:
  - ""
  resources:
@@ -86,13 +103,16 @@ rules:
  - get
  - list
  - watch
  - update
  - patch
# to resize the filesystem in Spilo pods when increasing volume size
- apiGroups:
  - ""
  resources:
  - pods/exec
  verbs:
  - create
# to CRUD services to point to Postgres cluster instances
- apiGroups:
  - ""
  resources:
@@ -102,6 +122,8 @@ rules:
  - delete
  - get
  - patch
  - update
# to CRUD the StatefulSet which controls the Postgres cluster instances
- apiGroups:
  - apps
  resources:
@@ -112,12 +134,26 @@ rules:
  - get
  - list
  - patch
# to CRUD cron jobs for logical backups
- apiGroups:
  - batch
  resources:
  - cronjobs
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
# to get namespaces operator resources can run in
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
# to define PDBs. Update happens via delete/create
- apiGroups:
  - policy
  resources:
@@ -126,6 +162,7 @@ rules:
  - create
  - delete
  - get
# to create ServiceAccounts in each namespace the operator watches
- apiGroups:
  - ""
  resources:
@@ -133,30 +170,21 @@ rules:
  verbs:
  - get
  - create
# to create role bindings to the postgres-pod service account
- apiGroups:
  - "rbac.authorization.k8s.io"
  - rbac.authorization.k8s.io
  resources:
  - rolebindings
  verbs:
  - get
  - create
# to grant privilege to run privileged pods
- apiGroups:
  - "rbac.authorization.k8s.io"
  - extensions
  resources:
  - clusterroles
  verbs:
  - bind
  - podsecuritypolicies
  resourceNames:
  - {{ include "postgres-operator.serviceAccountName" . }}
- apiGroups:
  - batch
  resources:
  - cronjobs # enables logical backups
  - privileged
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - use
{{ end }}
@@ -14,8 +14,6 @@ roleRef:
  name: {{ include "postgres-operator.serviceAccountName" . }}
subjects:
- kind: ServiceAccount
  # note: the cluster role binding needs to be defined
  # for every namespace the operator service account lives in.
  name: {{ include "postgres-operator.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
{{ end }}
@@ -9,7 +9,6 @@ metadata:
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
data:
  pod_service_account_name: {{ include "postgres-operator.serviceAccountName" . }}
{{ toYaml .Values.configGeneral | indent 2 }}
{{ toYaml .Values.configUsers | indent 2 }}
{{ toYaml .Values.configKubernetes | indent 2 }}
@@ -14,7 +14,6 @@ configuration:
{{ toYaml .Values.configUsers | indent 4 }}
  kubernetes:
    oauth_token_secret_name: {{ template "postgres-operator.fullname" . }}
    pod_service_account_name: {{ include "postgres-operator.serviceAccountName" . }}
{{ toYaml .Values.configKubernetes | indent 4 }}
  postgres_pod_resources:
{{ toYaml .Values.configPostgresPodResources | indent 4 }}
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
    helm.sh/chart: {{ template "postgres-operator.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
  name: {{ template "postgres-operator.fullname" . }}
spec:
  type: ClusterIP
  ports:
  - port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
@@ -100,6 +100,8 @@ configKubernetes:
  pod_management_policy: "ordered_ready"
  # label assigned to the Postgres pods (and services/endpoints)
  pod_role_label: spilo-role
  # name of service account to be used by postgres cluster pods
  pod_service_account_name: "postgres-pod"
  # Postgres pods are terminated forcefully after this timeout
  pod_terminate_grace_period: 5m
  # template for database user secrets generated by the operator
@@ -115,13 +117,17 @@ configKubernetes:
# configure resource requests for the Postgres pods
configPostgresPodResources:
  # CPU limits for the postgres containers
  default_cpu_limit: "3"
  # cpu request value for the postgres containers
  default_cpu_limit: "1"
  # CPU request value for the postgres containers
  default_cpu_request: 100m
  # memory limits for the postgres containers
  default_memory_limit: 1Gi
  default_memory_limit: 500Mi
  # memory request value for the postgres containers
  default_memory_request: 100Mi
  # hard CPU minimum required to properly run a Postgres cluster
  min_cpu_limit: 250m
  # hard memory minimum required to properly run a Postgres cluster
  min_memory_limit: 250Mi

# timeouts related to some operator actions
configTimeouts:
@@ -200,6 +206,8 @@ configLogicalBackup:
  logical_backup_s3_access_key_id: ""
  # S3 bucket to store backup results
  logical_backup_s3_bucket: "my-bucket-url"
  # S3 region of bucket
  logical_backup_s3_region: ""
  # S3 endpoint url when not using AWS
  logical_backup_s3_endpoint: ""
  # S3 Secret Access Key
@@ -251,7 +259,7 @@ configScalyr:
  # CPU request value for the Scalyr sidecar
  scalyr_cpu_request: 100m
  # Memory limit value for the Scalyr sidecar
  scalyr_memory_limit: 1Gi
  scalyr_memory_limit: 500Mi
  # Memory request value for the Scalyr sidecar
  scalyr_memory_request: 50Mi
@@ -272,13 +280,13 @@ serviceAccount:

priorityClassName: ""

resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 300Mi
  # requests:
  #   cpu: 100m
  #   memory: 300Mi
resources:
  limits:
    cpu: 500m
    memory: 500Mi
  requests:
    cpu: 100m
    memory: 250Mi

# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
@@ -93,6 +93,8 @@ configKubernetes:
  pod_management_policy: "ordered_ready"
  # label assigned to the Postgres pods (and services/endpoints)
  pod_role_label: spilo-role
  # name of service account to be used by postgres cluster pods
  pod_service_account_name: "postgres-pod"
  # Postgres pods are terminated forcefully after this timeout
  pod_terminate_grace_period: 5m
  # template for database user secrets generated by the operator
@@ -108,13 +110,17 @@ configKubernetes:
# configure resource requests for the Postgres pods
configPostgresPodResources:
  # CPU limits for the postgres containers
  default_cpu_limit: "3"
  # cpu request value for the postgres containers
  default_cpu_limit: "1"
  # CPU request value for the postgres containers
  default_cpu_request: 100m
  # memory limits for the postgres containers
  default_memory_limit: 1Gi
  default_memory_limit: 500Mi
  # memory request value for the postgres containers
  default_memory_request: 100Mi
  # hard CPU minimum required to properly run a Postgres cluster
  min_cpu_limit: 250m
  # hard memory minimum required to properly run a Postgres cluster
  min_memory_limit: 250Mi

# timeouts related to some operator actions
configTimeouts:
@@ -191,6 +197,8 @@ configLogicalBackup:
  logical_backup_s3_access_key_id: ""
  # S3 bucket to store backup results
  logical_backup_s3_bucket: "my-bucket-url"
  # S3 region of bucket
  logical_backup_s3_region: ""
  # S3 endpoint url when not using AWS
  logical_backup_s3_endpoint: ""
  # S3 Secret Access Key
@@ -248,13 +256,13 @@ serviceAccount:

priorityClassName: ""

resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 300Mi
  # requests:
  #   cpu: 100m
  #   memory: 300Mi
resources:
  limits:
    cpu: 500m
    memory: 500Mi
  requests:
    cpu: 100m
    memory: 250Mi

# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
@@ -19,6 +19,7 @@ RUN apt-get update \
    && curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
    && apt-get update \
    && apt-get install --no-install-recommends -y \
        postgresql-client-12 \
        postgresql-client-11 \
        postgresql-client-10 \
        postgresql-client-9.6 \
@@ -28,6 +29,6 @@ RUN apt-get update \

COPY dump.sh ./

ENV PG_DIR=/usr/lib/postgresql/
ENV PG_DIR=/usr/lib/postgresql

ENTRYPOINT ["/dump.sh"]
@@ -6,12 +6,10 @@ set -o nounset
set -o pipefail
IFS=$'\n\t'

# make script trace visible via `kubectl logs`
set -o xtrace

ALL_DB_SIZE_QUERY="select sum(pg_database_size(datname)::numeric) from pg_database;"
PG_BIN=$PG_DIR/$PG_VERSION/bin
DUMP_SIZE_COEFF=5
ERRORCOUNT=0

TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
K8S_API_URL=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/api/v1
@@ -42,9 +40,10 @@ function aws_upload {

  [[ ! -z "$EXPECTED_SIZE" ]] && args+=("--expected-size=$EXPECTED_SIZE")
  [[ ! -z "$LOGICAL_BACKUP_S3_ENDPOINT" ]] && args+=("--endpoint-url=$LOGICAL_BACKUP_S3_ENDPOINT")
  [[ ! "$LOGICAL_BACKUP_S3_SSE" == "" ]] && args+=("--sse=$LOGICAL_BACKUP_S3_SSE")
  [[ ! -z "$LOGICAL_BACKUP_S3_REGION" ]] && args+=("--region=$LOGICAL_BACKUP_S3_REGION")
  [[ ! -z "$LOGICAL_BACKUP_S3_SSE" ]] && args+=("--sse=$LOGICAL_BACKUP_S3_SSE")

  aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}" --debug
  aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}"
}

function get_pods {
@@ -93,4 +92,9 @@ for search in "${search_strategy[@]}"; do

done

set -x
dump | compress | aws_upload $(($(estimate_size) / DUMP_SIZE_COEFF))
[[ ${PIPESTATUS[0]} != 0 || ${PIPESTATUS[1]} != 0 || ${PIPESTATUS[2]} != 0 ]] && (( ERRORCOUNT += 1 ))
set +x

exit $ERRORCOUNT
@@ -47,6 +47,12 @@ patching the CRD manifest:
zk8 patch crd postgresqls.acid.zalan.do -p '{"spec":{"validation": null}}'
```

## Non-default cluster domain

If your cluster uses a DNS domain other than the default `cluster.local`, this
needs to be set in the operator configuration (`cluster_domain` variable). This
is used by the operator to connect to the clusters after creation.
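
With the ConfigMap-based configuration this is a single entry under `data`
(a minimal sketch; `acme.local` is only a placeholder domain):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  cluster_domain: acme.local
```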

## Namespaces

### Select the namespace to deploy to
@@ -89,36 +95,13 @@ lacks access rights to any of them (except K8s system namespaces like
'list pods' execute at the cluster scope and fail at the first violation of
access rights.

The watched namespace also needs to have a (possibly different) service account
in case database pods need to talk to the K8s API (e.g. when using
K8s-native configuration of Patroni). The operator checks that the
`pod_service_account_name` exists in the target namespace, and, if not, deploys
there the `pod_service_account_definition` from the operator
[`Config`](../pkg/util/config/config.go) with the default value of:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: operator
```

In this definition, the operator overwrites the account's name to match
`pod_service_account_name` and the `default` namespace to match the target
namespace. The operator performs **no** further syncing of this account.

## Non-default cluster domain

If your cluster uses a DNS domain other than the default `cluster.local`, this
needs to be set in the operator configuration (`cluster_domain` variable). This
is used by the operator to connect to the clusters after creation.

## Role-based access control for the operator

The manifest [`operator-service-account-rbac.yaml`](../manifests/operator-service-account-rbac.yaml)
defines the service account, cluster roles and bindings needed for the operator
to function under access control restrictions. To deploy the operator with this
RBAC policy use:
to function under access control restrictions. The file also includes a cluster
role `postgres-pod` with privileges for Patroni to watch and manage pods and
endpoints. To deploy the operator with these RBAC policies use:

```bash
kubectl create -f manifests/configmap.yaml
@@ -127,14 +110,14 @@ kubectl create -f manifests/postgres-operator.yaml
kubectl create -f manifests/minimal-postgres-manifest.yaml
```

### Service account and cluster roles
### Namespaced service account and role binding

Note that the service account is named `zalando-postgres-operator`. You may have
to change the `service_account_name` in the operator ConfigMap and
`serviceAccountName` in the `postgres-operator` deployment appropriately. This
is done intentionally to avoid breaking those setups that already work with the
default `operator` account. In the future the operator should ideally be run
under the `zalando-postgres-operator` service account.
For each namespace the operator watches it creates (or reads) a service account
and role binding to be used by the Postgres Pods. The service account is bound
to the `postgres-pod` cluster role. The name and definitions of these resources
can be [configured](reference/operator_parameters.md#kubernetes-resources).
Note that the operator performs **no** further syncing of namespaced service
accounts and role bindings.

### Give K8s users access to create/list `postgresqls`
@@ -376,6 +359,17 @@ cluster manifest. In the case any of these variables are omitted from the
manifest, the operator configuration settings `enable_master_load_balancer` and
`enable_replica_load_balancer` apply. Note that the operator settings affect
all Postgresql services running in all namespaces watched by the operator.
If load balancing is enabled, two default annotations will be applied to its
services, as sketched in the example after this list:

- `external-dns.alpha.kubernetes.io/hostname` with the value defined by the
  operator configs `master_dns_name_format` and `replica_dns_name_format`.
  This value can't be overwritten. If it needs to change, this MUST be done by
  changing the DNS format operator config parameters; and
- `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` with
  a default value of "3600". This value can be overwritten with the operator
  config parameter `custom_service_annotations` or the cluster parameter
  `serviceAnnotations`.
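
Putting the two defaults together, the metadata of an affected master service
would look roughly like this (a hypothetical sketch; the hostname only
illustrates a possible `master_dns_name_format` result and is not prescribed):

```yaml
metadata:
  annotations:
    external-dns.alpha.kubernetes.io/hostname: acid-minimal-cluster.db.example.com
    service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "3600"
```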

To limit the range of IP addresses that can reach a load balancer, specify the
desired ranges in the `allowedSourceRanges` field (applies to both master and
@@ -486,37 +480,71 @@ A secret can be pre-provisioned in different ways:

## Setting up the Postgres Operator UI

With the v1.2 release the Postgres Operator is shipped with a browser-based
Since the v1.2 release the Postgres Operator is shipped with a browser-based
configuration user interface (UI) that simplifies managing Postgres clusters
with the operator. The UI runs with Node.js and comes with its own Docker
image.
with the operator.

Run NPM to continuously compile `tags/js` code. Basically, it creates an
`app.js` file in: `static/build/app.js`
### Building the UI image

```
(cd ui/app && npm start)
```

To build the Docker image open a shell and change to the `ui` folder. Then run:
The UI runs with Node.js and comes with its own Docker
image. However, installing Node.js to build the operator UI is not required. It
is handled via Docker containers when running:

```bash
docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.2.0 .
make docker
```

Apply all manifests for the `ui/manifests` folder to deploy the Postgres
Operator UI on K8s. For local tests you don't need the Ingress resource.
### Configure endpoints and options

The UI talks to the K8s API server as well as the Postgres Operator [REST API](developer.md#debugging-the-operator).
K8s API server URLs are loaded from the machine's kubeconfig environment by
default. Alternatively, a list can also be passed when starting the Python
application with the `--cluster` option.

The Operator API endpoint can be configured via the `OPERATOR_API_URL`
environment variable in the [deployment manifest](../ui/manifests/deployment.yaml#L40).
You can also expose the operator API through a [service](../manifests/api-service.yaml).
Some displayed options can be disabled from the UI using simple flags under the
`OPERATOR_UI_CONFIG` field in the deployment.
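
For example, to hide the resources and users sections in the UI, the
corresponding flags from the deployment manifest can be flipped (a sketch;
only these two flags differ from the defaults shown earlier in this change):

```yaml
- name: "OPERATOR_UI_CONFIG"
  value: |-
    {
      "resources_visible": false,
      "users_visible": false
    }
```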

### Deploy the UI on K8s

Now, apply all manifests from the `ui/manifests` folder to deploy the Postgres
Operator UI on K8s. Replace the image tag in the deployment manifest if you
want to test the image you've built with `make docker`. Make sure the pods for
the operator and the UI are both running.

```bash
kubectl apply -f ui/manifests
sed -e "s/\(image\:.*\:\).*$/\1$TAG/" manifests/deployment.yaml | kubectl apply -f manifests/
kubectl get all -l application=postgres-operator-ui
```

Make sure the pods for the operator and the UI are both running. For local
testing you need to apply proxying and port forwarding so that the UI can talk
to the K8s and Postgres Operator REST API. You can use the provided
`run_local.sh` script for this. Make sure it uses the correct URL to your K8s
API server, e.g. for minikube it would be `https://192.168.99.100:8443`.
### Local testing

For local testing you need to apply K8s proxying and operator pod port
forwarding so that the UI can talk to the K8s and Postgres Operator REST API.
The Ingress resource is not needed. You can use the provided `run_local.sh`
script for this. Make sure that:

* Python dependencies are installed on your machine
* the K8s API server URL is set for kubectl commands, e.g. for minikube it would usually be `https://192.168.99.100:8443`.
* the pod label selectors for port forwarding are correct

When testing with minikube you have to build the image in its docker environment
(running `make docker` doesn't do it for you). From the `ui` directory execute:

```bash
# compile and build operator UI
make docker

# build the image in minikube's docker env
eval $(minikube docker-env)
docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.3.0 .

# apply UI manifests next to a running Postgres Operator
kubectl apply -f manifests/

# install python dependencies to run UI locally
pip3 install -r requirements
./run_local.sh
```
@@ -31,9 +31,13 @@ status page.



Usually, the startup should only take up to 1 minute. If you feel the process
got stuck click on the "Logs" button to inspect the operator logs. From the
"Status" field in the top menu you can also retrieve the logs and queue of each
worker the operator is using. The number of concurrent workers can be
got stuck click on the "Logs" button to inspect the operator logs. If the logs
look fine, but the UI seems to be stuck, check if you have configured the
same [cluster name label](../ui/manifests/deployment.yaml#L45) as for the
[operator](../manifests/configmap.yaml#L13).

From the "Status" field in the top menu you can also retrieve the logs and queue
of each worker the operator is using. The number of concurrent workers can be
[configured](reference/operator_parameters.md#general).


@@ -52,6 +52,7 @@ cd postgres-operator
kubectl create -f manifests/configmap.yaml # configuration
kubectl create -f manifests/operator-service-account-rbac.yaml # identity and permissions
kubectl create -f manifests/postgres-operator.yaml # deployment
kubectl create -f manifests/api-service.yaml # operator API to be used by UI
```

There is a [Kustomization](https://github.com/kubernetes-sigs/kustomize)
@@ -104,7 +105,7 @@ kubectl create -f https://operatorhub.io/install/postgres-operator.yaml
This installs the operator in the `operators` namespace. More information can be
found on [operatorhub.io](https://operatorhub.io/operator/postgres-operator).

## Create a Postgres cluster
## Check if Postgres Operator is running

Starting the operator may take a few seconds. Check if the operator pod is
running before applying a Postgres cluster manifest.
@@ -115,7 +116,61 @@ kubectl get pod -l name=postgres-operator

# if you've created the operator using helm chart
kubectl get pod -l app.kubernetes.io/name=postgres-operator
```

If the operator doesn't get into `Running` state, either check the latest K8s
events of the deployment or pod with `kubectl describe` or inspect the operator
logs:

```bash
kubectl logs "$(kubectl get pod -l name=postgres-operator --output='name')"
```

## Deploy the operator UI

In the following paragraphs we describe how to access and manage PostgreSQL
clusters from the command line with kubectl. But it can also be done from the
browser-based [Postgres Operator UI](operator-ui.md). Before deploying the UI
make sure the operator is running and its REST API is reachable through a
[K8s service](../manifests/api-service.yaml). The URL to this API must be
configured in the [deployment manifest](../ui/manifests/deployment.yaml#L43)
of the UI.

To deploy the UI simply apply all its manifest files or use the UI helm chart:

```bash
# manual deployment
kubectl apply -f ui/manifests/

# or helm chart
helm install postgres-operator-ui ./charts/postgres-operator-ui
```

Like with the operator, check if the UI pod gets into `Running` state:

```bash
# if you've created the operator using yaml manifests
kubectl get pod -l name=postgres-operator-ui

# if you've created the operator using helm chart
kubectl get pod -l app.kubernetes.io/name=postgres-operator-ui
```

You can now access the web interface by port forwarding the UI pod (mind the
label selector) and enter `localhost:8081` in your browser:

```bash
kubectl port-forward "$(kubectl get pod -l name=postgres-operator-ui --output='name')" 8081
```

Available options are explained in detail in the [UI docs](operator-ui.md).

## Create a Postgres cluster

If the operator pod is running it listens to new events regarding `postgresql`
resources. Now, it's time to submit your first Postgres cluster manifest.

```bash
# create a Postgres cluster
kubectl create -f manifests/minimal-postgres-manifest.yaml
```
@@ -122,6 +122,11 @@ These parameters are grouped directly under the `spec` key in the manifest.
  A map of key value pairs that gets attached as [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
  to each pod created for the database.

* **serviceAnnotations**
  A map of key value pairs that gets attached as [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
  to the services created for the database cluster. Check the
  [administrator docs](https://github.com/zalando/postgres-operator/blob/master/docs/administrator.md#load-balancers-and-allowed-ip-ranges)
  for more information regarding default values and overwrite rules.

* **enableShmVolume**
  Start a database pod without limitations on shm memory. By default Docker
@@ -152,21 +152,22 @@ configuration they are grouped under the `kubernetes` key.
  service account used by Patroni running on individual Pods to communicate
  with the operator. Required even if native Kubernetes support in Patroni is
  not used, because Patroni keeps pod labels in sync with the instance role.
  The default is `operator`.
  The default is `postgres-pod`.

* **pod_service_account_definition**
  The operator tries to create the pod Service Account in the namespace that
  doesn't define such an account using the YAML definition provided by this
  option. If not defined, a simple definition that contains only the name will
  be used. The default is empty.
  On Postgres cluster creation the operator tries to create the service account
  for the Postgres pods if it does not exist in the namespace. The internal
  default service account definition (defines only the name) can be overwritten
  with this parameter. Make sure to provide a valid YAML or JSON string. The
  default is empty.

* **pod_service_account_role_binding_definition**
  This definition must bind pod service account to a role with permission
  This definition must bind the pod service account to a role with permission
  sufficient for the pods to start and for Patroni to access K8s endpoints;
  service account on its own lacks any such rights starting with K8s v1.8. If
  not explicitly defined by the user, a simple definition that binds the
  account to the operator's own 'zalando-postgres-operator' cluster role will
  be used. The default is empty.
  account to the 'postgres-pod' [cluster role](../../manifests/operator-service-account-rbac.yaml#L198)
  will be used. The default is empty.

* **pod_terminate_grace_period**
  Postgres pods are [terminated forcefully](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods)
@@ -318,11 +319,19 @@ CRD-based configuration.

* **default_cpu_limit**
  CPU limits for the Postgres containers, unless overridden by cluster-specific
  settings. The default is `3`.
  settings. The default is `1`.

* **default_memory_limit**
  memory limits for the Postgres containers, unless overridden by cluster-specific
  settings. The default is `1Gi`.
  settings. The default is `500Mi`.

* **min_cpu_limit**
  the hard CPU minimum we consider required to properly run Postgres
  clusters with Patroni on Kubernetes. The default is `250m`.

* **min_memory_limit**
  the hard memory minimum we consider required to properly run Postgres
  clusters with Patroni on Kubernetes. The default is `250Mi`. See the
  ConfigMap excerpt below.
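
In the ConfigMap-based configuration the same settings appear as plain string
entries under `data`, mirroring the chart values updated in this change (a
sketch of the relevant excerpt):

```yaml
data:
  default_cpu_limit: "1"
  default_memory_limit: 500Mi
  min_cpu_limit: 250m
  min_memory_limit: 250Mi
```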

## Operator timeouts
@@ -380,8 +389,9 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.
  `false`.

* **custom_service_annotations**
  when load balancing is enabled, LoadBalancer service is created and
  this parameter takes service annotations that are applied to service.
  This key/value map provides a list of annotations that get attached to each
  service of a cluster created by the operator. If the annotation key is also
  provided by the cluster definition, the manifest value is used.
  Optional.

* **master_dns_name_format** defines the DNS name string template for the
@@ -453,8 +463,11 @@ grouped under the `logical_backup` key.
  S3 bucket to store backup results. The bucket has to be present and
  accessible by Postgres pods. Default: empty.

* **logical_backup_s3_region**
  Specifies the region of the bucket which is required with some non-AWS S3 storage services. The default is empty.

* **logical_backup_s3_endpoint**
  When using non-AWS S3 storage, endpoint can be set as an ENV variable.
  When using non-AWS S3 storage, endpoint can be set as an ENV variable. The default is empty.

* **logical_backup_s3_sse**
  Specify server side encryption that S3 storage is using. If empty string
@@ -579,4 +592,4 @@ scalyr sidecar. In the CRD-based configuration they are grouped under the
  CPU limit value for the Scalyr sidecar. The default is `1`.

* **scalyr_memory_limit**
  Memory limit value for the Scalyr sidecar. The default is `1Gi`.
  Memory limit value for the Scalyr sidecar. The default is `500Mi`.
docs/user.md
@@ -232,11 +232,11 @@ spec:
      memory: 300Mi
```

The minimum limit to properly run the `postgresql` resource is `256m` for `cpu`
and `256Mi` for `memory`. If a lower value is set in the manifest the operator
will cancel ADD or UPDATE events on this resource with an error. If no
resources are defined in the manifest the operator will obtain the configured
[default requests](reference/operator_parameters.md#kubernetes-resource-requests).
The minimum limits to properly run the `postgresql` resource are configured to
`250m` for `cpu` and `250Mi` for `memory`. If a lower value is set in the
manifest the operator will raise the limits to the configured minimum values.
If no resources are defined in the manifest they will be obtained from the
configured [default requests](reference/operator_parameters.md#kubernetes-resource-requests).
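
As a sketch, assuming the default minimums of `250m`/`250Mi`, limits below the
threshold get bumped rather than rejected (the values here are only
illustrative):

```yaml
spec:
  resources:
    limits:
      cpu: 100m     # below min_cpu_limit, raised to 250m by the operator
      memory: 100Mi # below min_memory_limit, raised to 250Mi
```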

## Use taints and tolerations for dedicated PostgreSQL nodes
@@ -44,3 +44,4 @@ The current tests are all bundled in [`test_e2e.py`](tests/test_e2e.py):
* taint-based eviction of Postgres pods
* invoking logical backup cron job
* uniqueness of master pod
* custom service annotations
@@ -58,6 +58,106 @@ class EndToEndTestCase(unittest.TestCase):
        k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml")
        k8s.wait_for_pod_start('spilo-role=master')

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_enable_load_balancer(self):
        '''
        Test if services are updated when enabling/disabling load balancers
        '''

        k8s = self.k8s
        cluster_label = 'cluster-name=acid-minimal-cluster'

        # enable load balancer services
        pg_patch_enable_lbs = {
            "spec": {
                "enableMasterLoadBalancer": True,
                "enableReplicaLoadBalancer": True
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs)
        # wait for service recreation
        time.sleep(60)

        master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master')
        self.assertEqual(master_svc_type, 'LoadBalancer',
                         "Expected LoadBalancer service type for master, found {}".format(master_svc_type))

        repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica')
        self.assertEqual(repl_svc_type, 'LoadBalancer',
                         "Expected LoadBalancer service type for replica, found {}".format(repl_svc_type))

        # disable load balancer services again
        pg_patch_disable_lbs = {
            "spec": {
                "enableMasterLoadBalancer": False,
                "enableReplicaLoadBalancer": False
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs)
        # wait for service recreation
        time.sleep(60)

        master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master')
        self.assertEqual(master_svc_type, 'ClusterIP',
                         "Expected ClusterIP service type for master, found {}".format(master_svc_type))

        repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica')
        self.assertEqual(repl_svc_type, 'ClusterIP',
                         "Expected ClusterIP service type for replica, found {}".format(repl_svc_type))

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_min_resource_limits(self):
        '''
        Lower resource limits below configured minimum and let operator fix it
        '''
        k8s = self.k8s
        cluster_label = 'cluster-name=acid-minimal-cluster'
        _, failover_targets = k8s.get_pg_nodes(cluster_label)

        # configure minimum boundaries for CPU and memory limits
        minCPULimit = '500m'
        minMemoryLimit = '500Mi'
        patch_min_resource_limits = {
            "data": {
                "min_cpu_limit": minCPULimit,
                "min_memory_limit": minMemoryLimit
            }
        }
        k8s.update_config(patch_min_resource_limits)

        # lower resource limits below minimum
        pg_patch_resources = {
            "spec": {
                "resources": {
                    "requests": {
                        "cpu": "10m",
                        "memory": "50Mi"
                    },
                    "limits": {
                        "cpu": "200m",
                        "memory": "200Mi"
                    }
                }
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
        k8s.wait_for_master_failover(failover_targets)

        pods = k8s.api.core_v1.list_namespaced_pod(
            'default', label_selector='spilo-role=master,' + cluster_label).items
        self.assert_master_is_unique()
        masterPod = pods[0]

        self.assertEqual(masterPod.spec.containers[0].resources.limits['cpu'], minCPULimit,
                         "Expected CPU limit {}, found {}"
                         .format(minCPULimit, masterPod.spec.containers[0].resources.limits['cpu']))
        self.assertEqual(masterPod.spec.containers[0].resources.limits['memory'], minMemoryLimit,
                         "Expected memory limit {}, found {}"
                         .format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory']))

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_multi_namespace_support(self):
        '''
@@ -76,10 +176,9 @@ class EndToEndTestCase(unittest.TestCase):

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_scaling(self):
        """
        '''
        Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime.
        """

        '''
        k8s = self.k8s
        labels = "cluster-name=acid-minimal-cluster"

@@ -93,9 +192,9 @@ class EndToEndTestCase(unittest.TestCase):

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_taint_based_eviction(self):
        """
        '''
        Add taint "postgres=:NoExecute" to node with master. This must cause a failover.
        """
        '''
        k8s = self.k8s
        cluster_label = 'cluster-name=acid-minimal-cluster'

@@ -126,7 +225,7 @@ class EndToEndTestCase(unittest.TestCase):
        # patch node and test if master is failing over to one of the expected nodes
        k8s.api.core_v1.patch_node(current_master_node, body)
        k8s.wait_for_master_failover(failover_targets)
        k8s.wait_for_pod_start('spilo-role=replica')
        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

        new_master_node, new_replica_nodes = k8s.get_pg_nodes(cluster_label)
        self.assertNotEqual(current_master_node, new_master_node,
@@ -145,7 +244,7 @@ class EndToEndTestCase(unittest.TestCase):

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_logical_backup_cron_job(self):
        """
        '''
        Ensure we can (a) create the cron job at user request for a specific PG cluster
        (b) update the cluster-wide image for the logical backup pod
        (c) delete the job at user request
|
@ -153,7 +252,7 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
Limitations:
|
||||
(a) Does not run the actual batch job because there is no S3 mock to upload backups to
|
||||
(b) Assumes 'acid-minimal-cluster' exists as defined in setUp
|
||||
"""
|
||||
'''
|
||||
|
||||
k8s = self.k8s
|
||||
|
||||
|
|
@@ -161,8 +260,8 @@ class EndToEndTestCase(unittest.TestCase):
        schedule = "7 7 7 7 *"
        pg_patch_enable_backup = {
            "spec": {
                "enableLogicalBackup": True,
                "logicalBackupSchedule": schedule
                "enableLogicalBackup": True,
                "logicalBackupSchedule": schedule
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
|
@ -184,7 +283,7 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
image = "test-image-name"
|
||||
patch_logical_backup_image = {
|
||||
"data": {
|
||||
"logical_backup_docker_image": image,
|
||||
"logical_backup_docker_image": image,
|
||||
}
|
||||
}
|
||||
k8s.update_config(patch_logical_backup_image)
|
||||
|
|
@ -197,7 +296,7 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
# delete the logical backup cron job
|
||||
pg_patch_disable_backup = {
|
||||
"spec": {
|
||||
"enableLogicalBackup": False,
|
||||
"enableLogicalBackup": False,
|
||||
}
|
||||
}
|
||||
k8s.api.custom_objects_api.patch_namespaced_custom_object(
|
||||
|
|
@@ -207,11 +306,51 @@ class EndToEndTestCase(unittest.TestCase):
        self.assertEqual(0, len(jobs),
                         "Expected 0 logical backup jobs, found {}".format(len(jobs)))

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_service_annotations(self):
        '''
        Create a Postgres cluster with service annotations and check them.
        '''
        k8s = self.k8s
        patch_custom_service_annotations = {
            "data": {
                "custom_service_annotations": "foo:bar",
            }
        }
        k8s.update_config(patch_custom_service_annotations)

        pg_patch_custom_annotations = {
            "spec": {
                "serviceAnnotations": {
                    "annotation.key": "value"
                }
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations)

        annotations = {
            "annotation.key": "value",
            "foo": "bar",
        }
        self.assertTrue(k8s.check_service_annotations(
            "cluster-name=acid-service-annotations,spilo-role=master", annotations))
        self.assertTrue(k8s.check_service_annotations(
            "cluster-name=acid-service-annotations,spilo-role=replica", annotations))

        # clean up
        unpatch_custom_service_annotations = {
            "data": {
                "custom_service_annotations": "",
            }
        }
        k8s.update_config(unpatch_custom_service_annotations)

    def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"):
        """
        '''
        Check that there is a single pod in the k8s cluster with the label "spilo-role=master"
        To be called manually after operations that affect pods
        """
        '''

        k8s = self.k8s
        labels = 'spilo-role=master,cluster-name=' + clusterName
@ -272,6 +411,23 @@ class K8s:
|
|||
pod_phase = pods[0].status.phase
|
||||
time.sleep(self.RETRY_TIMEOUT_SEC)
|
||||
|
||||
def get_service_type(self, svc_labels, namespace='default'):
|
||||
svc_type = ''
|
||||
svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items
|
||||
for svc in svcs:
|
||||
svc_type = svc.spec.type
|
||||
return svc_type
|
||||
|
||||
def check_service_annotations(self, svc_labels, annotations, namespace='default'):
|
||||
svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items
|
||||
for svc in svcs:
|
||||
if len(svc.metadata.annotations) != len(annotations):
|
||||
return False
|
||||
for key in svc.metadata.annotations:
|
||||
if svc.metadata.annotations[key] != annotations[key]:
|
||||
return False
|
||||
return True
|
||||
|
||||
def wait_for_pg_to_scale(self, number_of_instances, namespace='default'):
|
||||
|
||||
body = {
|
||||
|
|
@ -280,7 +436,7 @@ class K8s:
|
|||
}
|
||||
}
|
||||
_ = self.api.custom_objects_api.patch_namespaced_custom_object(
|
||||
"acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body)
|
||||
"acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body)
|
||||
|
||||
labels = 'cluster-name=acid-minimal-cluster'
|
||||
while self.count_pods_with_label(labels) != number_of_instances:
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: postgres-operator
+spec:
+  type: ClusterIP
+  ports:
+  - port: 8080
+    protocol: TCP
+    targetPort: 8080
+  selector:
+    name: postgres-operator
@@ -32,6 +32,8 @@ spec:
 #  spiloFSGroup: 103
 #  podAnnotations:
 #    annotation.key: value
+#  serviceAnnotations:
+#    annotation.key: value
 #  podPriorityClassName: "spilo-pod-priority"
 #  tolerations:
 #  - key: postgres
@@ -42,8 +44,8 @@ spec:
       cpu: 10m
       memory: 100Mi
     limits:
-      cpu: 300m
-      memory: 300Mi
+      cpu: 500m
+      memory: 500Mi
   patroni:
     initdb:
       encoding: "UTF8"
@@ -15,9 +15,9 @@ data:
   # custom_pod_annotations: "keya:valuea,keyb:valueb"
   db_hosted_zone: db.example.com
   debug_logging: "true"
-  # default_cpu_limit: "3"
+  # default_cpu_limit: "1"
   # default_cpu_request: 100m
-  # default_memory_limit: 1Gi
+  # default_memory_limit: 500Mi
   # default_memory_request: 100Mi
   docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16
   # enable_admin_role_for_users: "true"
@@ -40,6 +40,7 @@ data:
   # logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
   # logical_backup_s3_access_key_id: ""
   # logical_backup_s3_bucket: "my-bucket-url"
   # logical_backup_s3_region: ""
+  # logical_backup_s3_endpoint: ""
   # logical_backup_s3_secret_access_key: ""
   # logical_backup_s3_sse: "AES256"
@@ -48,6 +49,8 @@ data:
   # master_pod_move_timeout: 10m
   # max_instances: "-1"
   # min_instances: "-1"
+  # min_cpu_limit: 250m
+  # min_memory_limit: 250Mi
   # node_readiness_label: ""
   # oauth_token_secret_name: postgresql-operator
   # pam_configuration: |
@@ -60,7 +63,7 @@ data:
   pod_label_wait_timeout: 10m
   pod_management_policy: "ordered_ready"
   pod_role_label: spilo-role
-  pod_service_account_name: "zalando-postgres-operator"
+  pod_service_account_name: "postgres-pod"
   pod_terminate_grace_period: 5m
   # postgres_superuser_teams: "postgres_superusers"
   # protected_role_names: "admin"
@@ -4,3 +4,4 @@ resources:
 - configmap.yaml
 - operator-service-account-rbac.yaml
 - postgres-operator.yaml
+- api-service.yaml
@@ -1,14 +1,14 @@
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: zalando-postgres-operator
+  name: postgres-operator
   namespace: default

 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
-  name: zalando-postgres-operator
+  name: postgres-operator
 rules:
 # all verbs allowed for custom operator resources
 - apiGroups:
@@ -18,7 +18,14 @@ rules:
   - postgresqls/status
   - operatorconfigurations
   verbs:
-  - "*"
+  - create
+  - delete
+  - deletecollection
+  - get
+  - list
+  - patch
+  - update
+  - watch
 # to create or get/update CRDs when starting up
 - apiGroups:
   - apiextensions.k8s.io
@@ -48,7 +55,8 @@ rules:
   - get
   - list
   - patch
-  - watch # needed if zalando-postgres-operator account is used for pods as well
+  - update
+  - watch
 # to CRUD secrets for database access
 - apiGroups:
   - ""
@@ -96,6 +104,7 @@ rules:
   - get
   - list
   - watch
   - update
+  - patch
 # to resize the filesystem in Spilo pods when increasing volume size
 - apiGroups:
@@ -114,6 +123,7 @@ rules:
   - delete
   - get
+  - patch
   - update
 # to CRUD the StatefulSet which controls the Postgres cluster instances
 - apiGroups:
   - apps
@@ -125,6 +135,18 @@ rules:
   - get
   - list
   - patch
+# to CRUD cron jobs for logical backups
+- apiGroups:
+  - batch
+  resources:
+  - cronjobs
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
 # to get namespaces operator resources can run in
 - apiGroups:
   - ""
@@ -149,39 +171,82 @@ rules:
   verbs:
   - get
   - create
-# to create role bindings to the operator service account
+# to create role bindings to the postgres-pod service account
 - apiGroups:
-  - "rbac.authorization.k8s.io"
+  - rbac.authorization.k8s.io
   resources:
   - rolebindings
   verbs:
   - get
   - create
-# to CRUD cron jobs for logical backups
+# to grant privilege to run privileged pods
 - apiGroups:
-  - batch
+  - extensions
   resources:
-  - cronjobs
+  - podsecuritypolicies
+  resourceNames:
+  - privileged
   verbs:
-  - create
-  - delete
-  - get
-  - list
-  - patch
-  - update
+  - use

 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
-  name: zalando-postgres-operator
+  name: postgres-operator
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
-  name: zalando-postgres-operator
+  name: postgres-operator
 subjects:
 - kind: ServiceAccount
 # note: the cluster role binding needs to be defined
 # for every namespace the operator service account lives in.
-  name: zalando-postgres-operator
+  name: postgres-operator
   namespace: default
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: postgres-pod
+rules:
+# Patroni needs to watch and manage endpoints
+- apiGroups:
+  - ""
+  resources:
+  - endpoints
+  verbs:
+  - create
+  - delete
+  - deletecollection
+  - get
+  - list
+  - patch
+  - update
+  - watch
+# Patroni needs to watch pods
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - get
+  - list
+  - patch
+  - update
+  - watch
+# to let Patroni create a headless service
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - create
+# to run privileged pods
+- apiGroups:
+  - extensions
+  resources:
+  - podsecuritypolicies
+  resourceNames:
+  - privileged
+  verbs:
+  - use
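The operator role above needs get/create on rolebindings because the operator itself binds the postgres-pod ClusterRole to the pod service account in each namespace it manages. A minimal client-go sketch of such a call, under the assumption of a recent client-go (0.18 or later, where Create takes a context and CreateOptions); the function and variable names here are illustrative, not the operator's own:

    package main

    import (
    	"context"

    	rbacv1 "k8s.io/api/rbac/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // ensurePodRoleBinding binds the postgres-pod ClusterRole to the
    // postgres-pod service account in the given namespace.
    func ensurePodRoleBinding(client kubernetes.Interface, namespace string) error {
    	rb := &rbacv1.RoleBinding{
    		ObjectMeta: metav1.ObjectMeta{Name: "postgres-pod", Namespace: namespace},
    		RoleRef: rbacv1.RoleRef{
    			APIGroup: "rbac.authorization.k8s.io",
    			Kind:     "ClusterRole",
    			Name:     "postgres-pod",
    		},
    		Subjects: []rbacv1.Subject{{
    			Kind:      "ServiceAccount",
    			Name:      "postgres-pod",
    			Namespace: namespace,
    		}},
    	}
    	_, err := client.RbacV1().RoleBindings(namespace).Create(context.TODO(), rb, metav1.CreateOptions{})
    	return err
    }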
@@ -155,6 +155,12 @@ spec:
             default_memory_request:
               type: string
               pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
+            min_cpu_limit:
+              type: string
+              pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
+            min_memory_limit:
+              type: string
+              pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
         timeouts:
           type: object
           properties:
@@ -213,6 +219,8 @@ spec:
               type: string
+            logical_backup_s3_endpoint:
+              type: string
             logical_backup_s3_region:
               type: string
             logical_backup_s3_secret_access_key:
               type: string
             logical_backup_s3_sse:
@@ -12,17 +12,17 @@ spec:
       labels:
         name: postgres-operator
     spec:
-      serviceAccountName: zalando-postgres-operator
+      serviceAccountName: postgres-operator
       containers:
       - name: postgres-operator
         image: registry.opensource.zalan.do/acid/postgres-operator:v1.3.1
        imagePullPolicy: IfNotPresent
         resources:
           requests:
-            cpu: 500m
+            cpu: 100m
             memory: 250Mi
           limits:
-            cpu: 2000m
+            cpu: 500m
             memory: 500Mi
         securityContext:
           runAsUser: 1000
@@ -45,7 +45,7 @@ configuration:
     # pod_priority_class_name: ""
     pod_role_label: spilo-role
     # pod_service_account_definition: ""
-    pod_service_account_name: zalando-postgres-operator
+    pod_service_account_name: postgres-pod
     # pod_service_account_role_binding_definition: ""
     pod_terminate_grace_period: 5m
     secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
@@ -54,10 +54,12 @@ configuration:
     # toleration: {}
     # watched_namespace: ""
   postgres_pod_resources:
-    default_cpu_limit: "3"
+    default_cpu_limit: "1"
     default_cpu_request: 100m
-    default_memory_limit: 1Gi
+    default_memory_limit: 500Mi
     default_memory_request: 100Mi
+    # min_cpu_limit: 250m
+    # min_memory_limit: 250Mi
   timeouts:
     pod_label_wait_timeout: 10m
     pod_deletion_wait_timeout: 10m
@@ -86,6 +88,7 @@ configuration:
     # logical_backup_s3_access_key_id: ""
     logical_backup_s3_bucket: "my-bucket-url"
+    # logical_backup_s3_endpoint: ""
     # logical_backup_s3_region: ""
     # logical_backup_s3_secret_access_key: ""
     logical_backup_s3_sse: "AES256"
     logical_backup_schedule: "30 00 * * *"
@@ -115,6 +118,6 @@ configuration:
     scalyr_cpu_limit: "1"
     scalyr_cpu_request: 100m
     # scalyr_image: ""
-    scalyr_memory_limit: 1Gi
+    scalyr_memory_limit: 500Mi
     scalyr_memory_request: 50Mi
     # scalyr_server_url: ""
@@ -230,6 +230,10 @@ spec:
                 pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                 # Note: the value specified here must not be zero or be higher
                 # than the corresponding limit.
+            serviceAnnotations:
+              type: object
+              additionalProperties:
+                type: string
             sidecars:
               type: array
               nullable: true
@@ -11,7 +11,14 @@ rules:
   - postgresqls
   - postgresqls/status
   verbs:
-  - "*"
+  - create
+  - delete
+  - deletecollection
+  - get
+  - list
+  - patch
+  - update
+  - watch

 ---
 apiVersion: rbac.authorization.k8s.io/v1
@@ -48,4 +55,3 @@ rules:
   - get
   - list
   - watch
-
@@ -383,6 +383,14 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 					},
 				},
 			},
+			"serviceAnnotations": {
+				Type: "object",
+				AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{
+					Schema: &apiextv1beta1.JSONSchemaProps{
+						Type: "string",
+					},
+				},
+			},
 			"sidecars": {
 				Type: "array",
 				Items: &apiextv1beta1.JSONSchemaPropsOrArray{
@@ -810,6 +818,14 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
 							Type:    "string",
 							Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
 						},
+						"min_cpu_limit": {
+							Type:    "string",
+							Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
+						},
+						"min_memory_limit": {
+							Type:    "string",
+							Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
+						},
 					},
 				},
 				"timeouts": {
@@ -901,6 +917,9 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
+						"logical_backup_s3_endpoint": {
+							Type: "string",
+						},
 						"logical_backup_s3_region": {
 							Type: "string",
 						},
 						"logical_backup_s3_secret_access_key": {
 							Type: "string",
 						},
@@ -67,7 +67,7 @@ type KubernetesMetaConfiguration struct {
 	// TODO: use namespacedname
 	PodEnvironmentConfigMap    string        `json:"pod_environment_configmap,omitempty"`
 	PodPriorityClassName       string        `json:"pod_priority_class_name,omitempty"`
-	MasterPodMoveTimeout       time.Duration `json:"master_pod_move_timeout,omitempty"`
+	MasterPodMoveTimeout       Duration      `json:"master_pod_move_timeout,omitempty"`
 	EnablePodAntiAffinity      bool          `json:"enable_pod_antiaffinity,omitempty"`
 	PodAntiAffinityTopologyKey string        `json:"pod_antiaffinity_topology_key,omitempty"`
 	PodManagementPolicy        string        `json:"pod_management_policy,omitempty"`
@@ -79,6 +79,8 @@ type PostgresPodResourcesDefaults struct {
 	DefaultMemoryRequest string `json:"default_memory_request,omitempty"`
 	DefaultCPULimit      string `json:"default_cpu_limit,omitempty"`
 	DefaultMemoryLimit   string `json:"default_memory_limit,omitempty"`
+	MinCPULimit          string `json:"min_cpu_limit,omitempty"`
+	MinMemoryLimit       string `json:"min_memory_limit,omitempty"`
 }

 // OperatorTimeouts defines the timeout of ResourceCheck, PodWait, ReadyWait
@@ -155,6 +157,7 @@ type OperatorLogicalBackupConfiguration struct {
 	Schedule          string `json:"logical_backup_schedule,omitempty"`
 	DockerImage       string `json:"logical_backup_docker_image,omitempty"`
 	S3Bucket          string `json:"logical_backup_s3_bucket,omitempty"`
 	S3Region          string `json:"logical_backup_s3_region,omitempty"`
+	S3Endpoint        string `json:"logical_backup_s3_endpoint,omitempty"`
 	S3AccessKeyID     string `json:"logical_backup_s3_access_key_id,omitempty"`
 	S3SecretAccessKey string `json:"logical_backup_s3_secret_access_key,omitempty"`
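The switch of MasterPodMoveTimeout from time.Duration to a package-local Duration type matters because plain encoding/json cannot decode a string such as "10m" into a time.Duration; the usual fix is a wrapper with its own UnmarshalJSON. A minimal sketch of such a wrapper, an assumption about the shape of the operator's implementation rather than a copy of it:

    package types

    import (
    	"encoding/json"
    	"fmt"
    	"time"
    )

    // Duration wraps time.Duration so that values like "10m" in the
    // operator configuration can be parsed from their string form.
    type Duration time.Duration

    // UnmarshalJSON accepts either a JSON string ("10m", "1h30m") or a
    // bare number of nanoseconds.
    func (d *Duration) UnmarshalJSON(b []byte) error {
    	var v interface{}
    	if err := json.Unmarshal(b, &v); err != nil {
    		return err
    	}
    	switch val := v.(type) {
    	case string:
    		parsed, err := time.ParseDuration(val)
    		if err != nil {
    			return err
    		}
    		*d = Duration(parsed)
    		return nil
    	case float64:
    		*d = Duration(time.Duration(val))
    		return nil
    	default:
    		return fmt.Errorf("unsupported duration value: %v", v)
    	}
    }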
@@ -60,6 +60,7 @@ type PostgresSpec struct {
 	LogicalBackupSchedule string              `json:"logicalBackupSchedule,omitempty"`
 	StandbyCluster        *StandbyDescription `json:"standby"`
 	PodAnnotations        map[string]string   `json:"podAnnotations"`
+	ServiceAnnotations    map[string]string   `json:"serviceAnnotations"`

 	// deprecated json tags
 	InitContainersOld []v1.Container `json:"init_containers,omitempty"`
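Because ServiceAnnotations is a plain map[string]string, the serviceAnnotations block of a manifest decodes into it without any custom code. A self-contained illustration; the spec struct here is a trimmed-down stand-in, not the operator's type:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // spec carries only the new field for demonstration purposes.
    type spec struct {
    	ServiceAnnotations map[string]string `json:"serviceAnnotations"`
    }

    func main() {
    	manifest := []byte(`{"serviceAnnotations": {"foo": "bar", "post": "gres"}}`)
    	var s spec
    	if err := json.Unmarshal(manifest, &s); err != nil {
    		panic(err)
    	}
    	fmt.Println(s.ServiceAnnotations["foo"]) // prints "bar"
    }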
@@ -13,127 +13,139 @@ import (
 )

 var parseTimeTests = []struct {
-	in  string
-	out metav1.Time
-	err error
+	about string
+	in    string
+	out   metav1.Time
+	err   error
 }{
-	{"16:08", mustParseTime("16:08"), nil},
-	{"11:00", mustParseTime("11:00"), nil},
-	{"23:59", mustParseTime("23:59"), nil},
+	{"parse common time with minutes", "16:08", mustParseTime("16:08"), nil},
+	{"parse time with zeroed minutes", "11:00", mustParseTime("11:00"), nil},
+	{"parse corner case last minute of the day", "23:59", mustParseTime("23:59"), nil},

-	{"26:09", metav1.Now(), errors.New(`parsing time "26:09": hour out of range`)},
-	{"23:69", metav1.Now(), errors.New(`parsing time "23:69": minute out of range`)},
+	{"expect error as hour is out of range", "26:09", metav1.Now(), errors.New(`parsing time "26:09": hour out of range`)},
+	{"expect error as minute is out of range", "23:69", metav1.Now(), errors.New(`parsing time "23:69": minute out of range`)},
 }

 var parseWeekdayTests = []struct {
-	in  string
-	out time.Weekday
-	err error
+	about string
+	in    string
+	out   time.Weekday
+	err   error
 }{
-	{"Wed", time.Wednesday, nil},
-	{"Sunday", time.Weekday(0), errors.New("incorrect weekday")},
-	{"", time.Weekday(0), errors.New("incorrect weekday")},
+	{"parse common weekday", "Wed", time.Wednesday, nil},
+	{"expect error as weekday is invalid", "Sunday", time.Weekday(0), errors.New("incorrect weekday")},
+	{"expect error as weekday is empty", "", time.Weekday(0), errors.New("incorrect weekday")},
 }

 var clusterNames = []struct {
+	about       string
 	in          string
 	inTeam      string
 	clusterName string
 	err         error
 }{
-	{"acid-test", "acid", "test", nil},
-	{"test-my-name", "test", "my-name", nil},
-	{"my-team-another-test", "my-team", "another-test", nil},
-	{"------strange-team-cluster", "-----", "strange-team-cluster",
+	{"common team and cluster name", "acid-test", "acid", "test", nil},
+	{"cluster name with hyphen", "test-my-name", "test", "my-name", nil},
+	{"cluster and team name with hyphen", "my-team-another-test", "my-team", "another-test", nil},
+	{"expect error as cluster name is just hyphens", "------strange-team-cluster", "-----", "strange-team-cluster",
 		errors.New(`name must confirm to DNS-1035, regex used for validation is "^[a-z]([-a-z0-9]*[a-z0-9])?$"`)},
-	{"fooobar-fooobarfooobarfooobarfooobarfooobarfooobarfooobarfooobar", "fooobar", "",
+	{"expect error as cluster name is too long", "fooobar-fooobarfooobarfooobarfooobarfooobarfooobarfooobarfooobar", "fooobar", "",
 		errors.New("name cannot be longer than 58 characters")},
-	{"acid-test", "test", "", errors.New("name must match {TEAM}-{NAME} format")},
-	{"-test", "", "", errors.New("team name is empty")},
-	{"-test", "-", "", errors.New("name must match {TEAM}-{NAME} format")},
-	{"", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '', team name '-'")},
-	{"-", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '-', team name '-'")},
+	{"expect error as cluster name does not match {TEAM}-{NAME} format", "acid-test", "test", "", errors.New("name must match {TEAM}-{NAME} format")},
+	{"expect error as team and cluster name are empty", "-test", "", "", errors.New("team name is empty")},
+	{"expect error as cluster name is empty and team name is a hyphen", "-test", "-", "", errors.New("name must match {TEAM}-{NAME} format")},
+	{"expect error as cluster name is empty, team name is a hyphen and cluster name is empty", "", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '', team name '-'")},
+	{"expect error as cluster and team name are hyphens", "-", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '-', team name '-'")},
 	// user may specify the team part of the full cluster name differently from the team name returned by the Teams API
 	// in the case the actual Teams API name is long enough, this will fail the check
-	{"foo-bar", "qwerty", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name 'foo-bar', team name 'qwerty'")},
+	{"expect error as team name does not match", "foo-bar", "qwerty", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name 'foo-bar', team name 'qwerty'")},
 }

 var cloneClusterDescriptions = []struct {
-	in  *CloneDescription
-	err error
+	about string
+	in    *CloneDescription
+	err   error
 }{
-	{&CloneDescription{"foo+bar", "", "NotEmpty", "", "", "", "", nil}, nil},
-	{&CloneDescription{"foo+bar", "", "", "", "", "", "", nil},
+	{"cluster name invalid but EndTimeSet is not empty", &CloneDescription{"foo+bar", "", "NotEmpty", "", "", "", "", nil}, nil},
+	{"expect error as cluster name does not match DNS-1035", &CloneDescription{"foo+bar", "", "", "", "", "", "", nil},
 		errors.New(`clone cluster name must confirm to DNS-1035, regex used for validation is "^[a-z]([-a-z0-9]*[a-z0-9])?$"`)},
-	{&CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", "", "", "", "", "", nil},
+	{"expect error as cluster name is too long", &CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", "", "", "", "", "", nil},
 		errors.New("clone cluster name must be no longer than 63 characters")},
-	{&CloneDescription{"foobar", "", "", "", "", "", "", nil}, nil},
+	{"common cluster name", &CloneDescription{"foobar", "", "", "", "", "", "", nil}, nil},
 }

 var maintenanceWindows = []struct {
-	in  []byte
-	out MaintenanceWindow
-	err error
-}{{[]byte(`"Tue:10:00-20:00"`),
+	about string
+	in    []byte
+	out   MaintenanceWindow
+	err   error
+}{{"regular scenario",
+	[]byte(`"Tue:10:00-20:00"`),
 	MaintenanceWindow{
 		Everyday:  false,
 		Weekday:   time.Tuesday,
 		StartTime: mustParseTime("10:00"),
 		EndTime:   mustParseTime("20:00"),
 	}, nil},
-	{[]byte(`"Mon:10:00-10:00"`),
+	{"starts and ends at the same time",
+		[]byte(`"Mon:10:00-10:00"`),
 		MaintenanceWindow{
 			Everyday:  false,
 			Weekday:   time.Monday,
 			StartTime: mustParseTime("10:00"),
 			EndTime:   mustParseTime("10:00"),
 		}, nil},
-	{[]byte(`"Sun:00:00-00:00"`),
+	{"starts and ends 00:00 on sunday",
+		[]byte(`"Sun:00:00-00:00"`),
 		MaintenanceWindow{
 			Everyday:  false,
 			Weekday:   time.Sunday,
 			StartTime: mustParseTime("00:00"),
 			EndTime:   mustParseTime("00:00"),
 		}, nil},
-	{[]byte(`"01:00-10:00"`),
+	{"without day indication should define to sunday",
+		[]byte(`"01:00-10:00"`),
 		MaintenanceWindow{
 			Everyday:  true,
 			Weekday:   time.Sunday,
 			StartTime: mustParseTime("01:00"),
 			EndTime:   mustParseTime("10:00"),
 		}, nil},
-	{[]byte(`"Mon:12:00-11:00"`), MaintenanceWindow{}, errors.New(`'From' time must be prior to the 'To' time`)},
-	{[]byte(`"Wed:33:00-00:00"`), MaintenanceWindow{}, errors.New(`could not parse start time: parsing time "33:00": hour out of range`)},
-	{[]byte(`"Wed:00:00-26:00"`), MaintenanceWindow{}, errors.New(`could not parse end time: parsing time "26:00": hour out of range`)},
-	{[]byte(`"Sunday:00:00-00:00"`), MaintenanceWindow{}, errors.New(`could not parse weekday: incorrect weekday`)},
-	{[]byte(`":00:00-10:00"`), MaintenanceWindow{}, errors.New(`could not parse weekday: incorrect weekday`)},
-	{[]byte(`"Mon:10:00-00:00"`), MaintenanceWindow{}, errors.New(`'From' time must be prior to the 'To' time`)},
-	{[]byte(`"Mon:00:00:00-10:00:00"`), MaintenanceWindow{}, errors.New(`incorrect maintenance window format`)},
-	{[]byte(`"Mon:00:00"`), MaintenanceWindow{}, errors.New("incorrect maintenance window format")},
-	{[]byte(`"Mon:00:00-00:00:00"`), MaintenanceWindow{}, errors.New("could not parse end time: incorrect time format")}}
+	{"expect error as 'From' is later than 'To'", []byte(`"Mon:12:00-11:00"`), MaintenanceWindow{}, errors.New(`'From' time must be prior to the 'To' time`)},
+	{"expect error as 'From' is later than 'To' with 00:00 corner case", []byte(`"Mon:10:00-00:00"`), MaintenanceWindow{}, errors.New(`'From' time must be prior to the 'To' time`)},
+	{"expect error as 'From' time is not valid", []byte(`"Wed:33:00-00:00"`), MaintenanceWindow{}, errors.New(`could not parse start time: parsing time "33:00": hour out of range`)},
+	{"expect error as 'To' time is not valid", []byte(`"Wed:00:00-26:00"`), MaintenanceWindow{}, errors.New(`could not parse end time: parsing time "26:00": hour out of range`)},
+	{"expect error as weekday is not valid", []byte(`"Sunday:00:00-00:00"`), MaintenanceWindow{}, errors.New(`could not parse weekday: incorrect weekday`)},
+	{"expect error as weekday is empty", []byte(`":00:00-10:00"`), MaintenanceWindow{}, errors.New(`could not parse weekday: incorrect weekday`)},
+	{"expect error as maintenance window set seconds", []byte(`"Mon:00:00:00-10:00:00"`), MaintenanceWindow{}, errors.New(`incorrect maintenance window format`)},
+	{"expect error as 'To' time set seconds", []byte(`"Mon:00:00-00:00:00"`), MaintenanceWindow{}, errors.New("could not parse end time: incorrect time format")},
+	{"expect error as 'To' time is missing", []byte(`"Mon:00:00"`), MaintenanceWindow{}, errors.New("incorrect maintenance window format")}}

 var postgresStatus = []struct {
-	in  []byte
-	out PostgresStatus
-	err error
+	about string
+	in    []byte
+	out   PostgresStatus
+	err   error
 }{
-	{[]byte(`{"PostgresClusterStatus":"Running"}`),
+	{"cluster running", []byte(`{"PostgresClusterStatus":"Running"}`),
 		PostgresStatus{PostgresClusterStatus: ClusterStatusRunning}, nil},
-	{[]byte(`{"PostgresClusterStatus":""}`),
+	{"cluster status undefined", []byte(`{"PostgresClusterStatus":""}`),
 		PostgresStatus{PostgresClusterStatus: ClusterStatusUnknown}, nil},
-	{[]byte(`"Running"`),
+	{"cluster running without full JSON format", []byte(`"Running"`),
 		PostgresStatus{PostgresClusterStatus: ClusterStatusRunning}, nil},
-	{[]byte(`""`),
+	{"cluster status empty", []byte(`""`),
 		PostgresStatus{PostgresClusterStatus: ClusterStatusUnknown}, nil}}

+var tmp postgresqlCopy
 var unmarshalCluster = []struct {
+	about   string
 	in      []byte
 	out     Postgresql
 	marshal []byte
 	err     error
 }{
-	// example with simple status field
 	{
+		about: "example with simple status field",
 		in: []byte(`{
 	"kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
 	"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`),
@@ -147,12 +159,14 @@ var unmarshalCluster = []struct {
 			},
 			Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid},
 			// This error message can vary between Go versions, so compute it for the current version.
-			Error: json.Unmarshal([]byte(`{"teamId": 0}`), &PostgresSpec{}).Error(),
+			Error: json.Unmarshal([]byte(`{
+	"kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
+	"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(),
 		},
 		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`),
 		err:     nil},
-	// example with /status subresource
 	{
+		about: "example with /status subresource",
 		in: []byte(`{
 	"kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
 	"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`),
@@ -166,13 +180,14 @@ var unmarshalCluster = []struct {
 			},
 			Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid},
 			// This error message can vary between Go versions, so compute it for the current version.
-			Error: json.Unmarshal([]byte(`{"teamId": 0}`), &PostgresSpec{}).Error(),
+			Error: json.Unmarshal([]byte(`{
+	"kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
+	"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(),
 		},
 		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
 		err:     nil},
-	// example with detailed input manifest
-	// and deprecated pod_priority_class_name -> podPriorityClassName
 	{
+		about: "example with detailed input manifest and deprecated pod_priority_class_name -> podPriorityClassName",
 		in: []byte(`{
 	"kind": "Postgresql",
 	"apiVersion": "acid.zalan.do/v1",
@@ -321,9 +336,9 @@ var unmarshalCluster = []struct {
 		},
 		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
 		err:     nil},
-	// example with teamId set in input
 	{
-		in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "teapot-testcluster1"}, "spec": {"teamId": "acid"}}`),
+		about: "example with teamId set in input",
+		in:    []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "teapot-testcluster1"}, "spec": {"teamId": "acid"}}`),
 		out: Postgresql{
 			TypeMeta: metav1.TypeMeta{
 				Kind: "Postgresql",
@@ -338,9 +353,9 @@ var unmarshalCluster = []struct {
 		},
 		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null} ,"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
 		err:     nil},
-	// clone example
 	{
-		in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "clone": {"cluster": "team-batman"}}}`),
+		about: "example with clone",
+		in:    []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "clone": {"cluster": "team-batman"}}}`),
 		out: Postgresql{
 			TypeMeta: metav1.TypeMeta{
 				Kind: "Postgresql",
@@ -360,9 +375,9 @@ var unmarshalCluster = []struct {
 		},
 		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"standby":{"s3_wal_path":"s3://custom/path/to/bucket/"}},"status":{"PostgresClusterStatus":""}}`),
 		err:     nil},
-	// standby example
 	{
-		in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "standby": {"s3_wal_path": "s3://custom/path/to/bucket/"}}}`),
+		about: "standby example",
+		in:    []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "standby": {"s3_wal_path": "s3://custom/path/to/bucket/"}}}`),
 		out: Postgresql{
 			TypeMeta: metav1.TypeMeta{
 				Kind: "Postgresql",
@@ -382,24 +397,28 @@ var unmarshalCluster = []struct {
 		},
 		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"standby":{"s3_wal_path":"s3://custom/path/to/bucket/"}},"status":{"PostgresClusterStatus":""}}`),
 		err:     nil},
-	// erroneous examples
 	{
+		about:   "expect error on malformatted JSON",
 		in:      []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1"`),
 		out:     Postgresql{},
 		marshal: []byte{},
 		err:     errors.New("unexpected end of JSON input")},
 	{
+		about:   "expect error on JSON with field's value malformatted",
 		in:      []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
 		out:     Postgresql{},
 		marshal: []byte{},
-		err:     errors.New("invalid character 'q' looking for beginning of value")}}
+		err:     errors.New("invalid character 'q' looking for beginning of value"),
+	},
+}

 var postgresqlList = []struct {
-	in  []byte
-	out PostgresqlList
-	err error
+	about string
+	in    []byte
+	out   PostgresqlList
+	err   error
 }{
-	{[]byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"9.6"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
+	{"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"9.6"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
 		PostgresqlList{
 			TypeMeta: metav1.TypeMeta{
 				Kind: "List",
@@ -433,20 +452,88 @@ var postgresqlList = []struct {
 		}},
 	},
 		nil},
-	{[]byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace"`),
+	{"expect error on malformatted JSON", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace"`),
 		PostgresqlList{},
 		errors.New("unexpected end of JSON input")}}

-var annotations = []struct {
+var podAnnotations = []struct {
+	about       string
 	in          []byte
 	annotations map[string]string
 	err         error
 }{{
-	in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"podAnnotations": {"foo": "bar"},"teamId": "acid", "clone": {"cluster": "team-batman"}}}`),
+	about: "common annotations",
+	in: []byte(`{
+		"kind": "Postgresql",
+		"apiVersion": "acid.zalan.do/v1",
+		"metadata": {
+			"name": "acid-testcluster1"
+		},
+		"spec": {
+			"podAnnotations": {
+				"foo": "bar"
+			},
+			"teamId": "acid",
+			"clone": {
+				"cluster": "team-batman"
+			}
+		}
+	}`),
 	annotations: map[string]string{"foo": "bar"},
 	err:         nil},
 }

+var serviceAnnotations = []struct {
+	about       string
+	in          []byte
+	annotations map[string]string
+	err         error
+}{
+	{
+		about: "common single annotation",
+		in: []byte(`{
+			"kind": "Postgresql",
+			"apiVersion": "acid.zalan.do/v1",
+			"metadata": {
+				"name": "acid-testcluster1"
+			},
+			"spec": {
+				"serviceAnnotations": {
+					"foo": "bar"
+				},
+				"teamId": "acid",
+				"clone": {
+					"cluster": "team-batman"
+				}
+			}
+		}`),
+		annotations: map[string]string{"foo": "bar"},
+		err:         nil,
+	},
+	{
+		about: "common two annotations",
+		in: []byte(`{
+			"kind": "Postgresql",
+			"apiVersion": "acid.zalan.do/v1",
+			"metadata": {
+				"name": "acid-testcluster1"
+			},
+			"spec": {
+				"serviceAnnotations": {
+					"foo": "bar",
+					"post": "gres"
+				},
+				"teamId": "acid",
+				"clone": {
+					"cluster": "team-batman"
+				}
+			}
+		}`),
+		annotations: map[string]string{"foo": "bar", "post": "gres"},
+		err:         nil,
+	},
+}

 func mustParseTime(s string) metav1.Time {
 	v, err := time.Parse("15:04", s)
 	if err != nil {
@@ -458,230 +545,277 @@ func mustParseTime(s string) metav1.Time {

 func TestParseTime(t *testing.T) {
 	for _, tt := range parseTimeTests {
-		aTime, err := parseTime(tt.in)
-		if err != nil {
-			if tt.err == nil || err.Error() != tt.err.Error() {
-				t.Errorf("ParseTime expected error: %v, got: %v", tt.err, err)
-			}
-			continue
-		} else if tt.err != nil {
-			t.Errorf("Expected error: %v", tt.err)
-		}
-
-		if aTime != tt.out {
-			t.Errorf("Expected time: %v, got: %v", tt.out, aTime)
-		}
+		t.Run(tt.about, func(t *testing.T) {
+			aTime, err := parseTime(tt.in)
+			if err != nil {
+				if tt.err == nil || err.Error() != tt.err.Error() {
+					t.Errorf("ParseTime expected error: %v, got: %v", tt.err, err)
+				}
+				return
+			} else if tt.err != nil {
+				t.Errorf("Expected error: %v", tt.err)
+			}
+
+			if aTime != tt.out {
+				t.Errorf("Expected time: %v, got: %v", tt.out, aTime)
+			}
+		})
 	}
 }

 func TestWeekdayTime(t *testing.T) {
 	for _, tt := range parseWeekdayTests {
-		aTime, err := parseWeekday(tt.in)
-		if err != nil {
-			if tt.err == nil || err.Error() != tt.err.Error() {
-				t.Errorf("ParseWeekday expected error: %v, got: %v", tt.err, err)
-			}
-			continue
-		} else if tt.err != nil {
-			t.Errorf("Expected error: %v", tt.err)
-		}
-
-		if aTime != tt.out {
-			t.Errorf("Expected weekday: %v, got: %v", tt.out, aTime)
-		}
+		t.Run(tt.about, func(t *testing.T) {
+			aTime, err := parseWeekday(tt.in)
+			if err != nil {
+				if tt.err == nil || err.Error() != tt.err.Error() {
+					t.Errorf("ParseWeekday expected error: %v, got: %v", tt.err, err)
+				}
+				return
+			} else if tt.err != nil {
+				t.Errorf("Expected error: %v", tt.err)
+			}
+
+			if aTime != tt.out {
+				t.Errorf("Expected weekday: %v, got: %v", tt.out, aTime)
+			}
+		})
 	}
 }

-func TestClusterAnnotations(t *testing.T) {
-	for _, tt := range annotations {
-		var cluster Postgresql
-		err := cluster.UnmarshalJSON(tt.in)
-		if err != nil {
-			if tt.err == nil || err.Error() != tt.err.Error() {
-				t.Errorf("Unable to marshal cluster with annotations: expected %v got %v", tt.err, err)
-			}
-			continue
-		}
-		for k, v := range cluster.Spec.PodAnnotations {
-			found, expected := v, tt.annotations[k]
-			if found != expected {
-				t.Errorf("Didn't find correct value for key %v in for podAnnotations: Expected %v found %v", k, expected, found)
-			}
-		}
-	}
-}
+func TestPodAnnotations(t *testing.T) {
+	for _, tt := range podAnnotations {
+		t.Run(tt.about, func(t *testing.T) {
+			var cluster Postgresql
+			err := cluster.UnmarshalJSON(tt.in)
+			if err != nil {
+				if tt.err == nil || err.Error() != tt.err.Error() {
+					t.Errorf("Unable to marshal cluster with podAnnotations: expected %v got %v", tt.err, err)
+				}
+				return
+			}
+			for k, v := range cluster.Spec.PodAnnotations {
+				found, expected := v, tt.annotations[k]
+				if found != expected {
+					t.Errorf("Didn't find correct value for key %v in for podAnnotations: Expected %v found %v", k, expected, found)
+				}
+			}
+		})
+	}
+}
+
+func TestServiceAnnotations(t *testing.T) {
+	for _, tt := range serviceAnnotations {
+		t.Run(tt.about, func(t *testing.T) {
+			var cluster Postgresql
+			err := cluster.UnmarshalJSON(tt.in)
+			if err != nil {
+				if tt.err == nil || err.Error() != tt.err.Error() {
+					t.Errorf("Unable to marshal cluster with serviceAnnotations: expected %v got %v", tt.err, err)
+				}
+				return
+			}
+			for k, v := range cluster.Spec.ServiceAnnotations {
+				found, expected := v, tt.annotations[k]
+				if found != expected {
+					t.Errorf("Didn't find correct value for key %v in for serviceAnnotations: Expected %v found %v", k, expected, found)
+				}
+			}
+		})
+	}
+}

 func TestClusterName(t *testing.T) {
 	for _, tt := range clusterNames {
-		name, err := extractClusterName(tt.in, tt.inTeam)
-		if err != nil {
-			if tt.err == nil || err.Error() != tt.err.Error() {
-				t.Errorf("extractClusterName expected error: %v, got: %v", tt.err, err)
-			}
-			continue
-		} else if tt.err != nil {
-			t.Errorf("Expected error: %v", tt.err)
-		}
-		if name != tt.clusterName {
-			t.Errorf("Expected cluserName: %q, got: %q", tt.clusterName, name)
-		}
+		t.Run(tt.about, func(t *testing.T) {
+			name, err := extractClusterName(tt.in, tt.inTeam)
+			if err != nil {
+				if tt.err == nil || err.Error() != tt.err.Error() {
+					t.Errorf("extractClusterName expected error: %v, got: %v", tt.err, err)
+				}
+				return
+			} else if tt.err != nil {
+				t.Errorf("Expected error: %v", tt.err)
+			}
+			if name != tt.clusterName {
+				t.Errorf("Expected cluserName: %q, got: %q", tt.clusterName, name)
+			}
+		})
 	}
 }

 func TestCloneClusterDescription(t *testing.T) {
 	for _, tt := range cloneClusterDescriptions {
-		if err := validateCloneClusterDescription(tt.in); err != nil {
-			if tt.err == nil || err.Error() != tt.err.Error() {
-				t.Errorf("testCloneClusterDescription expected error: %v, got: %v", tt.err, err)
-			}
-		} else if tt.err != nil {
-			t.Errorf("Expected error: %v", tt.err)
-		}
+		t.Run(tt.about, func(t *testing.T) {
+			if err := validateCloneClusterDescription(tt.in); err != nil {
+				if tt.err == nil || err.Error() != tt.err.Error() {
+					t.Errorf("testCloneClusterDescription expected error: %v, got: %v", tt.err, err)
+				}
+			} else if tt.err != nil {
+				t.Errorf("Expected error: %v", tt.err)
+			}
+		})
 	}
 }

 func TestUnmarshalMaintenanceWindow(t *testing.T) {
 	for _, tt := range maintenanceWindows {
-		var m MaintenanceWindow
-		err := m.UnmarshalJSON(tt.in)
-		if err != nil {
-			if tt.err == nil || err.Error() != tt.err.Error() {
-				t.Errorf("MaintenanceWindow unmarshal expected error: %v, got %v", tt.err, err)
-			}
-			continue
-		} else if tt.err != nil {
-			t.Errorf("Expected error: %v", tt.err)
-		}
-
-		if !reflect.DeepEqual(m, tt.out) {
-			t.Errorf("Expected maintenance window: %#v, got: %#v", tt.out, m)
-		}
+		t.Run(tt.about, func(t *testing.T) {
+			var m MaintenanceWindow
+			err := m.UnmarshalJSON(tt.in)
+			if err != nil {
+				if tt.err == nil || err.Error() != tt.err.Error() {
+					t.Errorf("MaintenanceWindow unmarshal expected error: %v, got %v", tt.err, err)
+				}
+				return
+			} else if tt.err != nil {
+				t.Errorf("Expected error: %v", tt.err)
+			}
+
+			if !reflect.DeepEqual(m, tt.out) {
+				t.Errorf("Expected maintenance window: %#v, got: %#v", tt.out, m)
+			}
+		})
 	}
 }

 func TestMarshalMaintenanceWindow(t *testing.T) {
 	for _, tt := range maintenanceWindows {
-		if tt.err != nil {
-			continue
-		}
-
-		s, err := tt.out.MarshalJSON()
-		if err != nil {
-			t.Errorf("Marshal Error: %v", err)
-		}
-
-		if !bytes.Equal(s, tt.in) {
-			t.Errorf("Expected Marshal: %q, got: %q", string(tt.in), string(s))
-		}
+		t.Run(tt.about, func(t *testing.T) {
+			if tt.err != nil {
+				return
+			}
+
+			s, err := tt.out.MarshalJSON()
+			if err != nil {
+				t.Errorf("Marshal Error: %v", err)
+			}
+
+			if !bytes.Equal(s, tt.in) {
+				t.Errorf("Expected Marshal: %q, got: %q", string(tt.in), string(s))
+			}
+		})
 	}
 }

 func TestUnmarshalPostgresStatus(t *testing.T) {
 	for _, tt := range postgresStatus {
-		var ps PostgresStatus
-		err := ps.UnmarshalJSON(tt.in)
-		if err != nil {
-			if tt.err == nil || err.Error() != tt.err.Error() {
-				t.Errorf("CR status unmarshal expected error: %v, got %v", tt.err, err)
-			}
-			continue
-			//} else if tt.err != nil {
-			//t.Errorf("Expected error: %v", tt.err)
-		}
-
-		if !reflect.DeepEqual(ps, tt.out) {
-			t.Errorf("Expected status: %#v, got: %#v", tt.out, ps)
-		}
+		t.Run(tt.about, func(t *testing.T) {
+
+			var ps PostgresStatus
+			err := ps.UnmarshalJSON(tt.in)
+			if err != nil {
+				if tt.err == nil || err.Error() != tt.err.Error() {
+					t.Errorf("CR status unmarshal expected error: %v, got %v", tt.err, err)
+				}
+				return
+			}
+
+			if !reflect.DeepEqual(ps, tt.out) {
+				t.Errorf("Expected status: %#v, got: %#v", tt.out, ps)
+			}
+		})
 	}
 }

 func TestPostgresUnmarshal(t *testing.T) {
 	for _, tt := range unmarshalCluster {
-		var cluster Postgresql
-		err := cluster.UnmarshalJSON(tt.in)
-		if err != nil {
-			if tt.err == nil || err.Error() != tt.err.Error() {
-				t.Errorf("Unmarshal expected error: %v, got: %v", tt.err, err)
-			}
-			continue
-		} else if tt.err != nil {
-			t.Errorf("Expected error: %v", tt.err)
-		}
-
-		if !reflect.DeepEqual(cluster, tt.out) {
-			t.Errorf("Expected Postgresql: %#v, got %#v", tt.out, cluster)
-		}
+		t.Run(tt.about, func(t *testing.T) {
+			var cluster Postgresql
+			err := cluster.UnmarshalJSON(tt.in)
+			if err != nil {
+				if tt.err == nil || err.Error() != tt.err.Error() {
+					t.Errorf("Unmarshal expected error: %v, got: %v", tt.err, err)
+				}
+				return
+			} else if tt.err != nil {
+				t.Errorf("Expected error: %v", tt.err)
+			}
+
+			if !reflect.DeepEqual(cluster, tt.out) {
+				t.Errorf("Expected Postgresql: %#v, got %#v", tt.out, cluster)
+			}
+		})
 	}
 }

 func TestMarshal(t *testing.T) {
 	for _, tt := range unmarshalCluster {
-		if tt.err != nil {
-			continue
-		}
-
-		// Unmarshal and marshal example to capture api changes
-		var cluster Postgresql
-		err := cluster.UnmarshalJSON(tt.marshal)
-		if err != nil {
-			if tt.err == nil || err.Error() != tt.err.Error() {
-				t.Errorf("Backwards compatibility unmarshal expected error: %v, got: %v", tt.err, err)
-			}
-			continue
-		}
-		expected, err := json.Marshal(cluster)
-		if err != nil {
-			t.Errorf("Backwards compatibility marshal error: %v", err)
-		}
-
-		m, err := json.Marshal(tt.out)
-		if err != nil {
-			t.Errorf("Marshal error: %v", err)
-		}
-		if !bytes.Equal(m, expected) {
-			t.Errorf("Marshal Postgresql \nexpected: %q, \ngot: %q", string(expected), string(m))
-		}
+		t.Run(tt.about, func(t *testing.T) {
+			if tt.err != nil {
+				return
+			}
+
+			// Unmarshal and marshal example to capture api changes
+			var cluster Postgresql
+			err := cluster.UnmarshalJSON(tt.marshal)
+			if err != nil {
+				if tt.err == nil || err.Error() != tt.err.Error() {
+					t.Errorf("Backwards compatibility unmarshal expected error: %v, got: %v", tt.err, err)
+				}
+				return
+			}
+			expected, err := json.Marshal(cluster)
+			if err != nil {
+				t.Errorf("Backwards compatibility marshal error: %v", err)
+			}
+
+			m, err := json.Marshal(tt.out)
+			if err != nil {
+				t.Errorf("Marshal error: %v", err)
+			}
+			if !bytes.Equal(m, expected) {
+				t.Errorf("Marshal Postgresql \nexpected: %q, \ngot: %q", string(expected), string(m))
+			}
+		})
 	}
 }

 func TestPostgresMeta(t *testing.T) {
 	for _, tt := range unmarshalCluster {
-		if a := tt.out.GetObjectKind(); a != &tt.out.TypeMeta {
-			t.Errorf("GetObjectKindMeta \nexpected: %v, \ngot: %v", tt.out.TypeMeta, a)
-		}
-
-		if a := tt.out.GetObjectMeta(); reflect.DeepEqual(a, tt.out.ObjectMeta) {
-			t.Errorf("GetObjectMeta \nexpected: %v, \ngot: %v", tt.out.ObjectMeta, a)
-		}
+		t.Run(tt.about, func(t *testing.T) {
+
+			if a := tt.out.GetObjectKind(); a != &tt.out.TypeMeta {
+				t.Errorf("GetObjectKindMeta \nexpected: %v, \ngot: %v", tt.out.TypeMeta, a)
+			}
+
+			if a := tt.out.GetObjectMeta(); reflect.DeepEqual(a, tt.out.ObjectMeta) {
+				t.Errorf("GetObjectMeta \nexpected: %v, \ngot: %v", tt.out.ObjectMeta, a)
+			}
+		})
 	}
 }

 func TestPostgresListMeta(t *testing.T) {
 	for _, tt := range postgresqlList {
-		if tt.err != nil {
-			continue
-		}
-
-		if a := tt.out.GetObjectKind(); a != &tt.out.TypeMeta {
-			t.Errorf("GetObjectKindMeta expected: %v, got: %v", tt.out.TypeMeta, a)
-		}
-
-		if a := tt.out.GetListMeta(); reflect.DeepEqual(a, tt.out.ListMeta) {
-			t.Errorf("GetObjectMeta expected: %v, got: %v", tt.out.ListMeta, a)
-		}
-
-		return
+		t.Run(tt.about, func(t *testing.T) {
+			if tt.err != nil {
+				return
+			}
+
+			if a := tt.out.GetObjectKind(); a != &tt.out.TypeMeta {
+				t.Errorf("GetObjectKindMeta expected: %v, got: %v", tt.out.TypeMeta, a)
+			}
+
+			if a := tt.out.GetListMeta(); reflect.DeepEqual(a, tt.out.ListMeta) {
+				t.Errorf("GetObjectMeta expected: %v, got: %v", tt.out.ListMeta, a)
+			}
+
+			return
+		})
 	}
 }

 func TestPostgresqlClone(t *testing.T) {
 	for _, tt := range unmarshalCluster {
-		cp := &tt.out
-		cp.Error = ""
-		clone := cp.Clone()
-		if !reflect.DeepEqual(clone, cp) {
-			t.Errorf("TestPostgresqlClone expected: \n%#v\n, got \n%#v", cp, clone)
-		}
-
+		t.Run(tt.about, func(t *testing.T) {
+			cp := &tt.out
+			cp.Error = ""
+			clone := cp.Clone()
+			if !reflect.DeepEqual(clone, cp) {
+				t.Errorf("TestPostgresqlClone expected: \n%#v\n, got \n%#v", cp, clone)
+			}
+		})
 	}
 }
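The pattern applied throughout this test file, an `about` string per table entry plus a `t.Run` wrapper, gives each case its own named subtest: failures report the scenario text, and the `continue` statements become `return` because each case now runs in its own function. A minimal sketch of the pattern in isolation:

    package example

    import "testing"

    var addTests = []struct {
    	about string // human-readable scenario name, reported on failure
    	a, b  int
    	want  int
    }{
    	{"both positive", 2, 3, 5},
    	{"negative operand", -2, 3, 1},
    }

    func TestAdd(t *testing.T) {
    	for _, tt := range addTests {
    		t.Run(tt.about, func(t *testing.T) {
    			if got := tt.a + tt.b; got != tt.want {
    				t.Errorf("add(%d, %d) = %d, want %d", tt.a, tt.b, got, tt.want)
    			}
    		})
    	}
    }

A single case can then be selected with `go test -run 'TestAdd/negative_operand'` (t.Run replaces spaces in subtest names with underscores).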
@@ -514,6 +514,13 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
 			(*out)[key] = val
 		}
 	}
+	if in.ServiceAnnotations != nil {
+		in, out := &in.ServiceAnnotations, &out.ServiceAnnotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
 	if in.InitContainersOld != nil {
 		in, out := &in.InitContainersOld, &out.InitContainersOld
 		*out = make([]corev1.Container, len(*in))
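The generated deep-copy allocates a fresh map and copies entries one by one; assigning a Go map copies only the map header, so both structs would otherwise share storage. A standalone illustration of the aliasing this avoids:

    package main

    import "fmt"

    func main() {
    	src := map[string]string{"foo": "bar"}

    	shallow := src // both variables point at the same underlying map
    	deep := make(map[string]string, len(src))
    	for k, v := range src { // element-wise copy, as in DeepCopyInto
    		deep[k] = v
    	}

    	src["foo"] = "changed"
    	fmt.Println(shallow["foo"]) // "changed": aliased with src
    	fmt.Println(deep["foo"])    // "bar": independent copy
    }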
@@ -29,7 +29,7 @@ import (
 	"github.com/zalando/postgres-operator/pkg/util/patroni"
 	"github.com/zalando/postgres-operator/pkg/util/teams"
 	"github.com/zalando/postgres-operator/pkg/util/users"
-	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
+	rbacv1 "k8s.io/api/rbac/v1"
 )

 var (
@@ -45,7 +45,7 @@ type Config struct {
 	RestConfig          *rest.Config
 	InfrastructureRoles map[string]spec.PgUser // inherited from the controller
 	PodServiceAccount   *v1.ServiceAccount
-	PodServiceAccountRoleBinding *rbacv1beta1.RoleBinding
+	PodServiceAccountRoleBinding *rbacv1.RoleBinding
 }

 type kubeResources struct {
@@ -227,8 +227,8 @@ func (c *Cluster) Create() error {

 	c.setStatus(acidv1.ClusterStatusCreating)

-	if err = c.validateResources(&c.Spec); err != nil {
-		return fmt.Errorf("insufficient resource limits specified: %v", err)
+	if err = c.enforceMinResourceLimits(&c.Spec); err != nil {
+		return fmt.Errorf("could not enforce minimum resource limits: %v", err)
 	}

 	for _, role := range []PostgresRole{Master, Replica} {
@@ -495,38 +495,38 @@ func compareResourcesAssumeFirstNotNil(a *v1.ResourceRequirements, b *v1.Resourc

 }

-func (c *Cluster) validateResources(spec *acidv1.PostgresSpec) error {
-
-	// setting limits too low can cause unnecessary evictions / OOM kills
-	const (
-		cpuMinLimit    = "256m"
-		memoryMinLimit = "256Mi"
-	)
+func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {

 	var (
 		isSmaller bool
 		err       error
 	)

+	// setting limits too low can cause unnecessary evictions / OOM kills
+	minCPULimit := c.OpConfig.MinCPULimit
+	minMemoryLimit := c.OpConfig.MinMemoryLimit
+
 	cpuLimit := spec.Resources.ResourceLimits.CPU
 	if cpuLimit != "" {
-		isSmaller, err = util.IsSmallerQuantity(cpuLimit, cpuMinLimit)
+		isSmaller, err = util.IsSmallerQuantity(cpuLimit, minCPULimit)
 		if err != nil {
-			return fmt.Errorf("error validating CPU limit: %v", err)
+			return fmt.Errorf("could not compare defined CPU limit %s with configured minimum value %s: %v", cpuLimit, minCPULimit, err)
 		}
 		if isSmaller {
-			return fmt.Errorf("defined CPU limit %s is below required minimum %s to properly run postgresql resource", cpuLimit, cpuMinLimit)
+			c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit)
+			spec.Resources.ResourceLimits.CPU = minCPULimit
 		}
 	}

 	memoryLimit := spec.Resources.ResourceLimits.Memory
 	if memoryLimit != "" {
-		isSmaller, err = util.IsSmallerQuantity(memoryLimit, memoryMinLimit)
+		isSmaller, err = util.IsSmallerQuantity(memoryLimit, minMemoryLimit)
 		if err != nil {
-			return fmt.Errorf("error validating memory limit: %v", err)
+			return fmt.Errorf("could not compare defined memory limit %s with configured minimum value %s: %v", memoryLimit, minMemoryLimit, err)
 		}
 		if isSmaller {
-			return fmt.Errorf("defined memory limit %s is below required minimum %s to properly run postgresql resource", memoryLimit, memoryMinLimit)
+			c.logger.Warningf("defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit)
+			spec.Resources.ResourceLimits.Memory = minMemoryLimit
 		}
 	}

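
Note: util.IsSmallerQuantity itself is not shown in this diff. A plausible sketch of what it does, using the Kubernetes resource.Quantity API (an assumption, not the verified upstream implementation):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    // isSmallerQuantity reports whether value parses to a strictly smaller
    // quantity than min, e.g. "200m" < "250m" for CPU.
    func isSmallerQuantity(value, min string) (bool, error) {
        v, err := resource.ParseQuantity(value)
        if err != nil {
            return false, fmt.Errorf("could not parse quantity %q: %v", value, err)
        }
        m, err := resource.ParseQuantity(min)
        if err != nil {
            return false, fmt.Errorf("could not parse quantity %q: %v", min, err)
        }
        return v.Cmp(m) < 0, nil
    }

    func main() {
        smaller, _ := isSmallerQuantity("200m", "250m")
        fmt.Println(smaller) // true: a 200m CPU limit is below a 250m minimum
    }

With the change above, undersized limits are no longer rejected outright; they are bumped to the configured minimum and a warning is logged.
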
@@ -543,7 +543,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 	c.mu.Lock()
 	defer c.mu.Unlock()

-	oldStatus := c.Status
 	c.setStatus(acidv1.ClusterStatusUpdating)
 	c.setSpec(newSpec)

@@ -555,22 +554,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 		}
 	}()

-	if err := c.validateResources(&newSpec.Spec); err != nil {
-		err = fmt.Errorf("insufficient resource limits specified: %v", err)
-
-		// cancel update only when (already too low) pod resources were edited
-		// if cluster was successfully running before the update, continue but log a warning
-		isCPULimitSmaller, err2 := util.IsSmallerQuantity(newSpec.Spec.Resources.ResourceLimits.CPU, oldSpec.Spec.Resources.ResourceLimits.CPU)
-		isMemoryLimitSmaller, err3 := util.IsSmallerQuantity(newSpec.Spec.Resources.ResourceLimits.Memory, oldSpec.Spec.Resources.ResourceLimits.Memory)
-
-		if oldStatus.Running() && !isCPULimitSmaller && !isMemoryLimitSmaller && err2 == nil && err3 == nil {
-			c.logger.Warning(err)
-		} else {
-			updateFailed = true
-			return err
-		}
-	}
-
 	if oldSpec.Spec.PgVersion != newSpec.Spec.PgVersion { // PG versions comparison
 		c.logger.Warningf("postgresql version change(%q -> %q) has no effect", oldSpec.Spec.PgVersion, newSpec.Spec.PgVersion)
 		//we need that hack to generate statefulset with the old version

@@ -616,6 +599,12 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {

 	// Statefulset
 	func() {
+		if err := c.enforceMinResourceLimits(&c.Spec); err != nil {
+			c.logger.Errorf("could not sync resources: %v", err)
+			updateFailed = true
+			return
+		}
+
 		oldSs, err := c.generateStatefulSet(&oldSpec.Spec)
 		if err != nil {
 			c.logger.Errorf("could not generate old statefulset spec: %v", err)

@@ -623,6 +612,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 			return
 		}

+		// update newSpec for later comparison with oldSpec
+		c.enforceMinResourceLimits(&newSpec.Spec)
+
 		newSs, err := c.generateStatefulSet(&newSpec.Spec)
 		if err != nil {
 			c.logger.Errorf("could not generate new statefulset spec: %v", err)

@@ -355,6 +355,12 @@ func TestPodAnnotations(t *testing.T) {
 			database: map[string]string{"foo": "bar"},
 			merged:   map[string]string{"foo": "bar"},
 		},
+		{
+			subTest:  "Both Annotations",
+			operator: map[string]string{"foo": "bar"},
+			database: map[string]string{"post": "gres"},
+			merged:   map[string]string{"foo": "bar", "post": "gres"},
+		},
 		{
 			subTest:  "Database Config overrides Operator Config Annotations",
 			operator: map[string]string{"foo": "bar", "global": "foo"},

@@ -382,3 +388,319 @@ func TestPodAnnotations(t *testing.T) {
 		}
 	}
 }
+
+func TestServiceAnnotations(t *testing.T) {
+	enabled := true
+	disabled := false
+	tests := []struct {
+		about                         string
+		role                          PostgresRole
+		enableMasterLoadBalancerSpec  *bool
+		enableMasterLoadBalancerOC    bool
+		enableReplicaLoadBalancerSpec *bool
+		enableReplicaLoadBalancerOC   bool
+		operatorAnnotations           map[string]string
+		clusterAnnotations            map[string]string
+		expect                        map[string]string
+	}{
+		//MASTER
+		{
+			about: "Master with no annotations and EnableMasterLoadBalancer disabled on spec and OperatorConfig",
+			role:  "master",
+			enableMasterLoadBalancerSpec: &disabled,
+			enableMasterLoadBalancerOC:   false,
+			operatorAnnotations:          make(map[string]string),
+			clusterAnnotations:           make(map[string]string),
+			expect:                       make(map[string]string),
+		},
+		{
+			about: "Master with no annotations and EnableMasterLoadBalancer enabled on spec",
+			role:  "master",
+			enableMasterLoadBalancerSpec: &enabled,
+			enableMasterLoadBalancerOC:   false,
+			operatorAnnotations:          make(map[string]string),
+			clusterAnnotations:           make(map[string]string),
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+			},
+		},
+		{
+			about: "Master with no annotations and EnableMasterLoadBalancer enabled only on operator config",
+			role:  "master",
+			enableMasterLoadBalancerSpec: &disabled,
+			enableMasterLoadBalancerOC:   true,
+			operatorAnnotations:          make(map[string]string),
+			clusterAnnotations:           make(map[string]string),
+			expect:                       make(map[string]string),
+		},
+		{
+			about: "Master with no annotations and EnableMasterLoadBalancer defined only on operator config",
+			role:  "master",
+			enableMasterLoadBalancerOC: true,
+			operatorAnnotations:        make(map[string]string),
+			clusterAnnotations:         make(map[string]string),
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+			},
+		},
+		{
+			about: "Master with cluster annotations and load balancer enabled",
+			role:  "master",
+			enableMasterLoadBalancerOC: true,
+			operatorAnnotations:        make(map[string]string),
+			clusterAnnotations:         map[string]string{"foo": "bar"},
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+				"foo": "bar",
+			},
+		},
+		{
+			about: "Master with cluster annotations and load balancer disabled",
+			role:  "master",
+			enableMasterLoadBalancerSpec: &disabled,
+			enableMasterLoadBalancerOC:   true,
+			operatorAnnotations:          make(map[string]string),
+			clusterAnnotations:           map[string]string{"foo": "bar"},
+			expect:                       map[string]string{"foo": "bar"},
+		},
+		{
+			about: "Master with operator annotations and load balancer enabled",
+			role:  "master",
+			enableMasterLoadBalancerOC: true,
+			operatorAnnotations:        map[string]string{"foo": "bar"},
+			clusterAnnotations:         make(map[string]string),
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+				"foo": "bar",
+			},
+		},
+		{
+			about: "Master with operator annotations override default annotations",
+			role:  "master",
+			enableMasterLoadBalancerOC: true,
+			operatorAnnotations: map[string]string{
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
+			},
+			clusterAnnotations: make(map[string]string),
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
+			},
+		},
+		{
+			about: "Master with cluster annotations override default annotations",
+			role:  "master",
+			enableMasterLoadBalancerOC: true,
+			operatorAnnotations:        make(map[string]string),
+			clusterAnnotations: map[string]string{
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
+			},
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
+			},
+		},
+		{
+			about: "Master with cluster annotations do not override external-dns annotations",
+			role:  "master",
+			enableMasterLoadBalancerOC: true,
+			operatorAnnotations:        make(map[string]string),
+			clusterAnnotations: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
+			},
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+			},
+		},
+		{
+			about: "Master with operator annotations do not override external-dns annotations",
+			role:  "master",
+			enableMasterLoadBalancerOC: true,
+			clusterAnnotations:         make(map[string]string),
+			operatorAnnotations: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
+			},
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+			},
+		},
+		// REPLICA
+		{
+			about: "Replica with no annotations and EnableReplicaLoadBalancer disabled on spec and OperatorConfig",
+			role:  "replica",
+			enableReplicaLoadBalancerSpec: &disabled,
+			enableReplicaLoadBalancerOC:   false,
+			operatorAnnotations:           make(map[string]string),
+			clusterAnnotations:            make(map[string]string),
+			expect:                        make(map[string]string),
+		},
+		{
+			about: "Replica with no annotations and EnableReplicaLoadBalancer enabled on spec",
+			role:  "replica",
+			enableReplicaLoadBalancerSpec: &enabled,
+			enableReplicaLoadBalancerOC:   false,
+			operatorAnnotations:           make(map[string]string),
+			clusterAnnotations:            make(map[string]string),
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test-repl.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+			},
+		},
+		{
+			about: "Replica with no annotations and EnableReplicaLoadBalancer enabled only on operator config",
+			role:  "replica",
+			enableReplicaLoadBalancerSpec: &disabled,
+			enableReplicaLoadBalancerOC:   true,
+			operatorAnnotations:           make(map[string]string),
+			clusterAnnotations:            make(map[string]string),
+			expect:                        make(map[string]string),
+		},
+		{
+			about: "Replica with no annotations and EnableReplicaLoadBalancer defined only on operator config",
+			role:  "replica",
+			enableReplicaLoadBalancerOC: true,
+			operatorAnnotations:         make(map[string]string),
+			clusterAnnotations:          make(map[string]string),
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test-repl.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+			},
+		},
+		{
+			about: "Replica with cluster annotations and load balancer enabled",
+			role:  "replica",
+			enableReplicaLoadBalancerOC: true,
+			operatorAnnotations:         make(map[string]string),
+			clusterAnnotations:          map[string]string{"foo": "bar"},
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test-repl.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+				"foo": "bar",
+			},
+		},
+		{
+			about: "Replica with cluster annotations and load balancer disabled",
+			role:  "replica",
+			enableReplicaLoadBalancerSpec: &disabled,
+			enableReplicaLoadBalancerOC:   true,
+			operatorAnnotations:           make(map[string]string),
+			clusterAnnotations:            map[string]string{"foo": "bar"},
+			expect:                        map[string]string{"foo": "bar"},
+		},
+		{
+			about: "Replica with operator annotations and load balancer enabled",
+			role:  "replica",
+			enableReplicaLoadBalancerOC: true,
+			operatorAnnotations:         map[string]string{"foo": "bar"},
+			clusterAnnotations:          make(map[string]string),
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test-repl.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+				"foo": "bar",
+			},
+		},
+		{
+			about: "Replica with operator annotations override default annotations",
+			role:  "replica",
+			enableReplicaLoadBalancerOC: true,
+			operatorAnnotations: map[string]string{
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
+			},
+			clusterAnnotations: make(map[string]string),
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test-repl.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
+			},
+		},
+		{
+			about: "Replica with cluster annotations override default annotations",
+			role:  "replica",
+			enableReplicaLoadBalancerOC: true,
+			operatorAnnotations:         make(map[string]string),
+			clusterAnnotations: map[string]string{
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
+			},
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test-repl.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
+			},
+		},
+		{
+			about: "Replica with cluster annotations do not override external-dns annotations",
+			role:  "replica",
+			enableReplicaLoadBalancerOC: true,
+			operatorAnnotations:         make(map[string]string),
+			clusterAnnotations: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
+			},
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test-repl.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+			},
+		},
+		{
+			about: "Replica with operator annotations do not override external-dns annotations",
+			role:  "replica",
+			enableReplicaLoadBalancerOC: true,
+			clusterAnnotations:          make(map[string]string),
+			operatorAnnotations: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
+			},
+			expect: map[string]string{
+				"external-dns.alpha.kubernetes.io/hostname":                            "test-repl.acid.db.example.com",
+				"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+			},
+		},
+		// COMMON
+		{
+			about: "cluster annotations append to operator annotations",
+			role:  "replica",
+			enableReplicaLoadBalancerOC: false,
+			operatorAnnotations:         map[string]string{"foo": "bar"},
+			clusterAnnotations:          map[string]string{"post": "gres"},
+			expect:                      map[string]string{"foo": "bar", "post": "gres"},
+		},
+		{
+			about: "cluster annotations override operator annotations",
+			role:  "replica",
+			enableReplicaLoadBalancerOC: false,
+			operatorAnnotations:         map[string]string{"foo": "bar", "post": "gres"},
+			clusterAnnotations:          map[string]string{"post": "greSQL"},
+			expect:                      map[string]string{"foo": "bar", "post": "greSQL"},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.about, func(t *testing.T) {
+			cl.OpConfig.CustomServiceAnnotations = tt.operatorAnnotations
+			cl.OpConfig.EnableMasterLoadBalancer = tt.enableMasterLoadBalancerOC
+			cl.OpConfig.EnableReplicaLoadBalancer = tt.enableReplicaLoadBalancerOC
+			cl.OpConfig.MasterDNSNameFormat = "{cluster}.{team}.{hostedzone}"
+			cl.OpConfig.ReplicaDNSNameFormat = "{cluster}-repl.{team}.{hostedzone}"
+			cl.OpConfig.DbHostedZone = "db.example.com"
+
+			cl.Postgresql.Spec.ClusterName = "test"
+			cl.Postgresql.Spec.TeamID = "acid"
+			cl.Postgresql.Spec.ServiceAnnotations = tt.clusterAnnotations
+			cl.Postgresql.Spec.EnableMasterLoadBalancer = tt.enableMasterLoadBalancerSpec
+			cl.Postgresql.Spec.EnableReplicaLoadBalancer = tt.enableReplicaLoadBalancerSpec
+
+			got := cl.generateServiceAnnotations(tt.role, &cl.Postgresql.Spec)
+			if len(tt.expect) != len(got) {
+				t.Errorf("expected %d annotation(s), got %d", len(tt.expect), len(got))
+				return
+			}
+			for k, v := range got {
+				if tt.expect[k] != v {
+					t.Errorf("expected annotation '%v' with value '%v', got value '%v'", k, tt.expect[k], v)
+				}
+			}
+		})
+	}
+}

@@ -1051,6 +1051,7 @@ func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
 	/* Limit the max number of pods to one, if this is standby-cluster */
 	if spec.StandbyCluster != nil {
 		c.logger.Info("Standby cluster can have maximum of 1 pod")
+		min = 1
 		max = 1
 	}
 	if max >= 0 && newcur > max {

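
The surrounding function (only partially visible here) clamps the requested instance count into [min, max]; the added line also pins min to 1 for standby clusters, so they always run exactly one pod. A simplified sketch of that clamping, assuming the hidden parts behave as the visible lines suggest:

    // clampInstances pins standby clusters to one pod and otherwise clamps
    // the requested count into [min, max]; a negative bound means "unbounded".
    func clampInstances(requested, min, max int32, standby bool) int32 {
        cur := requested
        if standby {
            min, max = 1, 1
        }
        if max >= 0 && cur > max {
            cur = max
        }
        if min >= 0 && cur < min {
            cur = min
        }
        return cur
    }
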
@@ -1229,14 +1230,6 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *ac
 }

 func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) *v1.Service {
-	var dnsName string
-
-	if role == Master {
-		dnsName = c.masterDNSName()
-	} else {
-		dnsName = c.replicaDNSName()
-	}
-
 	serviceSpec := v1.ServiceSpec{
 		Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
 		Type:  v1.ServiceTypeClusterIP,

@@ -1246,8 +1239,6 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
 		serviceSpec.Selector = c.roleLabelsSet(false, role)
 	}

-	var annotations map[string]string
-
 	if c.shouldCreateLoadBalancerForService(role, spec) {

 		// spec.AllowedSourceRanges evaluates to the empty slice of zero length

@@ -1261,18 +1252,6 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)

 		c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges)
 		serviceSpec.Type = v1.ServiceTypeLoadBalancer
-
-		annotations = map[string]string{
-			constants.ZalandoDNSNameAnnotation: dnsName,
-			constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
-		}
-
-		if len(c.OpConfig.CustomServiceAnnotations) != 0 {
-			c.logger.Debugf("There are custom annotations defined, creating them.")
-			for customAnnotationKey, customAnnotationValue := range c.OpConfig.CustomServiceAnnotations {
-				annotations[customAnnotationKey] = customAnnotationValue
-			}
-		}
 	} else if role == Replica {
 		// before PR #258, the replica service was only created if allocated a LB
 		// now we always create the service but warn if the LB is absent

@@ -1284,7 +1263,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
 			Name:        c.serviceName(role),
 			Namespace:   c.Namespace,
 			Labels:      c.roleLabelsSet(true, role),
-			Annotations: annotations,
+			Annotations: c.generateServiceAnnotations(role, spec),
 		},
 		Spec: serviceSpec,
 	}

@@ -1292,6 +1271,42 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
 	return service
 }

+func (c *Cluster) generateServiceAnnotations(role PostgresRole, spec *acidv1.PostgresSpec) map[string]string {
+	annotations := make(map[string]string)
+
+	for k, v := range c.OpConfig.CustomServiceAnnotations {
+		annotations[k] = v
+	}
+	if spec != nil && spec.ServiceAnnotations != nil {
+		for k, v := range spec.ServiceAnnotations {
+			annotations[k] = v
+		}
+	}
+
+	if c.shouldCreateLoadBalancerForService(role, spec) {
+		var dnsName string
+		if role == Master {
+			dnsName = c.masterDNSName()
+		} else {
+			dnsName = c.replicaDNSName()
+		}
+
+		// Just set ELB Timeout annotation with default value, if it does not
+		// have a custom value
+		if _, ok := annotations[constants.ElbTimeoutAnnotationName]; !ok {
+			annotations[constants.ElbTimeoutAnnotationName] = constants.ElbTimeoutAnnotationValue
+		}
+		// External DNS name annotation is not customizable
+		annotations[constants.ZalandoDNSNameAnnotation] = dnsName
+	}
+
+	if len(annotations) == 0 {
+		return nil
+	}
+
+	return annotations
+}
+
 func (c *Cluster) generateEndpoint(role PostgresRole, subsets []v1.EndpointSubset) *v1.Endpoints {
 	endpoints := &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{

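
The new generateServiceAnnotations establishes a clear precedence: operator-level CustomServiceAnnotations are applied first, per-cluster ServiceAnnotations override them, the ELB idle-timeout default is filled in only when the key is not already set, and the external-DNS hostname is always owned by the operator. A standalone sketch of the resulting merge (the annotation keys and hostname are illustrative values taken from the tests above):

    package main

    import "fmt"

    func main() {
        operator := map[string]string{"foo": "bar"}
        cluster := map[string]string{
            "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
        }

        annotations := map[string]string{}
        for k, v := range operator { // lowest precedence
            annotations[k] = v
        }
        for k, v := range cluster { // overrides operator-level values
            annotations[k] = v
        }
        // default applied only when the key is still unset
        if _, ok := annotations["service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout"]; !ok {
            annotations["service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout"] = "3600"
        }
        // the external-DNS name is not customizable
        annotations["external-dns.alpha.kubernetes.io/hostname"] = "test.acid.db.example.com"

        fmt.Println(annotations) // keeps "foo", keeps the custom 1800s timeout, forces the hostname
    }
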
@@ -1588,6 +1603,10 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
 			Name:  "LOGICAL_BACKUP_S3_BUCKET",
 			Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket,
 		},
+		{
+			Name:  "LOGICAL_BACKUP_S3_REGION",
+			Value: c.OpConfig.LogicalBackup.LogicalBackupS3Region,
+		},
 		{
 			Name:  "LOGICAL_BACKUP_S3_ENDPOINT",
 			Value: c.OpConfig.LogicalBackup.LogicalBackupS3Endpoint,

@@ -366,6 +366,11 @@ func (c *Cluster) createService(role PostgresRole) (*v1.Service, error) {
 }

 func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error {
+	var (
+		svc *v1.Service
+		err error
+	)
+
 	c.setProcessName("updating %v service", role)

 	if c.Services[role] == nil {

@@ -373,70 +378,6 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
 	}

 	serviceName := util.NameFromMeta(c.Services[role].ObjectMeta)
-	endpointName := util.NameFromMeta(c.Endpoints[role].ObjectMeta)
-	// TODO: check if it possible to change the service type with a patch in future versions of Kubernetes
-	if newService.Spec.Type != c.Services[role].Spec.Type {
-		// service type has changed, need to replace the service completely.
-		// we cannot use just patch the current service, since it may contain attributes incompatible with the new type.
-		var (
-			currentEndpoint *v1.Endpoints
-			err             error
-		)
-
-		if role == Master {
-			// for the master service we need to re-create the endpoint as well. Get the up-to-date version of
-			// the addresses stored in it before the service is deleted (deletion of the service removes the endpoint)
-			currentEndpoint, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{})
-			if err != nil {
-				return fmt.Errorf("could not get current cluster %s endpoints: %v", role, err)
-			}
-		}
-		err = c.KubeClient.Services(serviceName.Namespace).Delete(serviceName.Name, c.deleteOptions)
-		if err != nil {
-			return fmt.Errorf("could not delete service %q: %v", serviceName, err)
-		}
-
-		// wait until the service is truly deleted
-		c.logger.Debugf("waiting for service to be deleted")
-
-		err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
-			func() (bool, error) {
-				_, err2 := c.KubeClient.Services(serviceName.Namespace).Get(serviceName.Name, metav1.GetOptions{})
-				if err2 == nil {
-					return false, nil
-				}
-				if k8sutil.ResourceNotFound(err2) {
-					return true, nil
-				}
-				return false, err2
-			})
-		if err != nil {
-			return fmt.Errorf("could not delete service %q: %v", serviceName, err)
-		}
-
-		// make sure we clear the stored service and endpoint status if the subsequent create fails.
-		c.Services[role] = nil
-		c.Endpoints[role] = nil
-		if role == Master {
-			// create the new endpoint using the addresses obtained from the previous one
-			endpointSpec := c.generateEndpoint(role, currentEndpoint.Subsets)
-			ep, err := c.KubeClient.Endpoints(endpointSpec.Namespace).Create(endpointSpec)
-			if err != nil {
-				return fmt.Errorf("could not create endpoint %q: %v", endpointName, err)
-			}
-
-			c.Endpoints[role] = ep
-		}
-
-		svc, err := c.KubeClient.Services(serviceName.Namespace).Create(newService)
-		if err != nil {
-			return fmt.Errorf("could not create service %q: %v", serviceName, err)
-		}
-
-		c.Services[role] = svc
-
-		return nil
-	}
-
 	// update the service annotation in order to propagate ELB notation.
 	if len(newService.ObjectMeta.Annotations) > 0 {

@@ -454,18 +395,30 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
 		}
 	}

-	patchData, err := specPatch(newService.Spec)
-	if err != nil {
-		return fmt.Errorf("could not form patch for the service %q: %v", serviceName, err)
-	}
-
-	// update the service spec
-	svc, err := c.KubeClient.Services(serviceName.Namespace).Patch(
-		serviceName.Name,
-		types.MergePatchType,
-		patchData, "")
-	if err != nil {
-		return fmt.Errorf("could not patch service %q: %v", serviceName, err)
-	}
+	// now, patch the service spec, but when disabling LoadBalancers do update instead
+	// patch does not work because of LoadBalancerSourceRanges field (even if set to nil)
+	oldServiceType := c.Services[role].Spec.Type
+	newServiceType := newService.Spec.Type
+	if newServiceType == "ClusterIP" && newServiceType != oldServiceType {
+		newService.ResourceVersion = c.Services[role].ResourceVersion
+		newService.Spec.ClusterIP = c.Services[role].Spec.ClusterIP
+		svc, err = c.KubeClient.Services(serviceName.Namespace).Update(newService)
+		if err != nil {
+			return fmt.Errorf("could not update service %q: %v", serviceName, err)
+		}
+	} else {
+		patchData, err := specPatch(newService.Spec)
+		if err != nil {
+			return fmt.Errorf("could not form patch for the service %q: %v", serviceName, err)
+		}
+
+		svc, err = c.KubeClient.Services(serviceName.Namespace).Patch(
+			serviceName.Name,
+			types.MergePatchType,
+			patchData, "")
+		if err != nil {
+			return fmt.Errorf("could not patch service %q: %v", serviceName, err)
+		}
+	}
 	c.Services[role] = svc

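
The branch above works around a patch limitation: a merge patch cannot reliably clear the LoadBalancerSourceRanges field when a service is switched back to ClusterIP, so that one transition is done with a full Update, which in turn must carry over the immutable ClusterIP and the current ResourceVersion. A small sketch of just the decision, assuming the rest of updateService behaves as shown (the helper name is invented):

    import v1 "k8s.io/api/core/v1"

    // needsUpdateInsteadOfPatch mirrors the condition above: only a switch
    // back to ClusterIP forces a full Update; all other changes can be
    // applied with a merge patch of the service spec.
    func needsUpdateInsteadOfPatch(current, desired *v1.Service) bool {
        return desired.Spec.Type == v1.ServiceTypeClusterIP &&
            desired.Spec.Type != current.Spec.Type
    }
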
@@ -23,7 +23,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 	c.mu.Lock()
 	defer c.mu.Unlock()

-	oldStatus := c.Status
 	c.setSpec(newSpec)

 	defer func() {

@@ -35,16 +34,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		}
 	}()

-	if err = c.validateResources(&c.Spec); err != nil {
-		err = fmt.Errorf("insufficient resource limits specified: %v", err)
-		if oldStatus.Running() {
-			c.logger.Warning(err)
-			err = nil
-		} else {
-			return err
-		}
-	}
-
 	if err = c.initUsers(); err != nil {
 		err = fmt.Errorf("could not init users: %v", err)
 		return err

@@ -76,6 +65,11 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		return err
 	}

+	if err = c.enforceMinResourceLimits(&c.Spec); err != nil {
+		err = fmt.Errorf("could not enforce minimum resource limits: %v", err)
+		return err
+	}
+
 	c.logger.Debugf("syncing statefulsets")
 	if err = c.syncStatefulSet(); err != nil {
 		if !k8sutil.ResourceAlreadyExists(err) {

@@ -122,7 +116,7 @@ func (c *Cluster) syncServices() error {
 		c.logger.Debugf("syncing %s service", role)

 		if err := c.syncEndpoint(role); err != nil {
-			return fmt.Errorf("could not sync %s endpont: %v", role, err)
+			return fmt.Errorf("could not sync %s endpoint: %v", role, err)
 		}

 		if err := c.syncService(role); err != nil {

@@ -7,7 +7,7 @@ import (

 	"github.com/sirupsen/logrus"
 	v1 "k8s.io/api/core/v1"
-	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes/scheme"

@@ -57,7 +57,7 @@ type Controller struct {
 	workerLogs map[uint32]ringlog.RingLogger

 	PodServiceAccount *v1.ServiceAccount
-	PodServiceAccountRoleBinding *rbacv1beta1.RoleBinding
+	PodServiceAccountRoleBinding *rbacv1.RoleBinding
 }

 // NewController creates a new controller

@@ -161,11 +161,12 @@ func (c *Controller) initPodServiceAccount() {

 	if c.opConfig.PodServiceAccountDefinition == "" {
 		c.opConfig.PodServiceAccountDefinition = `
-		{ "apiVersion": "v1",
-		  "kind": "ServiceAccount",
-		  "metadata": {
-				 "name": "operator"
-		   }
+		{
+			"apiVersion": "v1",
+			"kind": "ServiceAccount",
+			"metadata": {
+				"name": "postgres-pod"
+			}
 		}`
 	}

@@ -175,13 +176,13 @@ func (c *Controller) initPodServiceAccount() {

 	switch {
 	case err != nil:
-		panic(fmt.Errorf("Unable to parse pod service account definition from the operator config map: %v", err))
+		panic(fmt.Errorf("Unable to parse pod service account definition from the operator configuration: %v", err))
 	case groupVersionKind.Kind != "ServiceAccount":
-		panic(fmt.Errorf("pod service account definition in the operator config map defines another type of resource: %v", groupVersionKind.Kind))
+		panic(fmt.Errorf("pod service account definition in the operator configuration defines another type of resource: %v", groupVersionKind.Kind))
 	default:
 		c.PodServiceAccount = obj.(*v1.ServiceAccount)
 		if c.PodServiceAccount.Name != c.opConfig.PodServiceAccountName {
-			c.logger.Warnf("in the operator config map, the pod service account name %v does not match the name %v given in the account definition; using the former for consistency", c.opConfig.PodServiceAccountName, c.PodServiceAccount.Name)
+			c.logger.Warnf("in the operator configuration, the pod service account name %v does not match the name %v given in the account definition; using the former for consistency", c.opConfig.PodServiceAccountName, c.PodServiceAccount.Name)
 			c.PodServiceAccount.Name = c.opConfig.PodServiceAccountName
 		}
 		c.PodServiceAccount.Namespace = ""

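
For reference, the definition strings are decoded with the client-go scheme codec before the kind checks above run. A minimal standalone sketch of that decode path (illustration only, using the new default ServiceAccount JSON):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/client-go/kubernetes/scheme"
    )

    func main() {
        def := `{"apiVersion": "v1", "kind": "ServiceAccount", "metadata": {"name": "postgres-pod"}}`

        // Decode yields a runtime.Object plus its GroupVersionKind, which is
        // what the switch statement above inspects.
        obj, gvk, err := scheme.Codecs.UniversalDeserializer().Decode([]byte(def), nil, nil)
        if err != nil {
            panic(err)
        }
        if gvk.Kind != "ServiceAccount" {
            panic(fmt.Errorf("expected a ServiceAccount, got %v", gvk.Kind))
        }
        sa := obj.(*v1.ServiceAccount)
        fmt.Println(sa.Name) // postgres-pod
    }
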
@@ -198,7 +199,7 @@ func (c *Controller) initRoleBinding() {
 	if c.opConfig.PodServiceAccountRoleBindingDefinition == "" {
 		c.opConfig.PodServiceAccountRoleBindingDefinition = fmt.Sprintf(`
 		{
-			"apiVersion": "rbac.authorization.k8s.io/v1beta1",
+			"apiVersion": "rbac.authorization.k8s.io/v1",
 			"kind": "RoleBinding",
 			"metadata": {
 				   "name": "%s"

@@ -223,11 +224,11 @@ func (c *Controller) initRoleBinding() {

 	switch {
 	case err != nil:
-		panic(fmt.Errorf("Unable to parse the definition of the role binding for the pod service account definition from the operator config map: %v", err))
+		panic(fmt.Errorf("unable to parse the definition of the role binding for the pod service account definition from the operator configuration: %v", err))
 	case groupVersionKind.Kind != "RoleBinding":
-		panic(fmt.Errorf("role binding definition in the operator config map defines another type of resource: %v", groupVersionKind.Kind))
+		panic(fmt.Errorf("role binding definition in the operator configuration defines another type of resource: %v", groupVersionKind.Kind))
 	default:
-		c.PodServiceAccountRoleBinding = obj.(*rbacv1beta1.RoleBinding)
+		c.PodServiceAccountRoleBinding = obj.(*rbacv1.RoleBinding)
 		c.PodServiceAccountRoleBinding.Namespace = ""
 		c.logger.Info("successfully parsed")

@@ -66,7 +66,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.NodeReadinessLabel = fromCRD.Kubernetes.NodeReadinessLabel
 	result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName
 	result.PodManagementPolicy = fromCRD.Kubernetes.PodManagementPolicy
-	result.MasterPodMoveTimeout = fromCRD.Kubernetes.MasterPodMoveTimeout
+	result.MasterPodMoveTimeout = time.Duration(fromCRD.Kubernetes.MasterPodMoveTimeout)
 	result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity
 	result.PodAntiAffinityTopologyKey = fromCRD.Kubernetes.PodAntiAffinityTopologyKey

@@ -75,6 +75,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest
 	result.DefaultCPULimit = fromCRD.PostgresPodResources.DefaultCPULimit
 	result.DefaultMemoryLimit = fromCRD.PostgresPodResources.DefaultMemoryLimit
+	result.MinCPULimit = fromCRD.PostgresPodResources.MinCPULimit
+	result.MinMemoryLimit = fromCRD.PostgresPodResources.MinMemoryLimit

 	// timeout config
 	result.ResourceCheckInterval = time.Duration(fromCRD.Timeouts.ResourceCheckInterval)

@@ -104,6 +106,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.LogicalBackupSchedule = fromCRD.LogicalBackup.Schedule
 	result.LogicalBackupDockerImage = fromCRD.LogicalBackup.DockerImage
 	result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket
+	result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region
 	result.LogicalBackupS3Endpoint = fromCRD.LogicalBackup.S3Endpoint
 	result.LogicalBackupS3AccessKeyID = fromCRD.LogicalBackup.S3AccessKeyID
 	result.LogicalBackupS3SecretAccessKey = fromCRD.LogicalBackup.S3SecretAccessKey

@@ -37,8 +37,10 @@ type Resources struct {
 	PodToleration map[string]string `name:"toleration" default:""`
 	DefaultCPURequest string `name:"default_cpu_request" default:"100m"`
 	DefaultMemoryRequest string `name:"default_memory_request" default:"100Mi"`
-	DefaultCPULimit string `name:"default_cpu_limit" default:"3"`
-	DefaultMemoryLimit string `name:"default_memory_limit" default:"1Gi"`
+	DefaultCPULimit string `name:"default_cpu_limit" default:"1"`
+	DefaultMemoryLimit string `name:"default_memory_limit" default:"500Mi"`
+	MinCPULimit string `name:"min_cpu_limit" default:"250m"`
+	MinMemoryLimit string `name:"min_memory_limit" default:"250Mi"`
 	PodEnvironmentConfigMap string `name:"pod_environment_configmap" default:""`
 	NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""`
 	MaxInstances int32 `name:"max_instances" default:"-1"`

@@ -66,7 +68,7 @@ type Scalyr struct {
 	ScalyrCPURequest string `name:"scalyr_cpu_request" default:"100m"`
 	ScalyrMemoryRequest string `name:"scalyr_memory_request" default:"50Mi"`
 	ScalyrCPULimit string `name:"scalyr_cpu_limit" default:"1"`
-	ScalyrMemoryLimit string `name:"scalyr_memory_limit" default:"1Gi"`
+	ScalyrMemoryLimit string `name:"scalyr_memory_limit" default:"500Mi"`
 }

 // LogicalBackup defines configuration for logical backup

@@ -74,6 +76,7 @@ type LogicalBackup struct {
 	LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"`
 	LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup"`
 	LogicalBackupS3Bucket string `name:"logical_backup_s3_bucket" default:""`
+	LogicalBackupS3Region string `name:"logical_backup_s3_region" default:""`
 	LogicalBackupS3Endpoint string `name:"logical_backup_s3_endpoint" default:""`
 	LogicalBackupS3AccessKeyID string `name:"logical_backup_s3_access_key_id" default:""`
 	LogicalBackupS3SecretAccessKey string `name:"logical_backup_s3_secret_access_key" default:""`

@@ -93,7 +96,7 @@ type Config struct {
 	DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16"`
 	Sidecars map[string]string `name:"sidecar_docker_images"`
 	// default name `operator` enables backward compatibility with the older ServiceAccountName field
-	PodServiceAccountName string `name:"pod_service_account_name" default:"operator"`
+	PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
 	// value of this string must be valid JSON or YAML; see initPodServiceAccount
 	PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""`
 	PodServiceAccountRoleBindingDefinition string `name:"pod_service_account_role_binding_definition" default:""`

@@ -9,7 +9,6 @@ import (
 	batchv1beta1 "k8s.io/api/batch/v1beta1"
 	clientbatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1"

-	"github.com/zalando/postgres-operator/pkg/util/constants"
 	v1 "k8s.io/api/core/v1"
 	policybeta1 "k8s.io/api/policy/v1beta1"
 	apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"

@@ -19,7 +18,7 @@ import (
 	appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
 	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
 	policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
-	rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
+	rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"

@@ -40,7 +39,7 @@ type KubernetesClient struct {
 	corev1.NamespacesGetter
 	corev1.ServiceAccountsGetter
 	appsv1.StatefulSetsGetter
-	rbacv1beta1.RoleBindingsGetter
+	rbacv1.RoleBindingsGetter
 	policyv1beta1.PodDisruptionBudgetsGetter
 	apiextbeta1.CustomResourceDefinitionsGetter
 	clientbatchv1beta1.CronJobsGetter

|
|||
kubeClient.StatefulSetsGetter = client.AppsV1()
|
||||
kubeClient.PodDisruptionBudgetsGetter = client.PolicyV1beta1()
|
||||
kubeClient.RESTClient = client.CoreV1().RESTClient()
|
||||
kubeClient.RoleBindingsGetter = client.RbacV1beta1()
|
||||
kubeClient.RoleBindingsGetter = client.RbacV1()
|
||||
kubeClient.CronJobsGetter = client.BatchV1beta1()
|
||||
|
||||
apiextClient, err := apiextclient.NewForConfig(cfg)
|
||||
|
|
@@ -136,21 +135,37 @@ func SameService(cur, new *v1.Service) (match bool, reason string) {
 		}
 	}

-	oldDNSAnnotation := cur.Annotations[constants.ZalandoDNSNameAnnotation]
-	newDNSAnnotation := new.Annotations[constants.ZalandoDNSNameAnnotation]
-	oldELBAnnotation := cur.Annotations[constants.ElbTimeoutAnnotationName]
-	newELBAnnotation := new.Annotations[constants.ElbTimeoutAnnotationName]
-
-	if oldDNSAnnotation != newDNSAnnotation {
-		return false, fmt.Sprintf("new service's %q annotation value %q doesn't match the current one %q",
-			constants.ZalandoDNSNameAnnotation, newDNSAnnotation, oldDNSAnnotation)
-	}
-	if oldELBAnnotation != newELBAnnotation {
-		return false, fmt.Sprintf("new service's %q annotation value %q doesn't match the current one %q",
-			constants.ElbTimeoutAnnotationName, oldELBAnnotation, newELBAnnotation)
-	}
-
-	return true, ""
+	match = true
+
+	reasonPrefix := "new service's annotations doesn't match the current one:"
+	for ann := range cur.Annotations {
+		if _, ok := new.Annotations[ann]; !ok {
+			match = false
+			if len(reason) == 0 {
+				reason = reasonPrefix
+			}
+			reason += fmt.Sprintf(" Removed '%s'.", ann)
+		}
+	}
+
+	for ann := range new.Annotations {
+		v, ok := cur.Annotations[ann]
+		if !ok {
+			if len(reason) == 0 {
+				reason = reasonPrefix
+			}
+			reason += fmt.Sprintf(" Added '%s' with value '%s'.", ann, new.Annotations[ann])
+			match = false
+		} else if v != new.Annotations[ann] {
+			if len(reason) == 0 {
+				reason = reasonPrefix
+			}
+			reason += fmt.Sprintf(" '%s' changed from '%s' to '%s'.", ann, v, new.Annotations[ann])
+			match = false
+		}
+	}
+
+	return match, reason
 }

 // SamePDB compares the PodDisruptionBudgets

@@ -0,0 +1,311 @@
+package k8sutil
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/zalando/postgres-operator/pkg/util/constants"
+
+	v1 "k8s.io/api/core/v1"
+)
+
+func newsService(ann map[string]string, svcT v1.ServiceType, lbSr []string) *v1.Service {
+	svc := &v1.Service{
+		Spec: v1.ServiceSpec{
+			Type:                     svcT,
+			LoadBalancerSourceRanges: lbSr,
+		},
+	}
+	svc.Annotations = ann
+	return svc
+}
+
+func TestSameService(t *testing.T) {
+	tests := []struct {
+		about   string
+		current *v1.Service
+		new     *v1.Service
+		reason  string
+		match   bool
+	}{
+		{
+			about: "two equal services",
+			current: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeClusterIP,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			new: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeClusterIP,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			match: true,
+		},
+		{
+			about: "services differ on service type",
+			current: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeClusterIP,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			new: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			match:  false,
+			reason: `new service's type "LoadBalancer" doesn't match the current one "ClusterIP"`,
+		},
+		{
+			about: "services differ on lb source ranges",
+			current: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			new: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"185.249.56.0/22"}),
+			match:  false,
+			reason: `new service's LoadBalancerSourceRange doesn't match the current one`,
+		},
+		{
+			about: "new service doesn't have lb source ranges",
+			current: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			new: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{}),
+			match:  false,
+			reason: `new service's LoadBalancerSourceRange doesn't match the current one`,
+		},
+		{
+			about: "services differ on DNS annotation",
+			current: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			new: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "new_clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			match:  false,
+			reason: `new service's annotations doesn't match the current one: 'external-dns.alpha.kubernetes.io/hostname' changed from 'clstr.acid.zalan.do' to 'new_clstr.acid.zalan.do'.`,
+		},
+		{
+			about: "services differ on AWS ELB annotation",
+			current: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			new: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: "1800",
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			match:  false,
+			reason: `new service's annotations doesn't match the current one: 'service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout' changed from '3600' to '1800'.`,
+		},
+		{
+			about: "service changes existing annotation",
+			current: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+					"foo":                              "bar",
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			new: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+					"foo":                              "baz",
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			match:  false,
+			reason: `new service's annotations doesn't match the current one: 'foo' changed from 'bar' to 'baz'.`,
+		},
+		{
+			about: "service changes multiple existing annotations",
+			current: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+					"foo":                              "bar",
+					"bar":                              "foo",
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			new: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+					"foo":                              "baz",
+					"bar":                              "fooz",
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			match: false,
+			// Test just the prefix to avoid flakiness and map sorting
+			reason: `new service's annotations doesn't match the current one:`,
+		},
+		{
+			about: "service adds a new custom annotation",
+			current: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			new: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+					"foo":                              "bar",
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			match:  false,
+			reason: `new service's annotations doesn't match the current one: Added 'foo' with value 'bar'.`,
+		},
+		{
+			about: "service removes a custom annotation",
+			current: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+					"foo":                              "bar",
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			new: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			match:  false,
+			reason: `new service's annotations doesn't match the current one: Removed 'foo'.`,
+		},
+		{
+			about: "service removes a custom annotation and adds a new one",
+			current: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+					"foo":                              "bar",
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			new: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+					"bar":                              "foo",
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			match:  false,
+			reason: `new service's annotations doesn't match the current one: Removed 'foo'. Added 'bar' with value 'foo'.`,
+		},
+		{
+			about: "service removes a custom annotation, adds a new one and change another",
+			current: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+					"foo":                              "bar",
+					"zalan":                            "do",
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			new: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+					"bar":                              "foo",
+					"zalan":                            "do.com",
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			match: false,
+			// Test just the prefix to avoid flakiness and map sorting
+			reason: `new service's annotations doesn't match the current one: Removed 'foo'.`,
+		},
+		{
+			about: "service add annotations",
+			current: newsService(
+				map[string]string{},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			new: newsService(
+				map[string]string{
+					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
+					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
+				},
+				v1.ServiceTypeLoadBalancer,
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
+			match: false,
+			// Test just the prefix to avoid flakiness and map sorting
+			reason: `new service's annotations doesn't match the current one: Added `,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.about, func(t *testing.T) {
+			match, reason := SameService(tt.current, tt.new)
+			if match && !tt.match {
+				t.Errorf("expected services not to match: '%q' and '%q'", tt.current, tt.new)
+				return
+			}
+			if !match && tt.match {
+				t.Errorf("expected services to be the same: '%q' and '%q'", tt.current, tt.new)
+				return
+			}
+			if !match && !tt.match {
+				if !strings.HasPrefix(reason, tt.reason) {
+					t.Errorf("expected reason prefix '%s', found '%s'", tt.reason, reason)
+					return
+				}
+			}
+		})
+	}
+}

ui/Makefile
@@ -1,17 +1,6 @@
 .PHONY: clean test appjs docker push mock

-BINARY ?= postgres-operator-ui
-BUILD_FLAGS ?= -v
-CGO_ENABLED ?= 0
-ifeq ($(RACE),1)
-	BUILD_FLAGS += -race -a
-	CGO_ENABLED=1
-endif
-
-LOCAL_BUILD_FLAGS ?= $(BUILD_FLAGS)
-LDFLAGS ?= -X=main.version=$(VERSION)
-
-IMAGE ?= registry.opensource.zalan.do/acid/$(BINARY)
+IMAGE ?= registry.opensource.zalan.do/acid/postgres-operator-ui
 VERSION ?= $(shell git describe --tags --always --dirty)
 TAG ?= $(VERSION)
 GITHEAD = $(shell git rev-parse --short HEAD)

@@ -32,8 +21,11 @@ appjs:
	docker run $(TTYFLAGS) -u $$(id -u) -v $$(pwd):/workdir -w /workdir/app node:10.1.0-alpine npm run build

 docker: appjs
-	docker build --build-arg "VERSION=$(VERSION)" -t "$(IMAGE):$(TAG)" .
-	@echo 'Docker image $(IMAGE):$(TAG) can now be used.'
+	echo `(env)`
+	echo "Tag ${TAG}"
+	echo "Version ${VERSION}"
+	echo "git describe $(shell git describe --tags --always --dirty)"
+	docker build --rm -t "$(IMAGE):$(TAG)" -f Dockerfile .

 push: docker
	docker push "$(IMAGE):$(TAG)"

@@ -1,6 +1,6 @@
 {
   "name": "postgres-operator-ui",
-  "version": "1.0.0",
+  "version": "1.3.0",
   "description": "PostgreSQL Operator UI",
   "main": "src/app.js",
   "config": {

@@ -408,7 +408,7 @@ new
                  ref='cpuLimit'
                  type='number'
                  placeholder='{ cpu.state.limit.initialValue }'
-                 min='1'
+                 min='250'
                  required
                  value='{ cpu.state.limit.state }'
                  onchange='{ cpu.state.limit.edit }'

@@ -434,7 +434,7 @@ new
                  onkeyup='{ memory.state.request.edit }'
                )
              .input-group-addon
-               .input-units Gi
+               .input-units Mi

            .input-group
              .input-group-addon.resource-type Limit

@@ -442,14 +442,14 @@ new
                  ref='memoryLimit'
                  type='number'
                  placeholder='{ memory.state.limit.initialValue }'
-                 min='1'
+                 min='250'
                  required
                  value='{ memory.state.limit.state }'
                  onchange='{ memory.state.limit.edit }'
                  onkeyup='{ memory.state.limit.edit }'
                )
              .input-group-addon
-               .input-units Gi
+               .input-units Mi

    .col-lg-3
      help-general(config='{ opts.config }')

@@ -519,10 +519,10 @@ new
          resources:
            requests:
              cpu: {{ cpu.state.request.state }}m
-             memory: {{ memory.state.request.state }}Gi
+             memory: {{ memory.state.request.state }}Mi
            limits:
              cpu: {{ cpu.state.limit.state }}m
-             memory: {{ memory.state.limit.state }}Gi{{#if restoring}}
+             memory: {{ memory.state.limit.state }}Mi{{#if restoring}}

        clone:
          cluster: "{{ backup.state.name.state }}"

@@ -786,8 +786,8 @@ new
       return instance
     }
 
-    this.cpu = DynamicResource({ request: 100, limit: 1000 })
-    this.memory = DynamicResource({ request: 1, limit: 1 })
+    this.cpu = DynamicResource({ request: 100, limit: 500 })
+    this.memory = DynamicResource({ request: 100, limit: 500 })
 
     this.backup = DynamicSet({
       type: () => 'empty',

@@ -76,6 +76,9 @@ postgresql
     .alert.alert-danger(if='{ progress.requestStatus !== "OK" }') Create request failed
     .alert.alert-success(if='{ progress.requestStatus === "OK" }') Create request successful ({ new Date(progress.createdTimestamp).toLocaleString() })
 
+    .alert.alert-info(if='{ !progress.postgresql }') PostgreSQL cluster manifest pending
+    .alert.alert-success(if='{ progress.postgresql }') PostgreSQL cluster manifest created
+
     .alert.alert-info(if='{ !progress.statefulSet }') StatefulSet pending
     .alert.alert-success(if='{ progress.statefulSet }') StatefulSet created

@@ -45,12 +45,14 @@ postgresqls
       thead
         tr
           th(style='width: 120px') Team
-          th(style='width: 130px') Namespace
-          th Name
           th(style='width: 50px') Pods
           th(style='width: 140px') CPU
           th(style='width: 130px') Memory
           th(style='width: 100px') Size
+          th(style='width: 130px') Namespace
+          th Name
+          th(style='width: 120px') Cost/Month
+          th(style='width: 120px')
 
       tbody
         tr(
@@ -58,19 +60,21 @@ postgresqls
           hidden='{ !namespaced_name.toLowerCase().includes(filter.state.toLowerCase()) }'
         )
           td { team }
-          td { nodes }
-          td { cpu } / { cpu_limit }
-          td { memory } / { memory_limit }
-          td { volume_size }
 
           td(style='white-space: pre')
             | { namespace }
 
           td
             a(
               href='/#/status/{ cluster_path(this) }'
             )
               | { name }
+          td { nodes }
+          td { cpu } / { cpu_limit }
+          td { memory } / { memory_limit }
+          td { volume_size }
+          td { calcCosts(nodes, cpu, memory, volume_size) }$
 
           td
 
             .btn-group.pull-right(
               aria-label='Cluster { qname } actions'
@@ -124,12 +128,14 @@ postgresqls
       thead
         tr
           th(style='width: 120px') Team
-          th(style='width: 130px') Namespace
-          th Name
           th(style='width: 50px') Pods
           th(style='width: 140px') CPU
           th(style='width: 130px') Memory
           th(style='width: 100px') Size
+          th(style='width: 130px') Namespace
+          th Name
+          th(style='width: 120px') Cost/Month
+          th(style='width: 120px')
 
       tbody
         tr(
@@ -137,20 +143,20 @@ postgresqls
           hidden='{ !namespaced_name.toLowerCase().includes(filter.state.toLowerCase()) }'
         )
           td { team }
-          td { nodes }
-          td { cpu } / { cpu_limit }
-          td { memory } / { memory_limit }
-          td { volume_size }
 
           td(style='white-space: pre')
             | { namespace }
 
           td
             a(
               href='/#/status/{ cluster_path(this) }'
             )
               | { name }
+          td { nodes }
+          td { cpu } / { cpu_limit }
+          td { memory } / { memory_limit }
+          td { volume_size }
+          td { calcCosts(nodes, cpu, memory, volume_size) }$
 
           td
 
             .btn-group.pull-right(
               aria-label='Cluster { qname } actions'
@@ -223,6 +229,45 @@ postgresqls
         + '/' + encodeURI(cluster.name)
       )
 
+      const calcCosts = this.calcCosts = (nodes, cpu, memory, disk) => {
+        const costs = nodes * (toCores(cpu) * opts.config.cost_core + toMemory(memory) * opts.config.cost_memory + toDisk(disk) * opts.config.cost_ebs)
+        return costs.toFixed(2)
+      }
+
+      const toDisk = this.toDisk = value => {
+        if (value.endsWith("Gi")) {
+          value = value.substring(0, value.length - 2)
+          value = Number(value)
+          return value
+        }
+
+        return value
+      }
+
+      const toMemory = this.toMemory = value => {
+        if (value.endsWith("Mi")) {
+          value = value.substring(0, value.length - 2)
+          value = Number(value) / 1000.
+          return value
+        }
+        else if (value.endsWith("Gi")) {
+          value = value.substring(0, value.length - 2)
+          value = Number(value)
+          return value
+        }
+
+        return value
+      }
+
+      const toCores = this.toCores = value => {
+        if (value.endsWith("m")) {
+          value = value.substring(0, value.length - 1)
+          value = Number(value) / 1000.
+          return value
+        }
+        return value
+      }
+
       this.on('mount', () =>
         jQuery
           .get('/postgresqls')
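For orientation: the Cost/Month cell added above is just nodes × (cores × core price + memory in GB × memory price + disk in GB × volume price), after normalizing the m/Mi/Gi suffixes. Below is a minimal Python sketch of the same arithmetic, assuming the monthly prices this commit derives from its defaults; the function names (to_cores, to_gb, monthly_cost) are illustrative, not from the codebase.

def to_cores(value: str) -> float:
    # "500m" -> 0.5 cores; bare numbers pass through unchanged
    return float(value[:-1]) / 1000.0 if value.endswith('m') else float(value)

def to_gb(value: str) -> float:
    # "500Mi" -> 0.5, "10Gi" -> 10.0 (same decimal shortcut as the JS helpers above)
    if value.endswith('Mi'):
        return float(value[:-2]) / 1000.0
    if value.endswith('Gi'):
        return float(value[:-2])
    return float(value)

def monthly_cost(nodes, cpu, memory, disk,
                 cost_core=42.09, cost_memory=10.52, cost_ebs=0.119):
    # Per-node cost times node count, rounded like costs.toFixed(2)
    return round(nodes * (to_cores(cpu) * cost_core
                          + to_gb(memory) * cost_memory
                          + to_gb(disk) * cost_ebs), 2)

print(monthly_cost(2, '500m', '500Mi', '10Gi'))  # 54.99 for a two-pod cluster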
@@ -4,23 +4,23 @@ metadata:
   name: "postgres-operator-ui"
   namespace: "default"
   labels:
-    application: "postgres-operator-ui"
+    name: "postgres-operator-ui"
+    team: "acid"
 spec:
   replicas: 1
   selector:
     matchLabels:
-      application: "postgres-operator-ui"
+      name: "postgres-operator-ui"
   template:
     metadata:
       labels:
-        application: "postgres-operator-ui"
+        name: "postgres-operator-ui"
+        team: "acid"
     spec:
       serviceAccountName: postgres-operator-ui
       containers:
         - name: "service"
-          image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.2.0
+          image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.3.0
           ports:
             - containerPort: 8081
               protocol: "TCP"
@@ -32,8 +32,8 @@ spec:
             timeoutSeconds: 1
           resources:
             limits:
-              cpu: "300m"
-              memory: "3000Mi"
+              cpu: "200m"
+              memory: "200Mi"
             requests:
               cpu: "100m"
               memory: "100Mi"
@@ -41,7 +41,9 @@ spec:
             - name: "APP_URL"
               value: "http://localhost:8081"
             - name: "OPERATOR_API_URL"
-              value: "http://localhost:8080"
+              value: "http://postgres-operator:8080"
+            - name: "OPERATOR_CLUSTER_NAME_LABEL"
+              value: "cluster-name"
             - name: "TARGET_NAMESPACE"
               value: "default"
             - name: "TEAMS"
@@ -60,9 +62,14 @@ spec:
                   "replica_load_balancer_visible": true,
                   "resources_visible": true,
                   "users_visible": true,
+                  "cost_ebs": 0.119,
+                  "cost_core": 0.0575,
+                  "cost_memory": 0.014375,
                   "postgresql_versions": [
+                    "12",
                     "11",
                     "10",
-                    "9.6"
+                    "9.6",
+                    "9.5"
                   ]
                 }

@@ -5,7 +5,7 @@ metadata:
   namespace: default
 
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: postgres-operator-ui
@@ -61,7 +61,5 @@ roleRef:
   name: postgres-operator-ui
 subjects:
 - kind: ServiceAccount
-  # note: the cluster role binding needs to be defined
-  # for every namespace the operator-ui service account lives in.
   name: postgres-operator-ui
   namespace: default

@@ -76,6 +76,7 @@ ACCESS_TOKEN_URL = getenv('ACCESS_TOKEN_URL')
 TOKENINFO_URL = getenv('OAUTH2_TOKEN_INFO_URL')
 
 OPERATOR_API_URL = getenv('OPERATOR_API_URL', 'http://postgres-operator')
+OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name')
 OPERATOR_UI_CONFIG = getenv('OPERATOR_UI_CONFIG', '{}')
 OPERATOR_UI_MAINTENANCE_CHECK = getenv('OPERATOR_UI_MAINTENANCE_CHECK', '{}')
 READ_ONLY_MODE = getenv('READ_ONLY_MODE', False) in [True, 'true']
@@ -84,6 +85,13 @@ SUPERUSER_TEAM = getenv('SUPERUSER_TEAM', 'acid')
 TARGET_NAMESPACE = getenv('TARGET_NAMESPACE')
 GOOGLE_ANALYTICS = getenv('GOOGLE_ANALYTICS', False)
 
+# storage pricing, i.e. https://aws.amazon.com/ebs/pricing/
+COST_EBS = float(getenv('COST_EBS', 0.119))  # GB per month
+
+# compute costs, i.e. https://www.ec2instances.info/?region=eu-central-1&selected=m5.2xlarge
+COST_CORE = 30.5 * 24 * float(getenv('COST_CORE', 0.0575))  # Core per hour m5.2xlarge / 8.
+COST_MEMORY = 30.5 * 24 * float(getenv('COST_MEMORY', 0.014375))  # Memory GB m5.2xlarge / 32.
+
 WALE_S3_ENDPOINT = getenv(
     'WALE_S3_ENDPOINT',
     'https+path://s3-eu-central-1.amazonaws.com:443',
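As a sanity check on the constants above: the env defaults are hourly, per-unit prices, and the 30.5 × 24 factor converts them to an average month of wall-clock time. A worked example in Python, using only the defaults shown in this diff (variable names are illustrative):

hours_per_month = 30.5 * 24                  # 732 hours in an average month
cost_core = hours_per_month * 0.0575         # = 42.09 USD per core-month
cost_memory = hours_per_month * 0.014375     # ~= 10.52 USD per GB-month
print(round(cost_core, 2), round(cost_memory, 2))  # 42.09 10.52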
@@ -293,6 +301,9 @@ DEFAULT_UI_CONFIG = {
     'dns_format_string': '{0}.{1}.{2}',
     'pgui_link': '',
     'static_network_whitelist': {},
+    'cost_ebs': COST_EBS,
+    'cost_core': COST_CORE,
+    'cost_memory': COST_MEMORY
 }

@@ -1003,6 +1014,7 @@ def main(port, secret_key, debug, clusters: list):
     logger.info(f'App URL: {APP_URL}')
     logger.info(f'Authorize URL: {AUTHORIZE_URL}')
     logger.info(f'Operator API URL: {OPERATOR_API_URL}')
+    logger.info(f'Operator cluster name label: {OPERATOR_CLUSTER_NAME_LABEL}')
     logger.info(f'Readonly mode: {"enabled" if READ_ONLY_MODE else "disabled"}')  # noqa
     logger.info(f'Spilo S3 backup bucket: {SPILO_S3_BACKUP_BUCKET}')
     logger.info(f'Spilo S3 backup prefix: {SPILO_S3_BACKUP_PREFIX}')

@@ -3,7 +3,7 @@ from datetime import datetime, timezone
 from furl import furl
 from json import dumps
 from logging import getLogger
-from os import environ
+from os import environ, getenv
 from requests import Session
 from urllib.parse import urljoin
 from uuid import UUID
@@ -16,6 +16,8 @@ logger = getLogger(__name__)
 
 session = Session()
 
+OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name')
+
 
 def request(cluster, path, **kwargs):
     if 'timeout' not in kwargs:
@@ -137,7 +139,7 @@ def read_pods(cluster, namespace, spilo_cluster):
         cluster=cluster,
         resource_type='pods',
         namespace=namespace,
-        label_selector={'cluster-name': spilo_cluster},
+        label_selector={OPERATOR_CLUSTER_NAME_LABEL: spilo_cluster},
     )
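The two hunks above make the pod lookup's label key configurable instead of hard-coding 'cluster-name'. A minimal sketch of the resulting behaviour, assuming only the getenv default from the diff; build_label_selector is a hypothetical helper for illustration (the real code passes the dict to its own request wrapper):

from os import getenv

OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name')

def build_label_selector(spilo_cluster: str) -> str:
    # Kubernetes serializes equality label selectors as "key=value"
    selector = {OPERATOR_CLUSTER_NAME_LABEL: spilo_cluster}
    return ','.join(f'{key}={value}' for key, value in selector.items())

print(build_label_selector('acid-minimal-cluster'))
# with the default env this prints: cluster-name=acid-minimal-cluster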
@@ -1,14 +1,14 @@
 Flask-OAuthlib==0.9.5
-Flask==1.0.2
-backoff==1.5.0
-boto3==1.5.14
-boto==2.48.0
+Flask==1.1.1
+backoff==1.8.1
+boto3==1.10.4
+boto==2.49.0
 click==6.7
-furl==1.0.1
+furl==1.0.2
 gevent==1.2.2
 jq==0.1.6
 json_delta>=2.0
 kubernetes==3.0.0
-requests==2.20.1
+requests==2.22.0
 stups-tokens>=1.1.19
-wal_e==1.1.0
+wal_e==1.1.0

@@ -19,10 +19,15 @@ default_operator_ui_config='{
     "nat_gateways_visible": false,
     "resources_visible": true,
     "users_visible": true,
+    "cost_ebs": 0.119,
+    "cost_core": 0.0575,
+    "cost_memory": 0.014375,
     "postgresql_versions": [
+        "12",
         "11",
         "10",
-        "9.6"
+        "9.6",
+        "9.5"
     ],
     "static_network_whitelist": {
         "localhost": ["172.0.0.1/32"]